#!/usr/bin/env python3
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
"""Convert directories of JSON events to C code."""
import argparse
import csv
from functools import lru_cache
import json
import metric
import os
import sys
from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
import collections

# Global command line arguments.
_args = None
# List of regular event tables.
_event_tables = []
# List of event tables generated from "/sys" directories.
_sys_event_tables = []
# List of regular metric tables.
_metric_tables = []
# List of metric tables generated from "/sys" directories.
_sys_metric_tables = []
# Mapping between sys event table names and sys metric table names.
_sys_event_table_to_metric_table_mapping = {}
# Map from an event name to an architecture standard
# JsonEvent. Architecture standard events are in json files in the top
# f'{_args.starting_dir}/{_args.arch}' directory.
_arch_std_events = {}
# Events to write out when the table is closed
_pending_events = []
# Name of events table to be written out
_pending_events_tblname = None
# Metrics to write out when the table is closed
_pending_metrics = []
# Name of metrics table to be written out
_pending_metrics_tblname = None
# Global BigCString shared by all structures.
_bcs = None
# Map from the name of a metric group to a description of the group.
_metricgroups = {}
# Order specific JsonEvent attributes will be visited.
_json_event_attributes = [
    # cmp_sevent related attributes.
    'name', 'topic', 'desc',
    # Seems useful, put it early.
    'event',
    # Short things in alphabetical order.
    'compat', 'deprecated', 'perpkg', 'unit',
    # Longer things (the last won't be iterated over during decompress).
    'long_desc'
]

# Attributes that are in pmu_metric rather than pmu_event.
_json_metric_attributes = [
    'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
    'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
    'default_metricgroup_name', 'aggr_mode', 'event_grouping'
]
# Attributes that are bools or enum int values, encoded as '0', '1',...
_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']

def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.
  """
  return s[0:-len(suffix)] if s.endswith(suffix) else s


def file_name_to_table_name(prefix: str, parents: Sequence[str],
                            dirname: str) -> str:
  """Generate a C table name from directory names."""
  tblname = prefix
  for p in parents:
    tblname += '_' + p
  tblname += '_' + dirname
  return tblname.replace('-', '_')


def c_len(s: str) -> int:
  """Return the length of s as a C string

  This doesn't handle all escape characters properly. It first assumes
  all \\ are for escaping, it then adjusts as it will have over counted
  \\. The code uses \000 rather than \0 as a terminator as an adjacent
  number would be folded into a string of \0 (ie. "\0" + "5" doesn't
  equal a terminator followed by the number 5 but the escape of
  \05). The code adjusts for \000 but not properly for all octal, hex
  or unicode values.
  """
  try:
    utf = s.encode(encoding='utf-8',errors='strict')
  except:
    print(f'broken string {s}')
    raise
  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
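
# A worked example of the counting above: for the Python string 'abc\\000'
# (the characters a, b, c, backslash, 0, 0, 0) the encoded length is 7; one
# is subtracted for the escaping backslash and two more for the '\000'
# escape, giving 4, which matches the bytes the C compiler emits for
# "abc\000": 'a', 'b', 'c' and a NUL terminator.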

class BigCString:
  """A class to hold many strings concatenated together.

  Generating a large number of stand-alone C strings creates a large
  number of relocations in position independent code. The BigCString
  is a helper for this case. It builds a single string which within it
  are all the other C strings (to avoid memory issues the string
  itself is held as a list of strings). The offsets within the big
  string are recorded and when stored to disk these don't need
  relocation. To reduce the size of the string further, identical
  strings are merged. If a longer string ends-with the same value as a
  shorter string, these entries are also merged.
  """
  strings: Set[str]
  big_string: Sequence[str]
  offsets: Dict[str, int]
  insert_number: int
  insert_point: Dict[str, int]
  metrics: Set[str]

  def __init__(self):
    self.strings = set()
    self.insert_number = 0
    self.insert_point = {}
    self.metrics = set()

  def add(self, s: str, metric: bool) -> None:
    """Called to add to the big string."""
    if s not in self.strings:
      self.strings.add(s)
      self.insert_point[s] = self.insert_number
      self.insert_number += 1
      if metric:
        self.metrics.add(s)

  def compute(self) -> None:
    """Called once all strings are added to compute the string and offsets."""

    folded_strings = {}
    # Determine if two strings can be folded, ie. let 1 string use the
    # end of another. First reverse all strings and sort them.
    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])

    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
    # for each string to see if there is a better candidate to fold it
    # into, in the example rather than using 'yz' we can use 'xyz' at
    # an offset of 1. We record which string can be folded into which
    # in folded_strings, we don't need to record the offset as it is
    # trivially computed from the string lengths.
    for pos,s in enumerate(sorted_reversed_strings):
      best_pos = pos
      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
        if sorted_reversed_strings[check_pos].startswith(s):
          best_pos = check_pos
        else:
          break
      if pos != best_pos:
        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]

    # Compute reverse mappings for debugging.
    fold_into_strings = collections.defaultdict(set)
    for key, val in folded_strings.items():
      if key != val:
        fold_into_strings[val].add(key)

    # big_string_offset is the current location within the C string
    # being appended to - comments, etc. don't count. big_string is
    # the string contents represented as a list. Strings are immutable
    # in Python and so appending to one causes memory issues, while
    # lists are mutable.
    big_string_offset = 0
    self.big_string = []
    self.offsets = {}

    def string_cmp_key(s: str) -> Tuple[bool, int, str]:
      return (s in self.metrics, self.insert_point[s], s)

    # Emit all strings that aren't folded in a sorted manner.
    for s in sorted(self.strings, key=string_cmp_key):
      if s not in folded_strings:
        self.offsets[s] = big_string_offset
        self.big_string.append(f'/* offset={big_string_offset} */ "')
        self.big_string.append(s)
        self.big_string.append('"')
        if s in fold_into_strings:
          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
        self.big_string.append('\n')
        big_string_offset += c_len(s)
        continue

    # Compute the offsets of the folded strings.
    for s in folded_strings.keys():
      assert s not in self.offsets
      folded_s = folded_strings[s]
      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)

_bcs = BigCString()
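
# A small example of the folding above, for two hypothetical inputs: if both
# "uncore_cbox\000" and "cbox\000" are added, only "uncore_cbox\000" is
# emitted because it ends with the shorter string. The shorter string's
# offset is then offsets['uncore_cbox\000'] + c_len('uncore_cbox\000') -
# c_len('cbox\000'), i.e. 7 bytes into the longer string, which is exactly
# where the bytes "cbox" followed by the terminator begin.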

class JsonEvent:
  """Representation of an event loaded from a json file dictionary."""

  def __init__(self, jd: dict):
    """Constructor passed the dictionary of parsed json values."""

    def llx(x: int) -> str:
      """Convert an int to a string similar to a printf modifier of %#llx."""
      return str(x) if x >= 0 and x < 10 else hex(x)

    def fixdesc(s: str) -> str:
      """Fix formatting issue for the desc string."""
      if s is None:
        return None
      return removesuffix(removesuffix(removesuffix(s, '. '),
                                       '. '), '.').replace('\n', '\\n').replace(
                                           '\"', '\\"').replace('\r', '\\r')

    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
      """Returns the aggr_mode_class enum value associated with the JSON string."""
      if not aggr_mode:
        return None
      aggr_mode_to_enum = {
          'PerChip': '1',
          'PerCore': '2',
      }
      return aggr_mode_to_enum[aggr_mode]

    def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
      """Returns the metric_event_groups enum value associated with the JSON string."""
      if not metric_constraint:
        return None
      metric_constraint_to_enum = {
          'NO_GROUP_EVENTS': '1',
          'NO_GROUP_EVENTS_NMI': '2',
          'NO_NMI_WATCHDOG': '2',
          'NO_GROUP_EVENTS_SMT': '3',
      }
      return metric_constraint_to_enum[metric_constraint]

    def lookup_msr(num: str) -> Optional[str]:
      """Converts the msr number, or first in a list, to the appropriate event field."""
      if not num:
        return None
      msrmap = {
          0x3F6: 'ldlat=',
          0x1A6: 'offcore_rsp=',
          0x1A7: 'offcore_rsp=',
          0x3F7: 'frontend=',
      }
      return msrmap[int(num.split(',', 1)[0], 0)]

    def real_event(name: str, event: str) -> Optional[str]:
      """Convert well known event names to an event string otherwise use the event argument."""
      fixed = {
          'inst_retired.any': 'event=0xc0,period=2000003',
          'inst_retired.any_p': 'event=0xc0,period=2000003',
          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
      }
      if not name:
        return None
      if name.lower() in fixed:
        return fixed[name.lower()]
      return event

    def unit_to_pmu(unit: str) -> Optional[str]:
      """Convert a JSON Unit to Linux PMU name."""
      if not unit:
        return 'default_core'
      # Comment brought over from jevents.c:
      # it's not realistic to keep adding these, we need something more scalable ...
      table = {
          'CBO': 'uncore_cbox',
          'QPI LL': 'uncore_qpi',
          'SBO': 'uncore_sbox',
          'iMPH-U': 'uncore_arb',
          'CPU-M-CF': 'cpum_cf',
          'CPU-M-SF': 'cpum_sf',
          'PAI-CRYPTO': 'pai_crypto',
          'PAI-EXT': 'pai_ext',
          'UPI LL': 'uncore_upi',
          'hisi_sicl,cpa': 'hisi_sicl,cpa',
          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
          'hisi_sccl,hha': 'hisi_sccl,hha',
          'hisi_sccl,l3c': 'hisi_sccl,l3c',
          'imx8_ddr': 'imx8_ddr',
          'imx9_ddr': 'imx9_ddr',
          'L3PMC': 'amd_l3',
          'DFPMC': 'amd_df',
          'UMCPMC': 'amd_umc',
          'cpu_core': 'cpu_core',
          'cpu_atom': 'cpu_atom',
          'ali_drw': 'ali_drw',
          'arm_cmn': 'arm_cmn',
      }
      return table[unit] if unit in table else f'uncore_{unit.lower()}'

    def is_zero(val: str) -> bool:
      try:
        if val.startswith('0x'):
          return int(val, 16) == 0
        else:
          return int(val) == 0
      except ValueError:
        return False

    def canonicalize_value(val: str) -> str:
      try:
        if val.startswith('0x'):
          return llx(int(val, 16))
        return str(int(val))
      except ValueError:
        return val
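
    # How the helpers above are used below: event fields whose JSON value
    # parses as zero are dropped entirely (is_zero), and the remaining
    # values are canonicalized so that, for example, '0x01' becomes '1'
    # and '0x0F' becomes '0xf', while values that don't parse as numbers
    # pass through unchanged (canonicalize_value).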

    eventcode = 0
    if 'EventCode' in jd:
      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
    if 'ExtSel' in jd:
      eventcode |= int(jd['ExtSel']) << 8
    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
    eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None
    self.name = jd['EventName'].lower() if 'EventName' in jd else None
    self.topic = ''
    self.compat = jd.get('Compat')
    self.desc = fixdesc(jd.get('BriefDescription'))
    self.long_desc = fixdesc(jd.get('PublicDescription'))
    precise = jd.get('PEBS')
    msr = lookup_msr(jd.get('MSRIndex'))
    msrval = jd.get('MSRValue')
    extra_desc = ''
    if 'Data_LA' in jd:
      extra_desc += '  Supports address when precise'
      if 'Errata' in jd:
        extra_desc += '.'
    if 'Errata' in jd:
      extra_desc += '  Spec update: ' + jd['Errata']
    self.pmu = unit_to_pmu(jd.get('Unit'))
    filter = jd.get('Filter')
    self.unit = jd.get('ScaleUnit')
    self.perpkg = jd.get('PerPkg')
    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
    self.deprecated = jd.get('Deprecated')
    self.metric_name = jd.get('MetricName')
    self.metric_group = jd.get('MetricGroup')
    self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
    self.default_metricgroup_name = jd.get('DefaultMetricgroupName')
    self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
    self.metric_expr = None
    if 'MetricExpr' in jd:
      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
    # Note, the metric formula for the threshold isn't parsed as the &
    # and > have incorrect precedence.
    self.metric_threshold = jd.get('MetricThreshold')

    arch_std = jd.get('ArchStdEvent')
    if precise and self.desc and '(Precise Event)' not in self.desc:
      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
                                                                 'event)')
    event = None
    if configcode is not None:
      event = f'config={llx(configcode)}'
    elif eventidcode is not None:
      event = f'eventid={llx(eventidcode)}'
    else:
      event = f'event={llx(eventcode)}'
    event_fields = [
        ('AnyThread', 'any='),
        ('PortMask', 'ch_mask='),
        ('CounterMask', 'cmask='),
        ('EdgeDetect', 'edge='),
        ('FCMask', 'fc_mask='),
        ('Invert', 'inv='),
        ('SampleAfterValue', 'period='),
        ('UMask', 'umask='),
        ('NodeType', 'type='),
        ('RdWrMask', 'rdwrmask='),
        ('EnAllCores', 'enallcores='),
        ('EnAllSlices', 'enallslices='),
        ('SliceId', 'sliceid='),
        ('ThreadMask', 'threadmask='),
    ]
    for key, value in event_fields:
      if key in jd and not is_zero(jd[key]):
        event += f',{value}{canonicalize_value(jd[key])}'
    if filter:
      event += f',{filter}'
    if msr:
      event += f',{msr}{msrval}'
    if self.desc and extra_desc:
      self.desc += extra_desc
    if self.long_desc and extra_desc:
      self.long_desc += extra_desc
    if arch_std:
      if arch_std.lower() in _arch_std_events:
        event = _arch_std_events[arch_std.lower()].event
        # Copy from the architecture standard event to self for undefined fields.
        for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
          if hasattr(self, attr) and not getattr(self, attr):
            setattr(self, attr, value)
      else:
        raise argparse.ArgumentTypeError('Cannot find arch std event:', arch_std)

    self.event = real_event(self.name, event)
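
  # For a hypothetical JSON entry with EventCode "0x3c", SampleAfterValue
  # "2000003" and UMask "0x01", the constructor above produces the event
  # string 'event=0x3c,period=2000003,umask=1': fields are appended in
  # event_fields order and zero-valued fields are skipped.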

  def __repr__(self) -> str:
    """String representation primarily for debugging."""
    s = '{\n'
    for attr, value in self.__dict__.items():
      if value:
        s += f'\t{attr} = {value},\n'
    return s + '}'

  def build_c_string(self, metric: bool) -> str:
    s = ''
    for attr in _json_metric_attributes if metric else _json_event_attributes:
      x = getattr(self, attr)
      if metric and x and attr == 'metric_expr':
        # Convert parsed metric expressions into a string. Slashes
        # must be doubled in the file.
        x = x.ToPerfJson().replace('\\', '\\\\')
      if metric and x and attr == 'metric_threshold':
        x = x.replace('\\', '\\\\')
      if attr in _json_enum_attributes:
        s += x if x else '0'
      else:
        s += f'{x}\\000' if x else '\\000'
    return s

  def to_c_string(self, metric: bool) -> str:
    """Representation of the event as a C struct initializer."""

    s = self.build_c_string(metric)
    return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'


@lru_cache(maxsize=None)
def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
  """Read json events from the specified file."""
  try:
    events = json.load(open(path), object_hook=JsonEvent)
  except BaseException as err:
    print(f"Exception processing {path}")
    raise
  metrics: list[Tuple[str, str, metric.Expression]] = []
  for event in events:
    event.topic = topic
    if event.metric_name and '-' not in event.metric_name:
      metrics.append((event.pmu, event.metric_name, event.metric_expr))
  updates = metric.RewriteMetricsInTermsOfOthers(metrics)
  if updates:
    for event in events:
      if event.metric_name in updates:
        # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
        #       f'to\n"{updates[event.metric_name]}"')
        event.metric_expr = updates[event.metric_name]

  return events

def preprocess_arch_std_files(archpath: str) -> None:
  """Read in all architecture standard events."""
  global _arch_std_events
  for item in os.scandir(archpath):
    if item.is_file() and item.name.endswith('.json'):
      for event in read_json_events(item.path, topic=''):
        if event.name:
          _arch_std_events[event.name.lower()] = event
        if event.metric_name:
          _arch_std_events[event.metric_name.lower()] = event


def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
  """Add contents of file to _pending_events table."""
  for e in read_json_events(item.path, topic):
    if e.name:
      _pending_events.append(e)
    if e.metric_name:
      _pending_metrics.append(e)


def print_pending_events() -> None:
  """Optionally close events table."""

  def event_cmp_key(j: JsonEvent) -> Tuple[str, str, str, str, str]:
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (fix_none(j.pmu).replace(',','_'), fix_none(j.name), fix_none(j.topic), fix_none(j.desc),
            fix_none(j.metric_name))

  global _pending_events
  if not _pending_events:
    return

  global _pending_events_tblname
  if _pending_events_tblname.endswith('_sys'):
    global _sys_event_tables
    _sys_event_tables.append(_pending_events_tblname)
  else:
    global event_tables
    _event_tables.append(_pending_events_tblname)

  first = True
  last_pmu = None
  last_name = None
  pmus = set()
  for event in sorted(_pending_events, key=event_cmp_key):
    if last_pmu and last_pmu == event.pmu:
      assert event.name != last_name, f"Duplicate event: {last_pmu}/{last_name} in {_pending_events_tblname}"
    if event.pmu != last_pmu:
      if not first:
        _args.output_file.write('};\n')
      pmu_name = event.pmu.replace(',', '_')
      _args.output_file.write(
          f'static const struct compact_pmu_event {_pending_events_tblname}_{pmu_name}[] = {{\n')
      first = False
      last_pmu = event.pmu
      pmus.add((event.pmu, pmu_name))

    _args.output_file.write(event.to_c_string(metric=False))
    last_name = event.name
  _pending_events = []

  _args.output_file.write(f"""
}};

const struct pmu_table_entry {_pending_events_tblname}[] = {{
""")
  for (pmu, tbl_pmu) in sorted(pmus):
    pmu_name = f"{pmu}\\000"
    _args.output_file.write(f"""{{
     .entries = {_pending_events_tblname}_{tbl_pmu},
     .num_entries = ARRAY_SIZE({_pending_events_tblname}_{tbl_pmu}),
     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu} */ }},
}},
""")
  _args.output_file.write('};\n\n')

def print_pending_metrics() -> None:
  """Optionally close metrics table."""

  def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))

  global _pending_metrics
  if not _pending_metrics:
    return

  global _pending_metrics_tblname
  if _pending_metrics_tblname.endswith('_sys'):
    global _sys_metric_tables
    _sys_metric_tables.append(_pending_metrics_tblname)
  else:
    global metric_tables
    _metric_tables.append(_pending_metrics_tblname)

  first = True
  last_pmu = None
  pmus = set()
  for metric in sorted(_pending_metrics, key=metric_cmp_key):
    if metric.pmu != last_pmu:
      if not first:
        _args.output_file.write('};\n')
      pmu_name = metric.pmu.replace(',', '_')
      _args.output_file.write(
          f'static const struct compact_pmu_event {_pending_metrics_tblname}_{pmu_name}[] = {{\n')
      first = False
      last_pmu = metric.pmu
      pmus.add((metric.pmu, pmu_name))

    _args.output_file.write(metric.to_c_string(metric=True))
  _pending_metrics = []

  _args.output_file.write(f"""
}};

const struct pmu_table_entry {_pending_metrics_tblname}[] = {{
""")
  for (pmu, tbl_pmu) in sorted(pmus):
    pmu_name = f"{pmu}\\000"
    _args.output_file.write(f"""{{
     .entries = {_pending_metrics_tblname}_{tbl_pmu},
     .num_entries = ARRAY_SIZE({_pending_metrics_tblname}_{tbl_pmu}),
     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu} */ }},
}},
""")
  _args.output_file.write('};\n\n')
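
# Note on ordering: event_cmp_key() sorts primarily by PMU so that events can
# be grouped into per-PMU arrays above, and by name within a PMU so that the
# generated C code can binary search an array by event name (see
# pmu_events_table__find_event_pmu() emitted below).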

def get_topic(topic: str) -> str:
  if topic.endswith('metrics.json'):
    return 'metrics'
  return removesuffix(topic, '.json').replace('-', ' ')

def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:

  if item.is_dir():
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  if item.name == 'metricgroups.json':
    metricgroup_descriptions = json.load(open(item.path))
    for mgroup in metricgroup_descriptions:
      assert len(mgroup) > 1, parents
      description = f"{metricgroup_descriptions[mgroup]}\\000"
      mgroup = f"{mgroup}\\000"
      _bcs.add(mgroup, metric=True)
      _bcs.add(description, metric=True)
      _metricgroups[mgroup] = description
    return

  topic = get_topic(item.name)
  for event in read_json_events(item.path, topic):
    pmu_name = f"{event.pmu}\\000"
    if event.name:
      _bcs.add(pmu_name, metric=False)
      _bcs.add(event.build_c_string(metric=False), metric=False)
    if event.metric_name:
      _bcs.add(pmu_name, metric=True)
      _bcs.add(event.build_c_string(metric=True), metric=True)

def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """Process a JSON file during the main walk."""
  def is_leaf_dir_ignoring_sys(path: str) -> bool:
    for item in os.scandir(path):
      if item.is_dir() and item.name != 'sys':
        return False
    return True

  # Model directories are leaves (ignoring possible sys
  # directories). The FTW will walk into the directory next. Flush
  # pending events and metrics and update the table names for the new
  # model directory.
  if item.is_dir() and is_leaf_dir_ignoring_sys(item.path):
    print_pending_events()
    print_pending_metrics()

    global _pending_events_tblname
    _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
    global _pending_metrics_tblname
    _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)

    if item.name == 'sys':
      _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  add_events_table_entries(item, get_topic(item.name))


def print_mapping_table(archs: Sequence[str]) -> None:
  """Read the mapfile and generate the struct from cpuid string to event table."""
  _args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
        const struct pmu_table_entry *pmus;
        uint32_t num_pmus;
};

/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
        const struct pmu_table_entry *pmus;
        uint32_t num_pmus;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
 *
 * The cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
        const char *arch;
        const char *cpuid;
        struct pmu_events_table event_table;
        struct pmu_metrics_table metric_table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
 */
const struct pmu_events_map pmu_events_map[] = {
""")
  for arch in archs:
    if arch == 'test':
      _args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t.event_table = {
\t\t.pmus = pmu_events__test_soc_cpu,
\t\t.num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu),
\t},
\t.metric_table = {
\t\t.pmus = pmu_metrics__test_soc_cpu,
\t\t.num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
\t}
},
""")
    else:
      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
        table = csv.reader(csvfile)
        first = True
        for row in table:
          # Skip the first row or any row beginning with #.
          if not first and len(row) > 0 and not row[0].startswith('#'):
            event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
            if event_tblname in _event_tables:
              event_size = f'ARRAY_SIZE({event_tblname})'
            else:
              event_tblname = 'NULL'
              event_size = '0'
            metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
            if metric_tblname in _metric_tables:
              metric_size = f'ARRAY_SIZE({metric_tblname})'
            else:
              metric_tblname = 'NULL'
              metric_size = '0'
            if event_size == '0' and metric_size == '0':
              continue
            cpuid = row[0].replace('\\', '\\\\')
            _args.output_file.write(f"""{{
\t.arch = "{arch}",
\t.cpuid = "{cpuid}",
\t.event_table = {{
\t\t.pmus = {event_tblname},
\t\t.num_pmus = {event_size}
\t}},
\t.metric_table = {{
\t\t.pmus = {metric_tblname},
\t\t.num_pmus = {metric_size}
\t}}
}},
""")
          first = False

  _args.output_file.write("""{
\t.arch = 0,
\t.cpuid = 0,
\t.event_table = { 0, 0 },
\t.metric_table = { 0, 0 },
}
};
""")
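
# Example of the mapping (hypothetical values): a mapfile.csv row such as
#   MyVendor-1-2,v1,my_model,core
# selects the model directory 'my_model', giving the table name
# 'pmu_events__my_model' via file_name_to_table_name('pmu_events_', [],
# 'my_model'), and the row's cpuid in column 0 becomes the .cpuid field of
# the generated pmu_events_map entry.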


def print_system_mapping_table() -> None:
  """C struct mapping table array for tables from /sys directories."""
  _args.output_file.write("""
struct pmu_sys_events {
\tconst char *name;
\tstruct pmu_events_table event_table;
\tstruct pmu_metrics_table metric_table;
};

static const struct pmu_sys_events pmu_sys_event_tables[] = {
""")
  printed_metric_tables = []
  for tblname in _sys_event_tables:
    _args.output_file.write(f"""\t{{
\t\t.event_table = {{
\t\t\t.pmus = {tblname},
\t\t\t.num_pmus = ARRAY_SIZE({tblname})
\t\t}},""")
    metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
    if metric_tblname in _sys_metric_tables:
      _args.output_file.write(f"""
\t\t.metric_table = {{
\t\t\t.pmus = {metric_tblname},
\t\t\t.num_pmus = ARRAY_SIZE({metric_tblname})
\t\t}},""")
      printed_metric_tables.append(metric_tblname)
    _args.output_file.write(f"""
\t\t.name = \"{tblname}\",
\t}},
""")
  for tblname in _sys_metric_tables:
    if tblname in printed_metric_tables:
      continue
    _args.output_file.write(f"""\t{{
\t\t.metric_table = {{
\t\t\t.pmus = {tblname},
\t\t\t.num_pmus = ARRAY_SIZE({tblname})
\t\t}},
\t\t.name = \"{tblname}\",
\t}},
""")
  _args.output_file.write("""\t{
\t\t.event_table = { 0, 0 },
\t\t.metric_table = { 0, 0 },
\t},
};

static void decompress_event(int offset, struct pmu_event *pe)
{
\tconst char *p = &big_c_string[offset];
""")
  for attr in _json_event_attributes:
    _args.output_file.write(f'\n\tpe->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
    else:
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_event_attributes[-1]:
      continue
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
    else:
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

static void decompress_metric(int offset, struct pmu_metric *pm)
{
\tconst char *p = &big_c_string[offset];
""")
  for attr in _json_metric_attributes:
    _args.output_file.write(f'\n\tpm->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
    else:
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_metric_attributes[-1]:
      continue
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
    else:
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}
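
/*
 * Each compact_pmu_event is a single offset into big_c_string. The bytes at
 * that offset hold the event's fields in the fixed order used when the
 * tables were generated: string fields (name, topic, ...) are each
 * NUL-terminated, while enum/bool fields (deprecated, perpkg, ...) are a
 * single digit character with no terminator. The decompress_event() and
 * decompress_metric() helpers above simply walk that layout.
 */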

static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
                                                const struct pmu_table_entry *pmu,
                                                pmu_event_iter_fn fn,
                                                void *data)
{
        int ret;
        struct pmu_event pe = {
                .pmu = &big_c_string[pmu->pmu_name.offset],
        };

        for (uint32_t i = 0; i < pmu->num_entries; i++) {
                decompress_event(pmu->entries[i].offset, &pe);
                if (!pe.name)
                        continue;
                ret = fn(&pe, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
                                            const struct pmu_table_entry *pmu,
                                            const char *name,
                                            pmu_event_iter_fn fn,
                                            void *data)
{
        struct pmu_event pe = {
                .pmu = &big_c_string[pmu->pmu_name.offset],
        };
        int low = 0, high = pmu->num_entries - 1;

        while (low <= high) {
                int cmp, mid = (low + high) / 2;

                decompress_event(pmu->entries[mid].offset, &pe);

                if (!pe.name && !name)
                        goto do_call;

                if (!pe.name && name) {
                        low = mid + 1;
                        continue;
                }
                if (pe.name && !name) {
                        high = mid - 1;
                        continue;
                }

                cmp = strcasecmp(pe.name, name);
                if (cmp < 0) {
                        low = mid + 1;
                        continue;
                }
                if (cmp > 0) {
                        high = mid - 1;
                        continue;
                }
do_call:
                return fn ? fn(&pe, table, data) : 0;
        }
        return PMU_EVENTS__NOT_FOUND;
}

int pmu_events_table__for_each_event(const struct pmu_events_table *table,
                                     struct perf_pmu *pmu,
                                     pmu_event_iter_fn fn,
                                     void *data)
{
        for (size_t i = 0; i < table->num_pmus; i++) {
                const struct pmu_table_entry *table_pmu = &table->pmus[i];
                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
                int ret;

                if (pmu && !pmu__name_match(pmu, pmu_name))
                        continue;

                ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
                if (pmu || ret)
                        return ret;
        }
        return 0;
}

int pmu_events_table__find_event(const struct pmu_events_table *table,
                                 struct perf_pmu *pmu,
                                 const char *name,
                                 pmu_event_iter_fn fn,
                                 void *data)
{
        for (size_t i = 0; i < table->num_pmus; i++) {
                const struct pmu_table_entry *table_pmu = &table->pmus[i];
                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
                int ret;

                if (!pmu__name_match(pmu, pmu_name))
                        continue;

                ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
                if (ret != PMU_EVENTS__NOT_FOUND)
                        return ret;
        }
        return PMU_EVENTS__NOT_FOUND;
}

size_t pmu_events_table__num_events(const struct pmu_events_table *table,
                                    struct perf_pmu *pmu)
{
        size_t count = 0;

        for (size_t i = 0; i < table->num_pmus; i++) {
                const struct pmu_table_entry *table_pmu = &table->pmus[i];
                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];

                if (pmu__name_match(pmu, pmu_name))
                        count += table_pmu->num_entries;
        }
        return count;
}

static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
                                                  const struct pmu_table_entry *pmu,
                                                  pmu_metric_iter_fn fn,
                                                  void *data)
{
        int ret;
        struct pmu_metric pm = {
                .pmu = &big_c_string[pmu->pmu_name.offset],
        };

        for (uint32_t i = 0; i < pmu->num_entries; i++) {
                decompress_metric(pmu->entries[i].offset, &pm);
                if (!pm.metric_expr)
                        continue;
                ret = fn(&pm, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
                                       pmu_metric_iter_fn fn,
                                       void *data)
{
        for (size_t i = 0; i < table->num_pmus; i++) {
                int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
                                                                 fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
{
        static struct {
                const struct pmu_events_map *map;
                struct perf_pmu *pmu;
        } last_result;
        static struct {
                const struct pmu_events_map *map;
                char *cpuid;
        } last_map_search;
        static bool has_last_result, has_last_map_search;
        const struct pmu_events_map *map = NULL;
        char *cpuid = NULL;
        size_t i;

        if (has_last_result && last_result.pmu == pmu)
                return last_result.map;

        cpuid = perf_pmu__getcpuid(pmu);

        /*
         * On some platforms which uses cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                goto out_update_last_result;

        if (has_last_map_search && !strcmp(last_map_search.cpuid, cpuid)) {
                map = last_map_search.map;
                free(cpuid);
        } else {
                i = 0;
                for (;;) {
                        map = &pmu_events_map[i++];

                        if (!map->arch) {
                                map = NULL;
                                break;
                        }

                        if (!strcmp_cpuid_str(map->cpuid, cpuid))
                                break;
                }
                free(last_map_search.cpuid);
                last_map_search.cpuid = cpuid;
                last_map_search.map = map;
                has_last_map_search = true;
        }
out_update_last_result:
        last_result.pmu = pmu;
        last_result.map = map;
        has_last_result = true;
        return map;
}
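
/*
 * map_for_pmu() above caches both the last perf_pmu that was looked up and
 * the last cpuid string that was searched, so repeated lookups for the same
 * PMU or CPU avoid rescanning pmu_events_map. The finders below use it to
 * locate the map entry and then match the caller's PMU against the per-PMU
 * name stored with each pmu_table_entry.
 */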
790 break; 1077 const char *pmu_name = &big_c !! 791 1078 !! 792 if (!strcmp_cpuid_str(map->cpuid, cpuid)) { 1079 if (pmu__name_match(pmu, pmu_ !! 793 table = &map->event_table; 1080 return &map->event_t !! 794 break; >> 795 } 1081 } 796 } 1082 return NULL; !! 797 free(cpuid); >> 798 return table; 1083 } 799 } 1084 800 1085 const struct pmu_metrics_table *perf_pmu__fin 801 const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu) 1086 { 802 { 1087 const struct pmu_events_map *map = ma !! 803 const struct pmu_metrics_table *table = NULL; >> 804 char *cpuid = perf_pmu__getcpuid(pmu); >> 805 int i; 1088 806 1089 if (!map) !! 807 /* on some platforms which uses cpus map, cpuid can be NULL for >> 808 * PMUs other than CORE PMUs. >> 809 */ >> 810 if (!cpuid) 1090 return NULL; 811 return NULL; 1091 812 1092 if (!pmu) !! 813 i = 0; 1093 return &map->metric_table; !! 814 for (;;) { 1094 !! 815 const struct pmu_events_map *map = &pmu_events_map[i++]; 1095 for (size_t i = 0; i < map->metric_ta !! 816 if (!map->arch) 1096 const struct pmu_table_entry !! 817 break; 1097 const char *pmu_name = &big_c !! 818 1098 !! 819 if (!strcmp_cpuid_str(map->cpuid, cpuid)) { 1099 if (pmu__name_match(pmu, pmu_ !! 820 table = &map->metric_table; 1100 return &map->metri !! 821 break; >> 822 } 1101 } 823 } 1102 return NULL; !! 824 free(cpuid); >> 825 return table; 1103 } 826 } 1104 827 1105 const struct pmu_events_table *find_core_even 828 const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid) 1106 { 829 { 1107 for (const struct pmu_events_map *tab 830 for (const struct pmu_events_map *tables = &pmu_events_map[0]; 1108 tables->arch; 831 tables->arch; 1109 tables++) { 832 tables++) { 1110 if (!strcmp(tables->arch, arc 833 if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid)) 1111 return &tables->event 834 return &tables->event_table; 1112 } 835 } 1113 return NULL; 836 return NULL; 1114 } 837 } 1115 838 1116 const struct pmu_metrics_table *find_core_met 839 const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid) 1117 { 840 { 1118 for (const struct pmu_events_map *tab 841 for (const struct pmu_events_map *tables = &pmu_events_map[0]; 1119 tables->arch; 842 tables->arch; 1120 tables++) { 843 tables++) { 1121 if (!strcmp(tables->arch, arc 844 if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid)) 1122 return &tables->metri 845 return &tables->metric_table; 1123 } 846 } 1124 return NULL; 847 return NULL; 1125 } 848 } 1126 849 1127 int pmu_for_each_core_event(pmu_event_iter_fn 850 int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data) 1128 { 851 { 1129 for (const struct pmu_events_map *tab 852 for (const struct pmu_events_map *tables = &pmu_events_map[0]; 1130 tables->arch; 853 tables->arch; 1131 tables++) { 854 tables++) { 1132 int ret = pmu_events_table__f !! 855 int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data); 1133 << 1134 856 1135 if (ret) 857 if (ret) 1136 return ret; 858 return ret; 1137 } 859 } 1138 return 0; 860 return 0; 1139 } 861 } 1140 862 1141 int pmu_for_each_core_metric(pmu_metric_iter_ 863 int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data) 1142 { 864 { 1143 for (const struct pmu_events_map *tab 865 for (const struct pmu_events_map *tables = &pmu_events_map[0]; 1144 tables->arch; 866 tables->arch; 1145 tables++) { 867 tables++) { 1146 int ret = pmu_metrics_table__ !! 
868 int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data); 1147 869 1148 if (ret) 870 if (ret) 1149 return ret; 871 return ret; 1150 } 872 } 1151 return 0; 873 return 0; 1152 } 874 } 1153 875 1154 const struct pmu_events_table *find_sys_event 876 const struct pmu_events_table *find_sys_events_table(const char *name) 1155 { 877 { 1156 for (const struct pmu_sys_events *tab 878 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0]; 1157 tables->name; 879 tables->name; 1158 tables++) { 880 tables++) { 1159 if (!strcmp(tables->name, nam 881 if (!strcmp(tables->name, name)) 1160 return &tables->event 882 return &tables->event_table; 1161 } 883 } 1162 return NULL; 884 return NULL; 1163 } 885 } 1164 886 1165 int pmu_for_each_sys_event(pmu_event_iter_fn 887 int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data) 1166 { 888 { 1167 for (const struct pmu_sys_events *tab 889 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0]; 1168 tables->name; 890 tables->name; 1169 tables++) { 891 tables++) { 1170 int ret = pmu_events_table__f !! 892 int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data); 1171 << 1172 893 1173 if (ret) 894 if (ret) 1174 return ret; 895 return ret; 1175 } 896 } 1176 return 0; 897 return 0; 1177 } 898 } 1178 899 1179 int pmu_for_each_sys_metric(pmu_metric_iter_f 900 int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data) 1180 { 901 { 1181 for (const struct pmu_sys_events *tab 902 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0]; 1182 tables->name; 903 tables->name; 1183 tables++) { 904 tables++) { 1184 int ret = pmu_metrics_table__ !! 905 int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data); 1185 906 1186 if (ret) 907 if (ret) 1187 return ret; 908 return ret; 1188 } 909 } 1189 return 0; 910 return 0; 1190 } 911 } 1191 """) 912 """) 1192 913 1193 def print_metricgroups() -> None: << 1194 _args.output_file.write(""" << 1195 static const int metricgroups[][2] = { << 1196 """) << 1197 for mgroup in sorted(_metricgroups): << 1198 description = _metricgroups[mgroup] << 1199 _args.output_file.write( << 1200 f'\t{{ {_bcs.offsets[mgroup]}, {_bcs. 
<< 1201 ) << 1202 _args.output_file.write(""" << 1203 }; << 1204 << 1205 const char *describe_metricgroup(const char * << 1206 { << 1207 int low = 0, high = (int)ARRAY_SIZE(m << 1208 << 1209 while (low <= high) { << 1210 int mid = (low + high) / 2; << 1211 const char *mgroup = &big_c_s << 1212 int cmp = strcmp(mgroup, grou << 1213 << 1214 if (cmp == 0) { << 1215 return &big_c_string[ << 1216 } else if (cmp < 0) { << 1217 low = mid + 1; << 1218 } else { << 1219 high = mid - 1; << 1220 } << 1221 } << 1222 return NULL; << 1223 } << 1224 """) << 1225 914 1226 def main() -> None: 915 def main() -> None: 1227 global _args 916 global _args 1228 917 1229 def dir_path(path: str) -> str: 918 def dir_path(path: str) -> str: 1230 """Validate path is a directory for argpa 919 """Validate path is a directory for argparse.""" 1231 if os.path.isdir(path): 920 if os.path.isdir(path): 1232 return path 921 return path 1233 raise argparse.ArgumentTypeError(f'\'{pat 922 raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory') 1234 923 1235 def ftw(path: str, parents: Sequence[str], 924 def ftw(path: str, parents: Sequence[str], 1236 action: Callable[[Sequence[str], os 925 action: Callable[[Sequence[str], os.DirEntry], None]) -> None: 1237 """Replicate the directory/file walking b 926 """Replicate the directory/file walking behavior of C's file tree walk.""" 1238 for item in sorted(os.scandir(path), key= 927 for item in sorted(os.scandir(path), key=lambda e: e.name): 1239 if _args.model != 'all' and item.is_dir 928 if _args.model != 'all' and item.is_dir(): 1240 # Check if the model matches one in _ 929 # Check if the model matches one in _args.model. 1241 if len(parents) == _args.model.split( 930 if len(parents) == _args.model.split(',')[0].count('/'): 1242 # We're testing the correct directo 931 # We're testing the correct directory. 1243 item_path = '/'.join(parents) + ('/ 932 item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name 1244 if 'test' not in item_path and item 933 if 'test' not in item_path and item_path not in _args.model.split(','): 1245 continue 934 continue 1246 action(parents, item) 935 action(parents, item) 1247 if item.is_dir(): 936 if item.is_dir(): 1248 ftw(item.path, parents + [item.name], 937 ftw(item.path, parents + [item.name], action) 1249 938 1250 ap = argparse.ArgumentParser() 939 ap = argparse.ArgumentParser() 1251 ap.add_argument('arch', help='Architecture 940 ap.add_argument('arch', help='Architecture name like x86') 1252 ap.add_argument('model', help='''Select a m 941 ap.add_argument('model', help='''Select a model such as skylake to 1253 reduce the code size. Normally set to "all". 942 reduce the code size. Normally set to "all". 
For architectures like 1254 ARM64 with an implementor/model, the model mu 943 ARM64 with an implementor/model, the model must include the implementor 1255 such as "arm/cortex-a34".''', 944 such as "arm/cortex-a34".''', 1256 default='all') 945 default='all') 1257 ap.add_argument( 946 ap.add_argument( 1258 'starting_dir', 947 'starting_dir', 1259 type=dir_path, 948 type=dir_path, 1260 help='Root of tree containing architect 949 help='Root of tree containing architecture directories containing json files' 1261 ) 950 ) 1262 ap.add_argument( 951 ap.add_argument( 1263 'output_file', type=argparse.FileType(' 952 'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout) 1264 _args = ap.parse_args() 953 _args = ap.parse_args() 1265 954 1266 _args.output_file.write(f""" << 1267 /* SPDX-License-Identifier: GPL-2.0 */ << 1268 /* THIS FILE WAS AUTOGENERATED BY jevents.py << 1269 """) << 1270 _args.output_file.write(""" 955 _args.output_file.write(""" 1271 #include <pmu-events/pmu-events.h> !! 956 #include "pmu-events/pmu-events.h" 1272 #include "util/header.h" 957 #include "util/header.h" 1273 #include "util/pmu.h" 958 #include "util/pmu.h" 1274 #include <string.h> 959 #include <string.h> 1275 #include <stddef.h> 960 #include <stddef.h> 1276 961 1277 struct compact_pmu_event { 962 struct compact_pmu_event { 1278 int offset; !! 963 int offset; 1279 }; << 1280 << 1281 struct pmu_table_entry { << 1282 const struct compact_pmu_event *entri << 1283 uint32_t num_entries; << 1284 struct compact_pmu_event pmu_name; << 1285 }; 964 }; 1286 965 1287 """) 966 """) 1288 archs = [] 967 archs = [] 1289 for item in os.scandir(_args.starting_dir): 968 for item in os.scandir(_args.starting_dir): 1290 if not item.is_dir(): 969 if not item.is_dir(): 1291 continue 970 continue 1292 if item.name == _args.arch or _args.arch 971 if item.name == _args.arch or _args.arch == 'all' or item.name == 'test': 1293 archs.append(item.name) 972 archs.append(item.name) 1294 973 1295 if len(archs) < 2 and _args.arch != 'none': !! 974 if len(archs) < 2: 1296 raise IOError(f'Missing architecture dire 975 raise IOError(f'Missing architecture directory \'{_args.arch}\'') 1297 976 1298 archs.sort() 977 archs.sort() 1299 for arch in archs: 978 for arch in archs: 1300 arch_path = f'{_args.starting_dir}/{arch} 979 arch_path = f'{_args.starting_dir}/{arch}' 1301 preprocess_arch_std_files(arch_path) 980 preprocess_arch_std_files(arch_path) 1302 ftw(arch_path, [], preprocess_one_file) 981 ftw(arch_path, [], preprocess_one_file) 1303 982 1304 _bcs.compute() 983 _bcs.compute() 1305 _args.output_file.write('static const char 984 _args.output_file.write('static const char *const big_c_string =\n') 1306 for s in _bcs.big_string: 985 for s in _bcs.big_string: 1307 _args.output_file.write(s) 986 _args.output_file.write(s) 1308 _args.output_file.write(';\n\n') 987 _args.output_file.write(';\n\n') 1309 for arch in archs: 988 for arch in archs: 1310 arch_path = f'{_args.starting_dir}/{arch} 989 arch_path = f'{_args.starting_dir}/{arch}' 1311 ftw(arch_path, [], process_one_file) 990 ftw(arch_path, [], process_one_file) 1312 print_pending_events() 991 print_pending_events() 1313 print_pending_metrics() 992 print_pending_metrics() 1314 993 1315 print_mapping_table(archs) 994 print_mapping_table(archs) 1316 print_system_mapping_table() 995 print_system_mapping_table() 1317 print_metricgroups() !! 996 1318 997 1319 if __name__ == '__main__': 998 if __name__ == '__main__': 1320 main() 999 main()
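# A minimal usage sketch (the paths below are illustrative assumptions, not
# taken from this script): the perf build typically invokes this generator
# roughly as
#
#   python3 jevents.py x86 all tools/perf/pmu-events/arch pmu-events.c
#
# where 'x86' is the arch argument, 'all' the model, the arch/ directory the
# starting_dir and pmu-events.c the output_file defined by the argparse
# arguments above. Callers then reach the generated tables through the emitted
# helpers such as perf_pmu__find_events_table() and
# pmu_events_table__for_each_event().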