#!/usr/bin/env python3
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
"""Convert directories of JSON events to C code."""
import argparse
import csv
from functools import lru_cache
import json
import metric
import os
import sys
from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
import collections

# Global command line arguments.
_args = None
# List of regular event tables.
_event_tables = []
# List of event tables generated from "/sys" directories.
_sys_event_tables = []
# List of regular metric tables.
_metric_tables = []
# List of metric tables generated from "/sys" directories.
_sys_metric_tables = []
# Mapping between sys event table names and sys metric table names.
_sys_event_table_to_metric_table_mapping = {}
# Map from an event name to an architecture standard
# JsonEvent. Architecture standard events are in json files in the top
# f'{_args.starting_dir}/{_args.arch}' directory.
_arch_std_events = {}
# Events to write out when the table is closed
_pending_events = []
# Name of events table to be written out
_pending_events_tblname = None
# Metrics to write out when the table is closed
_pending_metrics = []
# Name of metrics table to be written out
_pending_metrics_tblname = None
# Global BigCString shared by all structures.
_bcs = None
# Map from the name of a metric group to a description of the group.
_metricgroups = {}
# Order specific JsonEvent attributes will be visited.
_json_event_attributes = [
    # cmp_sevent related attributes.
    'name', 'topic', 'desc',
    # Seems useful, put it early.
    'event',
    # Short things in alphabetical order.
    'compat', 'deprecated', 'perpkg', 'unit',
    # Longer things (the last won't be iterated over during decompress).
    'long_desc'
]

# Attributes that are in pmu_metric rather than pmu_event.
_json_metric_attributes = [
    'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
    'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
    'default_metricgroup_name', 'aggr_mode', 'event_grouping'
]
# Attributes that are bools or enum int values, encoded as '0', '1',...
_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']

def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.
  """
  return s[0:-len(suffix)] if s.endswith(suffix) else s


def file_name_to_table_name(prefix: str, parents: Sequence[str],
                            dirname: str) -> str:
  """Generate a C table name from directory names."""
  tblname = prefix
  for p in parents:
    tblname += '_' + p
  tblname += '_' + dirname
  return tblname.replace('-', '_')


def c_len(s: str) -> int:
  """Return the length of s as a C string.

  This doesn't handle all escape characters properly. It first assumes
  all \\ are for escaping, it then adjusts as it will have over counted
  \\. The code uses \000 rather than \0 as a terminator as an adjacent
  number would be folded into a string of \0 (ie. "\0" + "5" doesn't
  equal a terminator followed by the number 5 but the escape of
  \05). The code adjusts for \000 but not properly for all octal, hex
  or unicode values.
  """
  try:
    utf = s.encode(encoding='utf-8', errors='strict')
  except:
    print(f'broken string {s}')
    raise
  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
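
# Illustrative sanity check for c_len(), not part of the original file: for
# the Python source string 'cache\\000' (five letters, one backslash, three
# digits, nine characters in all), len(utf) is 9, the lone backslash is
# subtracted and the '\000' adjustment removes two more, giving 6 - the five
# letters plus the single NUL terminator the C compiler emits for the escape.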

class BigCString:
  """A class to hold many strings concatenated together.

  Generating a large number of stand-alone C strings creates a large
  number of relocations in position independent code. The BigCString
  is a helper for this case. It builds a single string which within it
  are all the other C strings (to avoid memory issues the string
  itself is held as a list of strings). The offsets within the big
  string are recorded and when stored to disk these don't need
  relocation. To reduce the size of the string further, identical
  strings are merged. If a longer string ends-with the same value as a
  shorter string, these entries are also merged.
  """
  strings: Set[str]
  big_string: Sequence[str]
  offsets: Dict[str, int]
  insert_number: int
  insert_point: Dict[str, int]
  metrics: Set[str]

  def __init__(self):
    self.strings = set()
    self.insert_number = 0
    self.insert_point = {}
    self.metrics = set()

  def add(self, s: str, metric: bool) -> None:
    """Called to add to the big string."""
    if s not in self.strings:
      self.strings.add(s)
      self.insert_point[s] = self.insert_number
      self.insert_number += 1
      if metric:
        self.metrics.add(s)

  def compute(self) -> None:
    """Called once all strings are added to compute the string and offsets."""

    folded_strings = {}
    # Determine if two strings can be folded, ie. let 1 string use the
    # end of another. First reverse all strings and sort them.
    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])

    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
    # for each string to see if there is a better candidate to fold it
    # into, in the example rather than using 'yz' we can use 'xyz' at
    # an offset of 1. We record which string can be folded into which
    # in folded_strings, we don't need to record the offset as it is
    # trivially computed from the string lengths.
    for pos, s in enumerate(sorted_reversed_strings):
      best_pos = pos
      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
        if sorted_reversed_strings[check_pos].startswith(s):
          best_pos = check_pos
        else:
          break
      if pos != best_pos:
        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]

    # Compute reverse mappings for debugging.
    fold_into_strings = collections.defaultdict(set)
    for key, val in folded_strings.items():
      if key != val:
        fold_into_strings[val].add(key)

    # big_string_offset is the current location within the C string
    # being appended to - comments, etc. don't count. big_string is
    # the string contents represented as a list. Strings are immutable
    # in Python and so appending to one causes memory issues, while
    # lists are mutable.
    big_string_offset = 0
    self.big_string = []
    self.offsets = {}

    def string_cmp_key(s: str) -> Tuple[bool, int, str]:
      return (s in self.metrics, self.insert_point[s], s)

    # Emit all strings that aren't folded in a sorted manner.
    for s in sorted(self.strings, key=string_cmp_key):
      if s not in folded_strings:
        self.offsets[s] = big_string_offset
        self.big_string.append(f'/* offset={big_string_offset} */ "')
        self.big_string.append(s)
        self.big_string.append('"')
        if s in fold_into_strings:
          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
        self.big_string.append('\n')
        big_string_offset += c_len(s)
        continue

    # Compute the offsets of the folded strings.
    for s in folded_strings.keys():
      assert s not in self.offsets
      folded_s = folded_strings[s]
      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)
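
# Illustrative example of the folding above (hypothetical strings, not taken
# from any real event file): after
#   bcs = BigCString()
#   bcs.add('l2_cache\\000', metric=False)
#   bcs.add('cache\\000', metric=False)
#   bcs.compute()
# 'cache\000' is a suffix of 'l2_cache\000', so only the longer string is
# emitted into big_string and the shorter one gets
# offsets['cache\\000'] == offsets['l2_cache\\000'] + 3, 3 being the encoded
# length of the unshared 'l2_' prefix.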

_bcs = BigCString()

class JsonEvent:
  """Representation of an event loaded from a json file dictionary."""

  def __init__(self, jd: dict):
    """Constructor passed the dictionary of parsed json values."""

    def llx(x: int) -> str:
      """Convert an int to a string similar to a printf modifier of %#llx."""
      return str(x) if x >= 0 and x < 10 else hex(x)

    def fixdesc(s: str) -> str:
      """Fix formatting issue for the desc string."""
      if s is None:
        return None
      return removesuffix(removesuffix(removesuffix(s, '. '),
                                       '. '), '.').replace('\n', '\\n').replace(
                                           '\"', '\\"').replace('\r', '\\r')

    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
      """Returns the aggr_mode_class enum value associated with the JSON string."""
      if not aggr_mode:
        return None
      aggr_mode_to_enum = {
          'PerChip': '1',
          'PerCore': '2',
      }
      return aggr_mode_to_enum[aggr_mode]

    def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
      """Returns the metric_event_groups enum value associated with the JSON string."""
      if not metric_constraint:
        return None
      metric_constraint_to_enum = {
          'NO_GROUP_EVENTS': '1',
          'NO_GROUP_EVENTS_NMI': '2',
          'NO_NMI_WATCHDOG': '2',
          'NO_GROUP_EVENTS_SMT': '3',
      }
      return metric_constraint_to_enum[metric_constraint]

    def lookup_msr(num: str) -> Optional[str]:
      """Converts the msr number, or first in a list to the appropriate event field."""
      if not num:
        return None
      msrmap = {
          0x3F6: 'ldlat=',
          0x1A6: 'offcore_rsp=',
          0x1A7: 'offcore_rsp=',
          0x3F7: 'frontend=',
      }
      return msrmap[int(num.split(',', 1)[0], 0)]

    def real_event(name: str, event: str) -> Optional[str]:
      """Convert well known event names to an event string otherwise use the event argument."""
      fixed = {
          'inst_retired.any': 'event=0xc0,period=2000003',
          'inst_retired.any_p': 'event=0xc0,period=2000003',
          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
      }
      if not name:
        return None
      if name.lower() in fixed:
        return fixed[name.lower()]
      return event

    def unit_to_pmu(unit: str) -> Optional[str]:
      """Convert a JSON Unit to Linux PMU name."""
      if not unit:
        return 'default_core'
      # Comment brought over from jevents.c:
      # it's not realistic to keep adding these, we need something more scalable ...
      table = {
          'CBO': 'uncore_cbox',
          'QPI LL': 'uncore_qpi',
          'SBO': 'uncore_sbox',
          'iMPH-U': 'uncore_arb',
          'CPU-M-CF': 'cpum_cf',
          'CPU-M-SF': 'cpum_sf',
          'PAI-CRYPTO': 'pai_crypto',
          'PAI-EXT': 'pai_ext',
          'UPI LL': 'uncore_upi',
          'hisi_sicl,cpa': 'hisi_sicl,cpa',
          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
          'hisi_sccl,hha': 'hisi_sccl,hha',
          'hisi_sccl,l3c': 'hisi_sccl,l3c',
          'imx8_ddr': 'imx8_ddr',
          'imx9_ddr': 'imx9_ddr',
          'L3PMC': 'amd_l3',
          'DFPMC': 'amd_df',
          'UMCPMC': 'amd_umc',
          'cpu_core': 'cpu_core',
          'cpu_atom': 'cpu_atom',
          'ali_drw': 'ali_drw',
          'arm_cmn': 'arm_cmn',
      }
      return table[unit] if unit in table else f'uncore_{unit.lower()}'

    def is_zero(val: str) -> bool:
      try:
        if val.startswith('0x'):
          return int(val, 16) == 0
        else:
          return int(val) == 0
      except ValueError:
        return False

    def canonicalize_value(val: str) -> str:
      try:
        if val.startswith('0x'):
          return llx(int(val, 16))
        return str(int(val))
      except ValueError:
        return val

    eventcode = 0
    if 'EventCode' in jd:
      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
    if 'ExtSel' in jd:
      eventcode |= int(jd['ExtSel']) << 8
    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
    eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None
    self.name = jd['EventName'].lower() if 'EventName' in jd else None
    self.topic = ''
    self.compat = jd.get('Compat')
    self.desc = fixdesc(jd.get('BriefDescription'))
    self.long_desc = fixdesc(jd.get('PublicDescription'))
    precise = jd.get('PEBS')
    msr = lookup_msr(jd.get('MSRIndex'))
    msrval = jd.get('MSRValue')
    extra_desc = ''
    if 'Data_LA' in jd:
      extra_desc += ' Supports address when precise'
      if 'Errata' in jd:
        extra_desc += '.'
    if 'Errata' in jd:
      extra_desc += ' Spec update: ' + jd['Errata']
    self.pmu = unit_to_pmu(jd.get('Unit'))
    filter = jd.get('Filter')
    self.unit = jd.get('ScaleUnit')
    self.perpkg = jd.get('PerPkg')
    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
    self.deprecated = jd.get('Deprecated')
    self.metric_name = jd.get('MetricName')
    self.metric_group = jd.get('MetricGroup')
    self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
    self.default_metricgroup_name = jd.get('DefaultMetricgroupName')
    self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
    self.metric_expr = None
    if 'MetricExpr' in jd:
      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
    # Note, the metric formula for the threshold isn't parsed as the &
    # and > have incorrect precedence.
    self.metric_threshold = jd.get('MetricThreshold')

    arch_std = jd.get('ArchStdEvent')
    if precise and self.desc and '(Precise Event)' not in self.desc:
      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
                                                                 'event)')
    event = None
    if configcode is not None:
      event = f'config={llx(configcode)}'
    elif eventidcode is not None:
      event = f'eventid={llx(eventidcode)}'
    else:
      event = f'event={llx(eventcode)}'
    event_fields = [
        ('AnyThread', 'any='),
        ('PortMask', 'ch_mask='),
        ('CounterMask', 'cmask='),
        ('EdgeDetect', 'edge='),
        ('FCMask', 'fc_mask='),
        ('Invert', 'inv='),
        ('SampleAfterValue', 'period='),
        ('UMask', 'umask='),
        ('NodeType', 'type='),
        ('RdWrMask', 'rdwrmask='),
        ('EnAllCores', 'enallcores='),
        ('EnAllSlices', 'enallslices='),
        ('SliceId', 'sliceid='),
        ('ThreadMask', 'threadmask='),
    ]
    for key, value in event_fields:
      if key in jd and not is_zero(jd[key]):
        event += f',{value}{canonicalize_value(jd[key])}'
    if filter:
      event += f',{filter}'
    if msr:
      event += f',{msr}{msrval}'
    if self.desc and extra_desc:
      self.desc += extra_desc
    if self.long_desc and extra_desc:
      self.long_desc += extra_desc
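    # Illustrative example (hypothetical JSON, not from a real event file):
    # {"EventCode": "0x3c", "SampleAfterValue": "2000003", "UMask": "0x01"}
    # yields the encoding 'event=0x3c,period=2000003,umask=1' from the code
    # above, following the ordering in event_fields and the canonicalization
    # of numeric values.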
    if arch_std:
      if arch_std.lower() in _arch_std_events:
        event = _arch_std_events[arch_std.lower()].event
        # Copy from the architecture standard event to self for undefined fields.
        for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
          if hasattr(self, attr) and not getattr(self, attr):
            setattr(self, attr, value)
      else:
        raise argparse.ArgumentTypeError('Cannot find arch std event:', arch_std)

    self.event = real_event(self.name, event)

  def __repr__(self) -> str:
    """String representation primarily for debugging."""
    s = '{\n'
    for attr, value in self.__dict__.items():
      if value:
        s += f'\t{attr} = {value},\n'
    return s + '}'

  def build_c_string(self, metric: bool) -> str:
    s = ''
    for attr in _json_metric_attributes if metric else _json_event_attributes:
      x = getattr(self, attr)
      if metric and x and attr == 'metric_expr':
        # Convert parsed metric expressions into a string. Slashes
        # must be doubled in the file.
        x = x.ToPerfJson().replace('\\', '\\\\')
      if metric and x and attr == 'metric_threshold':
        x = x.replace('\\', '\\\\')
      if attr in _json_enum_attributes:
        s += x if x else '0'
      else:
        s += f'{x}\\000' if x else '\\000'
    return s

  def to_c_string(self, metric: bool) -> str:
    """Representation of the event as a C struct initializer."""

    s = self.build_c_string(metric)
    return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'
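
# Illustrative example of the packed layout build_c_string() produces
# (hypothetical values): a non-metric event with name 'l2_rqsts.miss',
# topic 'cache', event 'event=0x24,umask=0x3f' and everything else unset
# encodes, in _json_event_attributes order, as
#   'l2_rqsts.miss\\000cache\\000\\000event=0x24,umask=0x3f\\000\\00000\\000\\000'
# where string attributes are '\\000'-terminated and the enum attributes
# (deprecated, perpkg) are single '0'/'1' characters with no terminator.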


@lru_cache(maxsize=None)
def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
  """Read json events from the specified file."""
  try:
    events = json.load(open(path), object_hook=JsonEvent)
  except BaseException as err:
    print(f"Exception processing {path}")
    raise
  metrics: list[Tuple[str, str, metric.Expression]] = []
  for event in events:
    event.topic = topic
    if event.metric_name and '-' not in event.metric_name:
      metrics.append((event.pmu, event.metric_name, event.metric_expr))
  updates = metric.RewriteMetricsInTermsOfOthers(metrics)
  if updates:
    for event in events:
      if event.metric_name in updates:
        # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
        #       f'to\n"{updates[event.metric_name]}"')
        event.metric_expr = updates[event.metric_name]

  return events

def preprocess_arch_std_files(archpath: str) -> None:
  """Read in all architecture standard events."""
  global _arch_std_events
  for item in os.scandir(archpath):
    if item.is_file() and item.name.endswith('.json'):
      for event in read_json_events(item.path, topic=''):
        if event.name:
          _arch_std_events[event.name.lower()] = event
        if event.metric_name:
          _arch_std_events[event.metric_name.lower()] = event


def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
  """Add contents of file to _pending_events table."""
  for e in read_json_events(item.path, topic):
    if e.name:
      _pending_events.append(e)
    if e.metric_name:
      _pending_metrics.append(e)


def print_pending_events() -> None:
  """Optionally close events table."""

  def event_cmp_key(j: JsonEvent) -> Tuple[str, str, bool, str, str]:
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (fix_none(j.pmu).replace(',','_'), fix_none(j.name), j.desc is not None, fix_none(j.topic),
            fix_none(j.metric_name))

  global _pending_events
  if not _pending_events:
    return

  global _pending_events_tblname
  if _pending_events_tblname.endswith('_sys'):
    global _sys_event_tables
    _sys_event_tables.append(_pending_events_tblname)
  else:
    global _event_tables
    _event_tables.append(_pending_events_tblname)

  first = True
  last_pmu = None
  last_name = None
  pmus = set()
  for event in sorted(_pending_events, key=event_cmp_key):
    if last_pmu and last_pmu == event.pmu:
      assert event.name != last_name, f"Duplicate event: {last_pmu}/{last_name} in {_pending_events_tblname}"
    if event.pmu != last_pmu:
      if not first:
        _args.output_file.write('};\n')
      pmu_name = event.pmu.replace(',', '_')
      _args.output_file.write(
          f'static const struct compact_pmu_event {_pending_events_tblname}_{pmu_name}[] = {{\n')
      first = False
      last_pmu = event.pmu
      pmus.add((event.pmu, pmu_name))

    _args.output_file.write(event.to_c_string(metric=False))
    last_name = event.name
  _pending_events = []

  _args.output_file.write(f"""
}};

const struct pmu_table_entry {_pending_events_tblname}[] = {{
""")
  for (pmu, tbl_pmu) in sorted(pmus):
    pmu_name = f"{pmu}\\000"
    _args.output_file.write(f"""{{
     .entries = {_pending_events_tblname}_{tbl_pmu},
     .num_entries = ARRAY_SIZE({_pending_events_tblname}_{tbl_pmu}),
     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
}},
""")
  _args.output_file.write('};\n\n')

def print_pending_metrics() -> None:
  """Optionally close metrics table."""

  def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))

  global _pending_metrics
  if not _pending_metrics:
    return

  global _pending_metrics_tblname
  if _pending_metrics_tblname.endswith('_sys'):
    global _sys_metric_tables
    _sys_metric_tables.append(_pending_metrics_tblname)
  else:
    global _metric_tables
    _metric_tables.append(_pending_metrics_tblname)

  first = True
  last_pmu = None
  pmus = set()
  for metric in sorted(_pending_metrics, key=metric_cmp_key):
    if metric.pmu != last_pmu:
      if not first:
        _args.output_file.write('};\n')
      pmu_name = metric.pmu.replace(',', '_')
      _args.output_file.write(
          f'static const struct compact_pmu_event {_pending_metrics_tblname}_{pmu_name}[] = {{\n')
      first = False
      last_pmu = metric.pmu
      pmus.add((metric.pmu, pmu_name))

    _args.output_file.write(metric.to_c_string(metric=True))
  _pending_metrics = []

  _args.output_file.write(f"""
}};

const struct pmu_table_entry {_pending_metrics_tblname}[] = {{
""")
  for (pmu, tbl_pmu) in sorted(pmus):
    pmu_name = f"{pmu}\\000"
    _args.output_file.write(f"""{{
     .entries = {_pending_metrics_tblname}_{tbl_pmu},
     .num_entries = ARRAY_SIZE({_pending_metrics_tblname}_{tbl_pmu}),
     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
}},
""")
  _args.output_file.write('};\n\n')
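
# For orientation (illustrative only, names and offsets are made up), the two
# functions above emit C of roughly this shape:
#   static const struct compact_pmu_event pmu_events__test_soc_cpu_default_core[] = {
#   { 100 }, /* name\000topic\000...\000 */
#   };
#
#   const struct pmu_table_entry pmu_events__test_soc_cpu[] = {
#   {
#        .entries = pmu_events__test_soc_cpu_default_core,
#        .num_entries = ARRAY_SIZE(pmu_events__test_soc_cpu_default_core),
#        .pmu_name = { 50 /* default_core\000 */ },
#   },
#   };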

def get_topic(topic: str) -> str:
  if topic.endswith('metrics.json'):
    return 'metrics'
  return removesuffix(topic, '.json').replace('-', ' ')

def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:

  if item.is_dir():
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  if item.name == 'metricgroups.json':
    metricgroup_descriptions = json.load(open(item.path))
    for mgroup in metricgroup_descriptions:
      assert len(mgroup) > 1, parents
      description = f"{metricgroup_descriptions[mgroup]}\\000"
      mgroup = f"{mgroup}\\000"
      _bcs.add(mgroup, metric=True)
      _bcs.add(description, metric=True)
      _metricgroups[mgroup] = description
    return

  topic = get_topic(item.name)
  for event in read_json_events(item.path, topic):
    pmu_name = f"{event.pmu}\\000"
    if event.name:
      _bcs.add(pmu_name, metric=False)
      _bcs.add(event.build_c_string(metric=False), metric=False)
    if event.metric_name:
      _bcs.add(pmu_name, metric=True)
      _bcs.add(event.build_c_string(metric=True), metric=True)

def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """Process a JSON file during the main walk."""
  def is_leaf_dir_ignoring_sys(path: str) -> bool:
    for item in os.scandir(path):
      if item.is_dir() and item.name != 'sys':
        return False
    return True

  # Model directories are leaves (ignoring possible metric
  # directories). The FTW will walk into the directory next. Flush
  # pending events and metrics and update the table names for the new
  # model directory.
  if item.is_dir() and is_leaf_dir_ignoring_sys(item.path):
    print_pending_events()
    print_pending_metrics()

    global _pending_events_tblname
    _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
    global _pending_metrics_tblname
    _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)

    if item.name == 'sys':
      _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return
  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json') or item.name == 'metricgroups.json':
    return

  add_events_table_entries(item, get_topic(item.name))


def print_mapping_table(archs: Sequence[str]) -> None:
  """Read the mapfile and generate the struct from cpuid string to event table."""
  _args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
        const struct pmu_table_entry *pmus;
        uint32_t num_pmus;
};

/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
        const struct pmu_table_entry *pmus;
        uint32_t num_pmus;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
 *
 * The cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
        const char *arch;
        const char *cpuid;
        struct pmu_events_table event_table;
        struct pmu_metrics_table metric_table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
 */
const struct pmu_events_map pmu_events_map[] = {
""")
  for arch in archs:
    if arch == 'test':
      _args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t.event_table = {
\t\t.pmus = pmu_events__test_soc_cpu,
\t\t.num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu),
\t},
\t.metric_table = {
\t\t.pmus = pmu_metrics__test_soc_cpu,
\t\t.num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
\t}
},
""")
    else:
      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
        table = csv.reader(csvfile)
        first = True
        for row in table:
          # Skip the first row or any row beginning with #.
          if not first and len(row) > 0 and not row[0].startswith('#'):
            event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
            if event_tblname in _event_tables:
              event_size = f'ARRAY_SIZE({event_tblname})'
            else:
              event_tblname = 'NULL'
              event_size = '0'
            metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
            if metric_tblname in _metric_tables:
              metric_size = f'ARRAY_SIZE({metric_tblname})'
            else:
              metric_tblname = 'NULL'
              metric_size = '0'
            if event_size == '0' and metric_size == '0':
              continue
            cpuid = row[0].replace('\\', '\\\\')
            _args.output_file.write(f"""{{
\t.arch = "{arch}",
\t.cpuid = "{cpuid}",
\t.event_table = {{
\t\t.pmus = {event_tblname},
\t\t.num_pmus = {event_size}
\t}},
\t.metric_table = {{
\t\t.pmus = {metric_tblname},
\t\t.num_pmus = {metric_size}
\t}}
}},
""")
          first = False

  _args.output_file.write("""{
\t.arch = 0,
\t.cpuid = 0,
\t.event_table = { 0, 0 },
\t.metric_table = { 0, 0 },
}
};
""")


def print_system_mapping_table() -> None:
  """C struct mapping table array for tables from /sys directories."""
  _args.output_file.write("""
struct pmu_sys_events {
\tconst char *name;
\tstruct pmu_events_table event_table;
\tstruct pmu_metrics_table metric_table;
};

static const struct pmu_sys_events pmu_sys_event_tables[] = {
""")
  printed_metric_tables = []
  for tblname in _sys_event_tables:
    _args.output_file.write(f"""\t{{
\t\t.event_table = {{
\t\t\t.pmus = {tblname},
\t\t\t.num_pmus = ARRAY_SIZE({tblname})
\t\t}},""")
    metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
    if metric_tblname in _sys_metric_tables:
      _args.output_file.write(f"""
\t\t.metric_table = {{
\t\t\t.pmus = {metric_tblname},
\t\t\t.num_pmus = ARRAY_SIZE({metric_tblname})
\t\t}},""")
      printed_metric_tables.append(metric_tblname)
    _args.output_file.write(f"""
\t\t.name = \"{tblname}\",
\t}},
""")
  for tblname in _sys_metric_tables:
    if tblname in printed_metric_tables:
      continue
    _args.output_file.write(f"""\t{{
\t\t.metric_table = {{
\t\t\t.pmus = {tblname},
\t\t\t.num_pmus = ARRAY_SIZE({tblname})
\t\t}},
\t\t.name = \"{tblname}\",
\t}},
""")
  _args.output_file.write("""\t{
\t\t.event_table = { 0, 0 },
\t\t.metric_table = { 0, 0 },
\t},
};

static void decompress_event(int offset, struct pmu_event *pe)
{
\tconst char *p = &big_c_string[offset];
""")
  for attr in _json_event_attributes:
    _args.output_file.write(f'\n\tpe->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
    else:
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_event_attributes[-1]:
      continue
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
    else:
      _args.output_file.write('\twhile (*p++);')
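
  # Illustrative result of the loop above (abridged, not part of the original
  # file): with the current _json_event_attributes the generated C reads
  # roughly
  #   pe->name = (*p == '\0' ? NULL : p);
  #   while (*p++);
  #   pe->topic = (*p == '\0' ? NULL : p);
  #   while (*p++);
  #   ...
  #   pe->deprecated = *p - '0';
  #   p++;
  #   ...
  #   pe->long_desc = (*p == '\0' ? NULL : p);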
  _args.output_file.write("""}

static void decompress_metric(int offset, struct pmu_metric *pm)
{
\tconst char *p = &big_c_string[offset];
""")
  for attr in _json_metric_attributes:
    _args.output_file.write(f'\n\tpm->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
    else:
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_metric_attributes[-1]:
      continue
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
    else:
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
                                                const struct pmu_table_entry *pmu,
                                                pmu_event_iter_fn fn,
                                                void *data)
{
        int ret;
        struct pmu_event pe = {
                .pmu = &big_c_string[pmu->pmu_name.offset],
        };

        for (uint32_t i = 0; i < pmu->num_entries; i++) {
                decompress_event(pmu->entries[i].offset, &pe);
                if (!pe.name)
                        continue;
                ret = fn(&pe, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
                                            const struct pmu_table_entry *pmu,
                                            const char *name,
                                            pmu_event_iter_fn fn,
                                            void *data)
{
        struct pmu_event pe = {
                .pmu = &big_c_string[pmu->pmu_name.offset],
        };
        int low = 0, high = pmu->num_entries - 1;

        while (low <= high) {
                int cmp, mid = (low + high) / 2;

                decompress_event(pmu->entries[mid].offset, &pe);

                if (!pe.name && !name)
                        goto do_call;

                if (!pe.name && name) {
                        low = mid + 1;
                        continue;
                }
                if (pe.name && !name) {
                        high = mid - 1;
                        continue;
                }

                cmp = strcasecmp(pe.name, name);
                if (cmp < 0) {
                        low = mid + 1;
                        continue;
                }
                if (cmp > 0) {
                        high = mid - 1;
                        continue;
                }
do_call:
                return fn ? fn(&pe, table, data) : 0;
        }
        return PMU_EVENTS__NOT_FOUND;
}

int pmu_events_table__for_each_event(const struct pmu_events_table *table,
                                     struct perf_pmu *pmu,
                                     pmu_event_iter_fn fn,
                                     void *data)
{
        for (size_t i = 0; i < table->num_pmus; i++) {
                const struct pmu_table_entry *table_pmu = &table->pmus[i];
                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
                int ret;

                if (pmu && !pmu__name_match(pmu, pmu_name))
                        continue;

                ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
                if (pmu || ret)
                        return ret;
        }
        return 0;
}

int pmu_events_table__find_event(const struct pmu_events_table *table,
                                 struct perf_pmu *pmu,
                                 const char *name,
                                 pmu_event_iter_fn fn,
                                 void *data)
{
        for (size_t i = 0; i < table->num_pmus; i++) {
                const struct pmu_table_entry *table_pmu = &table->pmus[i];
                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
                int ret;

                if (!pmu__name_match(pmu, pmu_name))
                        continue;

                ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
                if (ret != PMU_EVENTS__NOT_FOUND)
                        return ret;
        }
        return PMU_EVENTS__NOT_FOUND;
static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
					    const struct pmu_table_entry *pmu,
					    const char *name,
					    pmu_event_iter_fn fn,
					    void *data)
{
	struct pmu_event pe = {
		.pmu = &big_c_string[pmu->pmu_name.offset],
	};
	int low = 0, high = pmu->num_entries - 1;

	while (low <= high) {
		int cmp, mid = (low + high) / 2;

		decompress_event(pmu->entries[mid].offset, &pe);

		if (!pe.name && !name)
			goto do_call;

		if (!pe.name && name) {
			low = mid + 1;
			continue;
		}
		if (pe.name && !name) {
			high = mid - 1;
			continue;
		}

		cmp = strcasecmp(pe.name, name);
		if (cmp < 0) {
			low = mid + 1;
			continue;
		}
		if (cmp > 0) {
			high = mid - 1;
			continue;
		}
do_call:
		return fn ? fn(&pe, table, data) : 0;
	}
	return -1000;
}

int pmu_events_table__for_each_event(const struct pmu_events_table *table,
				     struct perf_pmu *pmu,
				     pmu_event_iter_fn fn,
				     void *data)
{
	for (size_t i = 0; i < table->num_pmus; i++) {
		const struct pmu_table_entry *table_pmu = &table->pmus[i];
		const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
		int ret;

		if (pmu && !pmu__name_match(pmu, pmu_name))
			continue;

		ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
		if (pmu || ret)
			return ret;
	}
	return 0;
}

int pmu_events_table__find_event(const struct pmu_events_table *table,
				 struct perf_pmu *pmu,
				 const char *name,
				 pmu_event_iter_fn fn,
				 void *data)
{
	for (size_t i = 0; i < table->num_pmus; i++) {
		const struct pmu_table_entry *table_pmu = &table->pmus[i];
		const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
		int ret;

		if (!pmu__name_match(pmu, pmu_name))
			continue;

		ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
		if (ret != -1000)
			return ret;
	}
	return -1000;
}

size_t pmu_events_table__num_events(const struct pmu_events_table *table,
				    struct perf_pmu *pmu)
{
	size_t count = 0;

	for (size_t i = 0; i < table->num_pmus; i++) {
		const struct pmu_table_entry *table_pmu = &table->pmus[i];
		const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];

		if (pmu__name_match(pmu, pmu_name))
			count += table_pmu->num_entries;
	}
	return count;
}

static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
						  const struct pmu_table_entry *pmu,
						  pmu_metric_iter_fn fn,
						  void *data)
{
	int ret;
	struct pmu_metric pm = {
		.pmu = &big_c_string[pmu->pmu_name.offset],
	};

	for (uint32_t i = 0; i < pmu->num_entries; i++) {
		decompress_metric(pmu->entries[i].offset, &pm);
		if (!pm.metric_expr)
			continue;
		ret = fn(&pm, table, data);
		if (ret)
			return ret;
	}
	return 0;
}

int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
				       pmu_metric_iter_fn fn,
				       void *data)
{
	for (size_t i = 0; i < table->num_pmus; i++) {
		int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
								 fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

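/*
 * map_for_pmu() caches its most recent answer in two ways: last_result
 * short-circuits repeated lookups for the same perf_pmu pointer, while
 * last_map_search avoids re-walking pmu_events_map when the CPUID string has
 * not changed.
 */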
static const struct pmu_events_map *map_for_pmu(struct perf_pmu *pmu)
{
	static struct {
		const struct pmu_events_map *map;
		struct perf_pmu *pmu;
	} last_result;
	static struct {
		const struct pmu_events_map *map;
		char *cpuid;
	} last_map_search;
	static bool has_last_result, has_last_map_search;
	const struct pmu_events_map *map = NULL;
	char *cpuid = NULL;
	size_t i;

	if (has_last_result && last_result.pmu == pmu)
		return last_result.map;

	cpuid = perf_pmu__getcpuid(pmu);

	/*
	 * On some platforms that use a cpus map, cpuid can be NULL for
	 * PMUs other than CORE PMUs.
	 */
	if (!cpuid)
		goto out_update_last_result;

	if (has_last_map_search && !strcmp(last_map_search.cpuid, cpuid)) {
		map = last_map_search.map;
		free(cpuid);
	} else {
		i = 0;
		for (;;) {
			map = &pmu_events_map[i++];

			if (!map->arch) {
				map = NULL;
				break;
			}

			if (!strcmp_cpuid_str(map->cpuid, cpuid))
				break;
		}
		free(last_map_search.cpuid);
		last_map_search.cpuid = cpuid;
		last_map_search.map = map;
		has_last_map_search = true;
	}
out_update_last_result:
	last_result.pmu = pmu;
	last_result.map = map;
	has_last_result = true;
	return map;
}

const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
	const struct pmu_events_map *map = map_for_pmu(pmu);

	if (!map)
		return NULL;

	if (!pmu)
		return &map->event_table;

	for (size_t i = 0; i < map->event_table.num_pmus; i++) {
		const struct pmu_table_entry *table_pmu = &map->event_table.pmus[i];
		const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];

		if (pmu__name_match(pmu, pmu_name))
			return &map->event_table;
	}
	return NULL;
}

const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
{
	const struct pmu_events_map *map = map_for_pmu(pmu);

	if (!map)
		return NULL;

	if (!pmu)
		return &map->metric_table;

	for (size_t i = 0; i < map->metric_table.num_pmus; i++) {
		const struct pmu_table_entry *table_pmu = &map->metric_table.pmus[i];
		const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];

		if (pmu__name_match(pmu, pmu_name))
			return &map->metric_table;
	}
	return NULL;
}

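/*
 * The lookups below match core tables by architecture and CPUID, and system
 * tables by name; pmu_events_map and pmu_sys_event_tables are terminated by an
 * entry with a NULL arch/name, which is what the loop conditions test.
 */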
const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
			return &tables->event_table;
	}
	return NULL;
}

const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
			return &tables->metric_table;
	}
	return NULL;
}

int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		int ret = pmu_events_table__for_each_event(&tables->event_table,
							   /*pmu=*/ NULL, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
{
	for (const struct pmu_events_map *tables = &pmu_events_map[0];
	     tables->arch;
	     tables++) {
		int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

const struct pmu_events_table *find_sys_events_table(const char *name)
{
	for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
	     tables->name;
	     tables++) {
		if (!strcmp(tables->name, name))
			return &tables->event_table;
	}
	return NULL;
}

int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
	for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
	     tables->name;
	     tables++) {
		int ret = pmu_events_table__for_each_event(&tables->event_table,
							   /*pmu=*/ NULL, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}

int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
{
	for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
	     tables->name;
	     tables++) {
		int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);

		if (ret)
			return ret;
	}
	return 0;
}
""")

def print_metricgroups() -> None:
  _args.output_file.write("""
static const int metricgroups[][2] = {
""")
  for mgroup in sorted(_metricgroups):
    description = _metricgroups[mgroup]
    _args.output_file.write(
        f'\t{{ {_bcs.offsets[mgroup]}, {_bcs.offsets[description]} }}, /* {mgroup} => {description} */\n'
    )
  _args.output_file.write("""
};

const char *describe_metricgroup(const char *group)
{
	int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1;

	while (low <= high) {
		int mid = (low + high) / 2;
		const char *mgroup = &big_c_string[metricgroups[mid][0]];
		int cmp = strcmp(mgroup, group);

		if (cmp == 0) {
			return &big_c_string[metricgroups[mid][1]];
		} else if (cmp < 0) {
			low = mid + 1;
		} else {
			high = mid - 1;
		}
	}
	return NULL;
}
""")

def main() -> None:
  global _args

  def dir_path(path: str) -> str:
    """Validate path is a directory for argparse."""
    if os.path.isdir(path):
      return path
    raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')

  def ftw(path: str, parents: Sequence[str],
          action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
    """Replicate the directory/file walking behavior of C's file tree walk."""
    for item in sorted(os.scandir(path), key=lambda e: e.name):
      if _args.model != 'all' and item.is_dir():
        # Check if the model matches one in _args.model.
        if len(parents) == _args.model.split(',')[0].count('/'):
          # We're testing the correct directory.
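          # e.g. with model='arm/cortex-a34' the check fires at parents=['arm']
          # and item.name='cortex-a34', giving item_path='arm/cortex-a34' below.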
          item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
          if 'test' not in item_path and item_path not in _args.model.split(','):
            continue
      action(parents, item)
      if item.is_dir():
        ftw(item.path, parents + [item.name], action)

  ap = argparse.ArgumentParser()
  ap.add_argument('arch', help='Architecture name like x86')
  ap.add_argument('model', help='''Select a model such as skylake to
reduce the code size. Normally set to "all". For architectures like
ARM64 with an implementor/model, the model must include the implementor
such as "arm/cortex-a34".''',
                  default='all')
  ap.add_argument(
      'starting_dir',
      type=dir_path,
      help='Root of tree containing architecture directories containing json files'
  )
  ap.add_argument(
      'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
  _args = ap.parse_args()

  _args.output_file.write("""
#include <pmu-events/pmu-events.h>
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>

struct compact_pmu_event {
	int offset;
};

struct pmu_table_entry {
	const struct compact_pmu_event *entries;
	uint32_t num_entries;
	struct compact_pmu_event pmu_name;
};

""")
  archs = []
  for item in os.scandir(_args.starting_dir):
    if not item.is_dir():
      continue
    if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
      archs.append(item.name)

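  # With a concrete architecture argument the 'test' directory is always picked
  # up alongside it, so fewer than two entries means the requested architecture
  # directory was not found.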
  if len(archs) < 2:
    raise IOError(f'Missing architecture directory \'{_args.arch}\'')

  archs.sort()
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    preprocess_arch_std_files(arch_path)
    ftw(arch_path, [], preprocess_one_file)

  _bcs.compute()
  _args.output_file.write('static const char *const big_c_string =\n')
  for s in _bcs.big_string:
    _args.output_file.write(s)
  _args.output_file.write(';\n\n')
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    ftw(arch_path, [], process_one_file)
    print_pending_events()
    print_pending_metrics()

  print_mapping_table(archs)
  print_system_mapping_table()
  print_metricgroups()

if __name__ == '__main__':
  main()
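# Example invocation (illustrative; the JSON tree path is an assumption and
# output_file defaults to stdout when omitted):
#   python3 jevents.py x86 all tools/perf/pmu-events/arch pmu-events.c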