// SPDX-License-Identifier: GPL-2.0+
//
// Copyright 2019 Madhavan Srinivasan, IBM Corporation.

#define pr_fmt(fmt)	"generic-compat-pmu: " fmt

#include "isa207-common.h"

/*
 * Raw event encoding:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *                                 [ pmc ]                       [    pmcxsel    ]
 */

/*
 * Event codes defined in ISA v3.0B
 */
#define EVENT(_name, _code)	_name = _code,

enum {
	/* Cycles, alternate code */
	EVENT(PM_CYC_ALT,			0x100f0)
	/* One or more instructions completed in a cycle */
	EVENT(PM_CYC_INST_CMPL,			0x100f2)
	/* Floating-point instruction completed */
	EVENT(PM_FLOP_CMPL,			0x100f4)
	/* Instruction ERAT/L1-TLB miss */
	EVENT(PM_L1_ITLB_MISS,			0x100f6)
	/* All instructions completed and none available */
	EVENT(PM_NO_INST_AVAIL,			0x100f8)
	/* A load-type instruction completed (ISA v3.0+) */
	EVENT(PM_LD_CMPL,			0x100fc)
	/* Instruction completed, alternate code (ISA v3.0+) */
	EVENT(PM_INST_CMPL_ALT,			0x100fe)
	/* A store-type instruction completed */
	EVENT(PM_ST_CMPL,			0x200f0)
	/* Instruction Dispatched */
	EVENT(PM_INST_DISP,			0x200f2)
	/* Run_cycles */
	EVENT(PM_RUN_CYC,			0x200f4)
	/* Data ERAT/L1-TLB miss/reload */
	EVENT(PM_L1_DTLB_RELOAD,		0x200f6)
	/* Taken branch completed */
	EVENT(PM_BR_TAKEN_CMPL,			0x200fa)
	/* Demand iCache Miss */
	EVENT(PM_L1_ICACHE_MISS,		0x200fc)
	/* L1 Dcache reload from memory */
	EVENT(PM_L1_RELOAD_FROM_MEM,		0x200fe)
	/* L1 Dcache store miss */
	EVENT(PM_ST_MISS_L1,			0x300f0)
	/* Alternate code for PM_INST_DISP */
	EVENT(PM_INST_DISP_ALT,			0x300f2)
	/* Branch direction or target mispredicted */
	EVENT(PM_BR_MISPREDICT,			0x300f6)
	/* Data TLB miss/reload */
	EVENT(PM_DTLB_MISS,			0x300fc)
	/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
	EVENT(PM_DATA_FROM_L3MISS,		0x300fe)
	/* L1 Dcache load miss */
	EVENT(PM_LD_MISS_L1,			0x400f0)
	/* Cycle when instruction(s) dispatched */
	EVENT(PM_CYC_INST_DISP,			0x400f2)
	/* Branch or branch target mispredicted */
	EVENT(PM_BR_MPRED_CMPL,			0x400f6)
	/* Instructions completed with run latch set */
	EVENT(PM_RUN_INST_CMPL,			0x400fa)
	/* Instruction TLB miss/reload */
	EVENT(PM_ITLB_MISS,			0x400fc)
	/* Load data not cached */
	EVENT(PM_LD_NOT_CACHED,			0x400fe)
	/* Instructions */
	EVENT(PM_INST_CMPL,			0x500fa)
	/* Cycles */
	EVENT(PM_CYC,				0x600f4)
};

#undef EVENT

/* Table of alternatives, sorted in increasing order of column 0 */
/* Note that in each row, column 0 must be the smallest */
static const unsigned int generic_event_alternatives[][MAX_ALT] = {
	{ PM_CYC_ALT,			PM_CYC },
	{ PM_INST_CMPL_ALT,		PM_INST_CMPL },
	{ PM_INST_DISP,			PM_INST_DISP_ALT },
};

static int generic_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int num_alt = 0;

	num_alt = isa207_get_alternatives(event, alt,
					  ARRAY_SIZE(generic_event_alternatives),
					  flags, generic_event_alternatives);

	return num_alt;
}

GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_NO_INST_AVAIL);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1);

CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);

static struct attribute *generic_compat_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_NO_INST_AVAIL),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};
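
/*
 * Descriptive note: the "events", "format" and "caps" attribute groups below
 * are exported by the perf core under /sys/bus/event_source/devices/<pmu>/,
 * so the symbolic names defined above ("cpu-cycles", "L1-dcache-load-misses",
 * ...) can be used directly as perf event names for this PMU, e.g. with
 * "perf stat -e <pmu>/instructions/".
 */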

static const struct attribute_group generic_compat_pmu_events_group = {
	.name = "events",
	.attrs = generic_compat_events_attr,
};

PMU_FORMAT_ATTR(event,		"config:0-19");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(pmc,		"config:16-19");

static struct attribute *generic_compat_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_pmc.attr,
	NULL,
};

static const struct attribute_group generic_compat_pmu_format_group = {
	.name = "format",
	.attrs = generic_compat_pmu_format_attr,
};

static struct attribute *generic_compat_pmu_caps_attrs[] = {
	NULL
};

static struct attribute_group generic_compat_pmu_caps_group = {
	.name  = "caps",
	.attrs = generic_compat_pmu_caps_attrs,
};

static const struct attribute_group *generic_compat_pmu_attr_groups[] = {
	&generic_compat_pmu_format_group,
	&generic_compat_pmu_events_group,
	&generic_compat_pmu_caps_group,
	NULL,
};

static int compat_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_NO_INST_AVAIL,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};

#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_LD_MISS_L1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
};

#undef C
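
/*
 * A generalized cache event reaches this driver as a PERF_TYPE_HW_CACHE
 * config encoded as (cache id) | (operation << 8) | (result << 16); the
 * common powerpc perf code unpacks those three indices and looks them up in
 * the table above, rejecting 0 entries as unsupported and -1 entries as
 * nonsensical. For example, an L1D read miss resolves to PM_LD_MISS_L1.
 */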

/*
 * We set MMCR0[CC5-6RUN] so we can use counters 5 and 6 for
 * PM_INST_CMPL and PM_CYC.
 */
static int generic_compute_mmcr(u64 event[], int n_ev,
				unsigned int hwc[], struct mmcr_regs *mmcr,
				struct perf_event *pevents[], u32 flags)
{
	int ret;

	ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags);
	if (!ret)
		mmcr->mmcr0 |= MMCR0_C56RUN;
	return ret;
}

static struct power_pmu generic_compat_pmu = {
	.name			= "ISAv3",
	.n_counter		= MAX_PMU_COUNTERS,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.compute_mmcr		= generic_compute_mmcr,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= generic_get_alternatives,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(compat_generic_events),
	.generic_events		= compat_generic_events,
	.cache_events		= &generic_compat_cache_events,
	.attr_groups		= generic_compat_pmu_attr_groups,
};

int __init init_generic_compat_pmu(void)
{
	int rc = 0;

	/*
	 * From ISA v2.07 on, PMU features are architected;
	 * we require >= v3.0 because (a) that has PM_LD_CMPL and
	 * PM_INST_CMPL_ALT, which v2.07 doesn't have, and
	 * (b) we don't expect any non-IBM Power ISA
	 * implementations that conform to v2.07 but not v3.0.
	 */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return -ENODEV;

	rc = register_power_pmu(&generic_compat_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	return 0;
}
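
Because register_power_pmu() installs this driver as the core CPU PMU, its events are reachable through the standard perf_event_open(2) interface. The following is a minimal userspace sketch, not part of the kernel file above: it counts PERF_COUNT_HW_CPU_CYCLES for the calling thread, which compat_generic_events[] maps to PM_CYC on such a system. It assumes perf_event_open() is permitted for the current user (perf_event_paranoid settings apply) and keeps error handling minimal.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/perf_event.h>

/* Thin wrapper: glibc does not provide perf_event_open() directly. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* mapped to PM_CYC by the driver */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* Count for the calling thread, on any CPU. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload to be measured here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", count);

	close(fd);
	return 0;
}

Built with any C compiler, this prints the cycle count accumulated between the enable and disable ioctls; the same attr setup works for the other entries in compat_generic_events[].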