// SPDX-License-Identifier: GPL-2.0
/* Nehalem-EX/Westmere-EX uncore support */
#include <asm/cpu_device_id.h>
#include "uncore.h"

/* NHM-EX event control */
#define NHMEX_PMON_CTL_EV_SEL_MASK	0x000000ff
#define NHMEX_PMON_CTL_UMASK_MASK	0x0000ff00
#define NHMEX_PMON_CTL_EN_BIT0		(1 << 0)
#define NHMEX_PMON_CTL_EDGE_DET		(1 << 18)
#define NHMEX_PMON_CTL_PMI_EN		(1 << 20)
#define NHMEX_PMON_CTL_EN_BIT22		(1 << 22)
#define NHMEX_PMON_CTL_INVERT		(1 << 23)
#define NHMEX_PMON_CTL_THRESH_MASK	0xff000000
#define NHMEX_PMON_RAW_EVENT_MASK	(NHMEX_PMON_CTL_EV_SEL_MASK | \
					 NHMEX_PMON_CTL_UMASK_MASK | \
					 NHMEX_PMON_CTL_EDGE_DET | \
					 NHMEX_PMON_CTL_INVERT | \
					 NHMEX_PMON_CTL_THRESH_MASK)
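
/*
 * Example (illustrative, not from the original source): with the field
 * layout above, event select 0xb, umask 0x1 and threshold 0x20 encode as
 *
 *	0xb | (0x1 << 8) | (0x20 << 24) == 0x2000010b
 *
 * which userspace could request as, e.g.,
 * "perf stat -e uncore_cbox_0/event=0xb,umask=0x1,thresh=0x20/"
 * once the format attributes below are registered (the sysfs PMU name
 * is an assumption here).
 */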

/* NHM-EX Ubox */
#define NHMEX_U_MSR_PMON_GLOBAL_CTL		0xc00
#define NHMEX_U_MSR_PMON_CTR			0xc11
#define NHMEX_U_MSR_PMON_EV_SEL			0xc10

#define NHMEX_U_PMON_GLOBAL_EN			(1 << 0)
#define NHMEX_U_PMON_GLOBAL_PMI_CORE_SEL	0x0000001e
#define NHMEX_U_PMON_GLOBAL_EN_ALL		(1 << 28)
#define NHMEX_U_PMON_GLOBAL_RST_ALL		(1 << 29)
#define NHMEX_U_PMON_GLOBAL_FRZ_ALL		(1 << 31)

#define NHMEX_U_PMON_RAW_EVENT_MASK		\
		(NHMEX_PMON_CTL_EV_SEL_MASK |	\
		 NHMEX_PMON_CTL_EDGE_DET)

/* NHM-EX Cbox */
#define NHMEX_C0_MSR_PMON_GLOBAL_CTL	0xd00
#define NHMEX_C0_MSR_PMON_CTR0		0xd11
#define NHMEX_C0_MSR_PMON_EV_SEL0	0xd10
#define NHMEX_C_MSR_OFFSET		0x20

/* NHM-EX Bbox */
#define NHMEX_B0_MSR_PMON_GLOBAL_CTL	0xc20
#define NHMEX_B0_MSR_PMON_CTR0		0xc31
#define NHMEX_B0_MSR_PMON_CTL0		0xc30
#define NHMEX_B_MSR_OFFSET		0x40
#define NHMEX_B0_MSR_MATCH		0xe45
#define NHMEX_B0_MSR_MASK		0xe46
#define NHMEX_B1_MSR_MATCH		0xe4d
#define NHMEX_B1_MSR_MASK		0xe4e

#define NHMEX_B_PMON_CTL_EN		(1 << 0)
#define NHMEX_B_PMON_CTL_EV_SEL_SHIFT	1
#define NHMEX_B_PMON_CTL_EV_SEL_MASK	\
		(0x1f << NHMEX_B_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_B_PMON_CTR_SHIFT		6
#define NHMEX_B_PMON_CTR_MASK		\
		(0x3 << NHMEX_B_PMON_CTR_SHIFT)
#define NHMEX_B_PMON_RAW_EVENT_MASK		\
		(NHMEX_B_PMON_CTL_EV_SEL_MASK | \
		 NHMEX_B_PMON_CTR_MASK)

/* NHM-EX Sbox */
#define NHMEX_S0_MSR_PMON_GLOBAL_CTL	0xc40
#define NHMEX_S0_MSR_PMON_CTR0		0xc51
#define NHMEX_S0_MSR_PMON_CTL0		0xc50
#define NHMEX_S_MSR_OFFSET		0x80
#define NHMEX_S0_MSR_MM_CFG		0xe48
#define NHMEX_S0_MSR_MATCH		0xe49
#define NHMEX_S0_MSR_MASK		0xe4a
#define NHMEX_S1_MSR_MM_CFG		0xe58
#define NHMEX_S1_MSR_MATCH		0xe59
#define NHMEX_S1_MSR_MASK		0xe5a

#define NHMEX_S_PMON_MM_CFG_EN		(0x1ULL << 63)
#define NHMEX_S_EVENT_TO_R_PROG_EV	0

/* NHM-EX Mbox */
#define NHMEX_M0_MSR_GLOBAL_CTL		0xca0
#define NHMEX_M0_MSR_PMU_DSP		0xca5
#define NHMEX_M0_MSR_PMU_ISS		0xca6
#define NHMEX_M0_MSR_PMU_MAP		0xca7
#define NHMEX_M0_MSR_PMU_MSC_THR	0xca8
#define NHMEX_M0_MSR_PMU_PGT		0xca9
#define NHMEX_M0_MSR_PMU_PLD		0xcaa
#define NHMEX_M0_MSR_PMU_ZDP_CTL_FVC	0xcab
#define NHMEX_M0_MSR_PMU_CTL0		0xcb0
#define NHMEX_M0_MSR_PMU_CNT0		0xcb1
#define NHMEX_M_MSR_OFFSET		0x40
#define NHMEX_M0_MSR_PMU_MM_CFG		0xe54
#define NHMEX_M1_MSR_PMU_MM_CFG		0xe5c

#define NHMEX_M_PMON_MM_CFG_EN		(1ULL << 63)
#define NHMEX_M_PMON_ADDR_MATCH_MASK	0x3ffffffffULL
#define NHMEX_M_PMON_ADDR_MASK_MASK	0x7ffffffULL
#define NHMEX_M_PMON_ADDR_MASK_SHIFT	34

#define NHMEX_M_PMON_CTL_EN			(1 << 0)
#define NHMEX_M_PMON_CTL_PMI_EN			(1 << 1)
#define NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT	2
#define NHMEX_M_PMON_CTL_COUNT_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_COUNT_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT	4
#define NHMEX_M_PMON_CTL_STORAGE_MODE_MASK	\
	(0x3 << NHMEX_M_PMON_CTL_STORAGE_MODE_SHIFT)
#define NHMEX_M_PMON_CTL_WRAP_MODE		(1 << 6)
#define NHMEX_M_PMON_CTL_FLAG_MODE		(1 << 7)
#define NHMEX_M_PMON_CTL_INC_SEL_SHIFT		9
#define NHMEX_M_PMON_CTL_INC_SEL_MASK		\
	(0x1f << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT	19
#define NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK	\
	(0x7 << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT)
#define NHMEX_M_PMON_RAW_EVENT_MASK			\
		(NHMEX_M_PMON_CTL_COUNT_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_STORAGE_MODE_MASK |	\
		 NHMEX_M_PMON_CTL_WRAP_MODE |		\
		 NHMEX_M_PMON_CTL_FLAG_MODE |		\
		 NHMEX_M_PMON_CTL_INC_SEL_MASK |	\
		 NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK)

#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 11) - 1) | (1 << 23))
#define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (11 + 3 * (n)))

#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK		(((1 << 12) - 1) | (1 << 24))
#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n)	(0x7ULL << (12 + 3 * (n)))
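
/*
 * Example (illustrative): on NHM-EX the per-event FVC field for event
 * 0xd + n occupies bits 11+3n ~ 13+3n, so
 * NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(2) == 0x7ULL << 17, while bits
 * 0~10 and bit 23 are shared by all four events. On WSM-EX the whole
 * layout is shifted up by one bit.
 */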

/*
 * Use bits 9~13 to select the event if bit 7 is not set;
 * otherwise use bits 19~21 to select the event.
 */
#define MBOX_INC_SEL(x) ((x) << NHMEX_M_PMON_CTL_INC_SEL_SHIFT)
#define MBOX_SET_FLAG_SEL(x) (((x) << NHMEX_M_PMON_CTL_SET_FLAG_SEL_SHIFT) | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_MASK (NHMEX_M_PMON_CTL_INC_SEL_MASK | \
			   NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_SET_FLAG_SEL_MASK (NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK | \
				NHMEX_M_PMON_CTL_FLAG_MODE)
#define MBOX_INC_SEL_EXTRA_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_INC_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_INC_SEL_MASK, (u64)-1, NHMEX_M_##r)
#define MBOX_SET_FLAG_SEL_EXTRA_REG(c, r) \
		EVENT_EXTRA_REG(MBOX_SET_FLAG_SEL(c), NHMEX_M0_MSR_PMU_##r, \
				MBOX_SET_FLAG_SEL_MASK, \
				(u64)-1, NHMEX_M_##r)

/* NHM-EX Rbox */
#define NHMEX_R_MSR_GLOBAL_CTL		0xe00
#define NHMEX_R_MSR_PMON_CTL0		0xe10
#define NHMEX_R_MSR_PMON_CNT0		0xe11
#define NHMEX_R_MSR_OFFSET		0x20

#define NHMEX_R_MSR_PORTN_QLX_CFG(n)	\
		((n) < 4 ? (0xe0c + (n)) : (0xe2c + (n) - 4))
#define NHMEX_R_MSR_PORTN_IPERF_CFG0(n)	(0xe04 + (n))
#define NHMEX_R_MSR_PORTN_IPERF_CFG1(n)	(0xe24 + (n))
#define NHMEX_R_MSR_PORTN_XBR_OFFSET(n)	\
		(((n) < 4 ? 0 : 0x10) + (n) * 4)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n)	\
		(0xe60 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET1_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(n) + 2)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n)	\
		(0xe70 + NHMEX_R_MSR_PORTN_XBR_OFFSET(n))
#define NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 1)
#define NHMEX_R_MSR_PORTN_XBR_SET2_MASK(n)	\
		(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(n) + 2)
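
/*
 * Example (illustrative): for port 1, NHMEX_R_MSR_PORTN_XBR_OFFSET(1)
 * is 4, so SET1 uses MM_CFG/MATCH/MASK at 0xe64/0xe65/0xe66 and SET2
 * at 0xe74/0xe75/0xe76. For port 5 the offset is 0x10 + 0x14 == 0x24,
 * giving SET1 at 0xe84/0xe85/0xe86.
 */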

#define NHMEX_R_PMON_CTL_EN		(1 << 0)
#define NHMEX_R_PMON_CTL_EV_SEL_SHIFT	1
#define NHMEX_R_PMON_CTL_EV_SEL_MASK	\
		(0x1f << NHMEX_R_PMON_CTL_EV_SEL_SHIFT)
#define NHMEX_R_PMON_CTL_PMI_EN		(1 << 6)
#define NHMEX_R_PMON_RAW_EVENT_MASK	NHMEX_R_PMON_CTL_EV_SEL_MASK

/* NHM-EX Wbox */
#define NHMEX_W_MSR_GLOBAL_CTL		0xc80
#define NHMEX_W_MSR_PMON_CNT0		0xc90
#define NHMEX_W_MSR_PMON_EVT_SEL0	0xc91
#define NHMEX_W_MSR_PMON_FIXED_CTR	0x394
#define NHMEX_W_MSR_PMON_FIXED_CTL	0x395

#define NHMEX_W_PMON_GLOBAL_FIXED_EN	(1ULL << 31)

#define __BITS_VALUE(x, i, n)  ((typeof(x))(((x) >> ((i) * (n))) & \
				((1ULL << (n)) - 1)))
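
/*
 * Example (illustrative): __BITS_VALUE() treats x as an array of n-bit
 * fields and returns field i, e.g.
 *	__BITS_VALUE(0xbeef, 0, 8) == 0xef
 *	__BITS_VALUE(0xbeef, 1, 8) == 0xbe
 * This file uses it to pack two 8-bit extra-register indices and two
 * 16-bit MSR addresses into reg1->idx and reg1->reg respectively.
 */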

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7");
DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63");

static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}

static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0);
}

static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config &= ~((1ULL << uncore_num_counters(box)) - 1);
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config &= ~NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	unsigned msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config |= (1ULL << uncore_num_counters(box)) - 1;
		/* WBox has a fixed counter */
		if (uncore_msr_fixed_ctl(box))
			config |= NHMEX_W_PMON_GLOBAL_FIXED_EN;
		wrmsrl(msr, config);
	}
}

static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx == UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
	else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
	else
		wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}
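
/*
 * Note (illustrative): boxes whose raw event mask includes bit 0 as
 * part of the event select (the U/C/S/W boxes use masks derived from
 * NHMEX_PMON_CTL_EV_SEL_MASK) cannot reuse bit 0 as an enable bit, so
 * their counters are enabled via bit 22 instead; box types whose event
 * select starts at bit 1 (e.g. the B-box) keep bit 0 free and use it
 * as the enable bit.
 */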

#define NHMEX_UNCORE_OPS_COMMON_INIT()				\
	.init_box	= nhmex_uncore_msr_init_box,		\
	.exit_box	= nhmex_uncore_msr_exit_box,		\
	.disable_box	= nhmex_uncore_msr_disable_box,		\
	.enable_box	= nhmex_uncore_msr_enable_box,		\
	.disable_event	= nhmex_uncore_msr_disable_event,	\
	.read_counter	= uncore_msr_read_counter

static struct intel_uncore_ops nhmex_uncore_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_uncore_msr_enable_event,
};

static struct attribute *nhmex_uncore_ubox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_edge.attr,
	NULL,
};

static const struct attribute_group nhmex_uncore_ubox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_ubox_formats_attr,
};

static struct intel_uncore_type nhmex_uncore_ubox = {
	.name		= "ubox",
	.num_counters	= 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.event_ctl	= NHMEX_U_MSR_PMON_EV_SEL,
	.perf_ctr	= NHMEX_U_MSR_PMON_CTR,
	.event_mask	= NHMEX_U_PMON_RAW_EVENT_MASK,
	.box_ctl	= NHMEX_U_MSR_PMON_GLOBAL_CTL,
	.ops		= &nhmex_uncore_ops,
	.format_group	= &nhmex_uncore_ubox_format_group
};

static struct attribute *nhmex_uncore_cbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	NULL,
};

static const struct attribute_group nhmex_uncore_cbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_cbox_formats_attr,
};

/* MSR offset for each instance of cbox */
static u64 nhmex_cbox_msr_offsets[] = {
	0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0,
};

static struct intel_uncore_type nhmex_uncore_cbox = {
	.name			= "cbox",
	.num_counters		= 6,
	.num_boxes		= 10,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_C0_MSR_PMON_EV_SEL0,
	.perf_ctr		= NHMEX_C0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_C0_MSR_PMON_GLOBAL_CTL,
	.msr_offsets		= nhmex_cbox_msr_offsets,
	.pair_ctr_ctl		= 1,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};

static struct uncore_event_desc nhmex_uncore_wbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_type nhmex_uncore_wbox = {
	.name			= "wbox",
	.num_counters		= 4,
	.num_boxes		= 1,
	.perf_ctr_bits		= 48,
	/*
	 * The W-box CNT0/EVT_SEL0 macro names look swapped; assigning
	 * them crosswise here puts the control register at 0xc90 and
	 * the counter at 0xc91, matching the other boxes' layout.
	 */
	.event_ctl		= NHMEX_W_MSR_PMON_CNT0,
	.perf_ctr		= NHMEX_W_MSR_PMON_EVT_SEL0,
	.fixed_ctr		= NHMEX_W_MSR_PMON_FIXED_CTR,
	.fixed_ctl		= NHMEX_W_MSR_PMON_FIXED_CTL,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_W_MSR_GLOBAL_CTL,
	.pair_ctr_ctl		= 1,
	.event_descs		= nhmex_uncore_wbox_events,
	.ops			= &nhmex_uncore_ops,
	.format_group		= &nhmex_uncore_cbox_format_group
};
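
/*
 * Usage sketch (sysfs PMU name assumed): the clockticks alias above is
 * exposed under /sys/bus/event_source/devices/uncore_wbox/events/, so
 * W-box clockticks can be counted with, e.g.,
 *	perf stat -a -e uncore_wbox/clockticks/ sleep 1
 */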

static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int ctr, ev_sel;

	ctr = (hwc->config & NHMEX_B_PMON_CTR_MASK) >>
		NHMEX_B_PMON_CTR_SHIFT;
	ev_sel = (hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK) >>
		  NHMEX_B_PMON_CTL_EV_SEL_SHIFT;

	/* events that do not use the match/mask registers */
	if ((ctr == 0 && ev_sel > 0x3) || (ctr == 1 && ev_sel > 0x6) ||
	    (ctr == 2 && ev_sel != 0x4) || ctr == 3)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_B0_MSR_MATCH;
	else
		reg1->reg = NHMEX_B1_MSR_MATCH;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		wrmsrl(reg1->reg, reg1->config);
		wrmsrl(reg1->reg + 1, reg2->config);
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_B_PMON_CTL_EV_SEL_MASK));
}

/*
 * The Bbox has 4 counters, but each counter monitors different events.
 * Use bits 6-7 in the event config to select the counter.
 */
static struct event_constraint nhmex_uncore_bbox_constraints[] = {
	EVENT_CONSTRAINT(0   , 1, 0xc0),
	EVENT_CONSTRAINT(0x40, 2, 0xc0),
	EVENT_CONSTRAINT(0x80, 4, 0xc0),
	EVENT_CONSTRAINT(0xc0, 8, 0xc0),
	EVENT_CONSTRAINT_END,
};
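
/*
 * Example (illustrative): an event with counter field 2 (config bits
 * 6-7 == 0x80) matches EVENT_CONSTRAINT(0x80, 4, 0xc0), whose counter
 * mask 0x4 allows only counter 2; userspace selects the counter via
 * the "counter" format attribute (config:6-7) below.
 */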

static struct attribute *nhmex_uncore_bbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_counter.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static const struct attribute_group nhmex_uncore_bbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_bbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_bbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_bbox_msr_enable_event,
	.hw_config		= nhmex_bbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_bbox = {
	.name			= "bbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_B0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_B0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_B_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_B0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_B_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.constraints		= nhmex_uncore_bbox_constraints,
	.ops			= &nhmex_uncore_bbox_ops,
	.format_group		= &nhmex_uncore_bbox_format_group
};

static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	/* only TO_R_PROG_EV event uses the match/mask register */
	if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) !=
	    NHMEX_S_EVENT_TO_R_PROG_EV)
		return 0;

	if (box->pmu->pmu_idx == 0)
		reg1->reg = NHMEX_S0_MSR_MM_CFG;
	else
		reg1->reg = NHMEX_S1_MSR_MM_CFG;
	reg1->idx = 0;
	reg1->config = event->attr.config1;
	reg2->config = event->attr.config2;
	return 0;
}

static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;

	if (reg1->idx != EXTRA_REG_NONE) {
		/* disable the filter, program match/mask, then re-enable */
		wrmsrl(reg1->reg, 0);
		wrmsrl(reg1->reg + 1, reg1->config);
		wrmsrl(reg1->reg + 2, reg2->config);
		wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN);
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
}

static struct attribute *nhmex_uncore_sbox_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_thresh8.attr,
	&format_attr_match.attr,
	&format_attr_mask.attr,
	NULL,
};

static const struct attribute_group nhmex_uncore_sbox_format_group = {
	.name			= "format",
	.attrs			= nhmex_uncore_sbox_formats_attr,
};

static struct intel_uncore_ops nhmex_uncore_sbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_sbox_msr_enable_event,
	.hw_config		= nhmex_sbox_hw_config,
	.get_constraint		= uncore_get_constraint,
	.put_constraint		= uncore_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_sbox = {
	.name			= "sbox",
	.num_counters		= 4,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_S0_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_S0_MSR_PMON_CTR0,
	.event_mask		= NHMEX_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_S0_MSR_PMON_GLOBAL_CTL,
	.msr_offset		= NHMEX_S_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 1,
	.ops			= &nhmex_uncore_sbox_ops,
	.format_group		= &nhmex_uncore_sbox_format_group
};

enum {
	EXTRA_REG_NHMEX_M_FILTER,
	EXTRA_REG_NHMEX_M_DSP,
	EXTRA_REG_NHMEX_M_ISS,
	EXTRA_REG_NHMEX_M_MAP,
	EXTRA_REG_NHMEX_M_MSC_THR,
	EXTRA_REG_NHMEX_M_PGT,
	EXTRA_REG_NHMEX_M_PLD,
	EXTRA_REG_NHMEX_M_ZDP_CTL_FVC,
};

static struct extra_reg nhmex_uncore_mbox_extra_regs[] = {
	MBOX_INC_SEL_EXTRA_REG(0x0, DSP),
	MBOX_INC_SEL_EXTRA_REG(0x4, MSC_THR),
	MBOX_INC_SEL_EXTRA_REG(0x5, MSC_THR),
	MBOX_INC_SEL_EXTRA_REG(0x9, ISS),
	/* event 0xa uses two extra registers */
	MBOX_INC_SEL_EXTRA_REG(0xa, ISS),
	MBOX_INC_SEL_EXTRA_REG(0xa, PLD),
	MBOX_INC_SEL_EXTRA_REG(0xb, PLD),
	/* events 0xd ~ 0x10 use the same extra register */
	MBOX_INC_SEL_EXTRA_REG(0xd, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0xe, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0xf, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0x10, ZDP_CTL_FVC),
	MBOX_INC_SEL_EXTRA_REG(0x16, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x0, DSP),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x1, ISS),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x5, PGT),
	MBOX_SET_FLAG_SEL_EXTRA_REG(0x6, MAP),
	EVENT_EXTRA_END
};
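
/*
 * Example (illustrative): an M-box event with inc_sel=0xd matches
 * MBOX_INC_SEL_EXTRA_REG(0xd, ZDP_CTL_FVC) above, so its config1 value
 * is routed to the ZDP_CTL_FVC MSR (0xcab on box 0). With the aliases
 * defined later this is, e.g.,
 *	perf stat -a -e uncore_mbox_0/inc_sel=0xd,fvc=0x2800/ sleep 1
 * (the sysfs PMU name is an assumption here).
 */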

/* Nehalem-EX or Westmere-EX ? */
static bool uncore_nhmex;

static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	bool ret = false;
	u64 mask;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		raw_spin_lock_irqsave(&er->lock, flags);
		if (!atomic_read(&er->ref) || er->config == config) {
			atomic_inc(&er->ref);
			er->config = config;
			ret = true;
		}
		raw_spin_unlock_irqrestore(&er->lock, flags);

		return ret;
	}
	/*
	 * The ZDP_CTL_FVC MSR has 4 fields which are used to control
	 * events 0xd ~ 0x10. Besides these 4 fields, there are additional
	 * fields which are shared.
	 */
	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (WARN_ON_ONCE(idx >= 4))
		return false;

	/* mask of the shared fields */
	if (uncore_nhmex)
		mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK;
	else
		mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];

	raw_spin_lock_irqsave(&er->lock, flags);
	/* add mask of the non-shared field if it's in use */
	if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) {
		if (uncore_nhmex)
			mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	}

	if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) {
		atomic_add(1 << (idx * 8), &er->ref);
		if (uncore_nhmex)
			mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK |
				NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		else
			mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK |
				WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
		er->config &= ~mask;
		er->config |= (config & mask);
		ret = true;
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	return ret;
}

static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		er = &box->shared_regs[idx];
		atomic_dec(&er->ref);
		return;
	}

	/* er->ref packs a separate 8-bit refcount for each FVC field */
	idx -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	atomic_sub(1 << (idx * 8), &er->ref);
}

static u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	u64 idx, orig_idx = __BITS_VALUE(reg1->idx, 0, 8);
	u64 config = reg1->config;

	/* get the non-shared control bits and shift them */
	idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
	if (uncore_nhmex)
		config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	else
		config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx);
	if (new_idx > orig_idx) {
		idx = new_idx - orig_idx;
		config <<= 3 * idx;
	} else {
		idx = orig_idx - new_idx;
		config >>= 3 * idx;
	}

	/* add the shared control bits back */
	if (uncore_nhmex)
		config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	else
		config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config;
	if (modify) {
		/* adjust the main event selector */
		if (new_idx > orig_idx)
			hwc->config += idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		else
			hwc->config -= idx << NHMEX_M_PMON_CTL_INC_SEL_SHIFT;
		reg1->config = config;
		reg1->idx = ~0xff | new_idx;
	}
	return config;
}
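
/*
 * Example (illustrative): moving an event from inc_sel 0xd (FVC field
 * 0) to inc_sel 0xf (FVC field 2) shifts its 3-bit FVC sub-field left
 * by 6 bits and bumps the inc_sel in hwc->config by 2; the shared FVC
 * bits are carried over from the old config unchanged.
 */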

static struct event_constraint *
nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int i, idx[2], alloc = 0;
	u64 config1 = reg1->config;

	idx[0] = __BITS_VALUE(reg1->idx, 0, 8);
	idx[1] = __BITS_VALUE(reg1->idx, 1, 8);
again:
	for (i = 0; i < 2; i++) {
		if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
			idx[i] = 0xff;

		if (idx[i] == 0xff)
			continue;

		if (!nhmex_mbox_get_shared_reg(box, idx[i],
				__BITS_VALUE(config1, i, 32)))
			goto fail;
		alloc |= (0x1 << i);
	}

	/* for the match/mask registers */
	if (reg2->idx != EXTRA_REG_NONE &&
	    (uncore_box_is_fake(box) || !reg2->alloc) &&
	    !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
		goto fail;

	/*
	 * If it's a fake box -- as per validate_{group,event}() we
	 * shouldn't touch event state and we can avoid doing so
	 * since both will only call get_event_constraints() once
	 * on each event, this avoids the need for reg->alloc.
	 */
	if (!uncore_box_is_fake(box)) {
		if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8))
			nhmex_mbox_alter_er(event, idx[0], true);
		reg1->alloc |= alloc;
		if (reg2->idx != EXTRA_REG_NONE)
			reg2->alloc = 1;
	}
	return NULL;
fail:
	if (idx[0] != 0xff && !(alloc & 0x1) &&
	    idx[0] >= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC) {
		/*
		 * Events 0xd ~ 0x10 are functionally identical, but are
		 * controlled by different fields in the ZDP_CTL_FVC
		 * register. If we fail to take one field, try the
		 * remaining 3 choices.
		 */
		BUG_ON(__BITS_VALUE(reg1->idx, 1, 8) != 0xff);
		idx[0] -= EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		idx[0] = (idx[0] + 1) % 4;
		idx[0] += EXTRA_REG_NHMEX_M_ZDP_CTL_FVC;
		if (idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) {
			config1 = nhmex_mbox_alter_er(event, idx[0], false);
			goto again;
		}
	}

	if (alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, idx[0]);
	if (alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, idx[1]);
	return &uncore_constraint_empty;
}

static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;

	if (uncore_box_is_fake(box))
		return;

	if (reg1->alloc & 0x1)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
	if (reg1->alloc & 0x2)
		nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
	reg1->alloc = 0;

	if (reg2->alloc) {
		nhmex_mbox_put_shared_reg(box, reg2->idx);
		reg2->alloc = 0;
	}
}

static int nhmex_mbox_extra_reg_idx(struct extra_reg *er)
{
	if (er->idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return er->idx;
	/* events 0xd ~ 0x10 map to FVC fields 0 ~ 3 */
	return er->idx + (er->event >> NHMEX_M_PMON_CTL_INC_SEL_SHIFT) - 0xd;
}

static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	struct extra_reg *er;
	unsigned msr;
	int reg_idx = 0;
	/*
	 * The mbox events may require at most 2 extra MSRs. But only
	 * the lower 32 bits in these MSRs are significant, so we can
	 * use config1 to pass both MSRs' config.
	 */
	for (er = nhmex_uncore_mbox_extra_regs; er->msr; er++) {
		if (er->event != (event->hw.config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;

		msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
		if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff))
			return -EINVAL;

		/* always use the 32~63 bits to pass the PLD config */
		if (er->idx == EXTRA_REG_NHMEX_M_PLD)
			reg_idx = 1;
		else if (WARN_ON_ONCE(reg_idx > 0))
			return -EINVAL;

		reg1->idx &= ~(0xff << (reg_idx * 8));
		reg1->reg &= ~(0xffff << (reg_idx * 16));
		reg1->idx |= nhmex_mbox_extra_reg_idx(er) << (reg_idx * 8);
		reg1->reg |= msr << (reg_idx * 16);
		reg1->config = event->attr.config1;
		reg_idx++;
	}
	/*
	 * The mbox only provides the ability to perform address matching
	 * for the PLD events.
	 */
	if (reg_idx == 2) {
		reg2->idx = EXTRA_REG_NHMEX_M_FILTER;
		if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN)
			reg2->config = event->attr.config2;
		else
			reg2->config = ~0ULL;
		if (box->pmu->pmu_idx == 0)
			reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG;
		else
			reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG;
	}
	return 0;
}
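
/*
 * Example (illustrative): event 0xa matches both the ISS and the PLD
 * entry in nhmex_uncore_mbox_extra_regs, so nhmex_mbox_hw_config()
 * packs reg1 as
 *	idx = (PLD extra-reg index << 8) | ISS extra-reg index
 *	reg = (PLD MSR address << 16) | ISS MSR address
 * with the ISS config in config1 bits 0~31, the PLD config in bits
 * 32~63, and reg_idx == 2 then sets up the address filter.
 */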

static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	u64 config;

	if (idx < EXTRA_REG_NHMEX_M_ZDP_CTL_FVC)
		return box->shared_regs[idx].config;

	er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
	raw_spin_lock_irqsave(&er->lock, flags);
	config = er->config;
	raw_spin_unlock_irqrestore(&er->lock, flags);
	return config;
}

static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx;

	idx = __BITS_VALUE(reg1->idx, 0, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 0, 16),
			nhmex_mbox_shared_reg_config(box, idx));
	idx = __BITS_VALUE(reg1->idx, 1, 8);
	if (idx != 0xff)
		wrmsrl(__BITS_VALUE(reg1->reg, 1, 16),
			nhmex_mbox_shared_reg_config(box, idx));

	if (reg2->idx != EXTRA_REG_NONE) {
		wrmsrl(reg2->reg, 0);
		if (reg2->config != ~0ULL) {
			wrmsrl(reg2->reg + 1,
				reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK);
			wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK &
				(reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT));
			wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN);
		}
	}

	wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0);
}

DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3");
DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5");
DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6");
DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7");
DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13");
DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21");
DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63");
DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33");
DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61");
DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31");
DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63");

static struct attribute *nhmex_uncore_mbox_formats_attr[] = {
	&format_attr_count_mode.attr,
	&format_attr_storage_mode.attr,
	&format_attr_wrap_mode.attr,
	&format_attr_flag_mode.attr,
	&format_attr_inc_sel.attr,
	&format_attr_set_flag_sel.attr,
	&format_attr_filter_cfg_en.attr,
	&format_attr_filter_match.attr,
	&format_attr_filter_mask.attr,
	&format_attr_dsp.attr,
	&format_attr_thr.attr,
	&format_attr_fvc.attr,
	&format_attr_pgt.attr,
	&format_attr_map.attr,
	&format_attr_iss.attr,
	&format_attr_pld.attr,
	NULL,
};

static const struct attribute_group nhmex_uncore_mbox_format_group = {
	.name		= "format",
	.attrs		= nhmex_uncore_mbox_formats_attr,
};

static struct uncore_event_desc nhmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x2800"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x2820"),
	{ /* end: all zeroes */ },
};

static struct uncore_event_desc wsmex_uncore_mbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"),
	INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhmex_uncore_mbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event	= nhmex_mbox_msr_enable_event,
	.hw_config	= nhmex_mbox_hw_config,
	.get_constraint	= nhmex_mbox_get_constraint,
	.put_constraint	= nhmex_mbox_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_mbox = {
	.name			= "mbox",
	.num_counters		= 6,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_M0_MSR_PMU_CTL0,
	.perf_ctr		= NHMEX_M0_MSR_PMU_CNT0,
	.event_mask		= NHMEX_M_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_M0_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_M_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 8,
	.event_descs		= nhmex_uncore_mbox_events,
	.ops			= &nhmex_uncore_mbox_ops,
	.format_group		= &nhmex_uncore_mbox_format_group,
};

static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;

	/* adjust the main event selector and extra register index */
	if (reg1->idx % 2) {
		reg1->idx--;
		hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	} else {
		reg1->idx++;
		hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	}

	/* adjust extra register config */
	switch (reg1->idx % 6) {
	case 2:
		/* shift the 8~15 bits to the 0~7 bits */
		reg1->config >>= 8;
		break;
	case 3:
		/* shift the 0~7 bits to the 8~15 bits */
		reg1->config <<= 8;
		break;
	}
}

/*
 * Each Rbox has 4 event sets, which monitor QPI ports 0~3 or 4~7.
 * An event set consists of 6 events; the 3rd and 4th events in an
 * event set use the same extra register, so an event set uses 5
 * extra registers.
 */
static struct event_constraint *
nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	struct intel_uncore_extra_reg *er;
	unsigned long flags;
	int idx, er_idx;
	u64 config1;
	bool ok = false;

	if (!uncore_box_is_fake(box) && reg1->alloc)
		return NULL;

	idx = reg1->idx % 6;
	config1 = reg1->config;
again:
	er_idx = idx;
	/* the 3rd and 4th events use the same extra register */
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	raw_spin_lock_irqsave(&er->lock, flags);
	if (idx < 2) {
		if (!atomic_read(&er->ref) || er->config == reg1->config) {
			atomic_inc(&er->ref);
			er->config = reg1->config;
			ok = true;
		}
	} else if (idx == 2 || idx == 3) {
		/*
		 * these two events use different fields in an extra
		 * register, the 0~7 bits and the 8~15 bits respectively.
		 */
		u64 mask = 0xff << ((idx - 2) * 8);
		if (!__BITS_VALUE(atomic_read(&er->ref), idx - 2, 8) ||
		    !((er->config ^ config1) & mask)) {
			atomic_add(1 << ((idx - 2) * 8), &er->ref);
			er->config &= ~mask;
			er->config |= config1 & mask;
			ok = true;
		}
	} else {
		if (!atomic_read(&er->ref) ||
		    (er->config == (hwc->config >> 32) &&
		     er->config1 == reg1->config &&
		     er->config2 == reg2->config)) {
			atomic_inc(&er->ref);
			er->config = (hwc->config >> 32);
			er->config1 = reg1->config;
			er->config2 = reg2->config;
			ok = true;
		}
	}
	raw_spin_unlock_irqrestore(&er->lock, flags);

	if (!ok) {
		/*
		 * The Rbox events are always in pairs. The paired
		 * events are functionally identical, but use different
		 * extra registers. If we failed to take an extra
		 * register, try the alternative.
		 */
		idx ^= 1;
		if (idx != reg1->idx % 6) {
			if (idx == 2)
				config1 >>= 8;
			else if (idx == 3)
				config1 <<= 8;
			goto again;
		}
	} else {
		if (!uncore_box_is_fake(box)) {
			if (idx != reg1->idx % 6)
				nhmex_rbox_alter_er(box, event);
			reg1->alloc = 1;
		}
		return NULL;
	}
	return &uncore_constraint_empty;
}

static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
{
	struct intel_uncore_extra_reg *er;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	int idx, er_idx;

	if (uncore_box_is_fake(box) || !reg1->alloc)
		return;

	idx = reg1->idx % 6;
	er_idx = idx;
	if (er_idx > 2)
		er_idx--;
	er_idx += (reg1->idx / 6) * 5;

	er = &box->shared_regs[er_idx];
	if (idx == 2 || idx == 3)
		atomic_sub(1 << ((idx - 2) * 8), &er->ref);
	else
		atomic_dec(&er->ref);

	reg1->alloc = 0;
}

static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
	struct hw_perf_event_extra *reg2 = &event->hw.branch_reg;
	int idx;

	idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >>
		NHMEX_R_PMON_CTL_EV_SEL_SHIFT;
	if (idx >= 0x18)
		return -EINVAL;

	reg1->idx = idx;
	reg1->config = event->attr.config1;

	switch (idx % 6) {
	case 4:
	case 5:
		hwc->config |= event->attr.config & (~0ULL << 32);
		reg2->config = event->attr.config2;
		break;
	}
	return 0;
}
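
/*
 * Example (illustrative): with event select 0x10 (== 16), reg1->idx is
 * 16, so the event belongs to event set 16 / 6 == 2 and slot
 * 16 % 6 == 4; on rbox 0 that is QPI port 2 and the XBR SET1
 * match/mask registers, with the upper 32 bits of the perf config
 * carrying the XBR MM_CFG value (see the xbr_mm_cfg format attribute).
 */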

static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
	struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
	int idx, port;

	idx = reg1->idx;
	port = idx / 6 + box->pmu->pmu_idx * 4;

	switch (idx % 6) {
	case 0:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config);
		break;
	case 1:
		wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config);
		break;
	case 2:
	case 3:
		wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port),
			uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
		break;
	case 4:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config);
		break;
	case 5:
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port),
			hwc->config >> 32);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config);
		wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config);
		break;
	}

	wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0 |
		(hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK));
}

DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63");
DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63");
DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15");
DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31");

static struct attribute *nhmex_uncore_rbox_formats_attr[] = {
	&format_attr_event5.attr,
	&format_attr_xbr_mm_cfg.attr,
	&format_attr_xbr_match.attr,
	&format_attr_xbr_mask.attr,
	&format_attr_qlx_cfg.attr,
	&format_attr_iperf_cfg.attr,
	NULL,
};

static const struct attribute_group nhmex_uncore_rbox_format_group = {
	.name = "format",
	.attrs = nhmex_uncore_rbox_formats_attr,
};

static struct uncore_event_desc nhmex_uncore_rbox_events[] = {
	INTEL_UNCORE_EVENT_DESC(qpi0_flit_send,		"event=0x0,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_flit_send,		"event=0x6,iperf_cfg=0x80000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_idle_filt,		"event=0x0,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi1_idle_filt,		"event=0x6,iperf_cfg=0x40000000"),
	INTEL_UNCORE_EVENT_DESC(qpi0_data_response,	"event=0x0,iperf_cfg=0xc4"),
	INTEL_UNCORE_EVENT_DESC(qpi1_data_response,	"event=0x6,iperf_cfg=0xc4"),
	{ /* end: all zeroes */ },
};
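
/*
 * Usage sketch (sysfs PMU names assumed): the aliases above allow
 * counting, e.g., flits sent on QPI port 0 of the first R-box with
 *	perf stat -a -e uncore_rbox_0/qpi0_flit_send/ sleep 1
 */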

static struct intel_uncore_ops nhmex_uncore_rbox_ops = {
	NHMEX_UNCORE_OPS_COMMON_INIT(),
	.enable_event		= nhmex_rbox_msr_enable_event,
	.hw_config		= nhmex_rbox_hw_config,
	.get_constraint		= nhmex_rbox_get_constraint,
	.put_constraint		= nhmex_rbox_put_constraint,
};

static struct intel_uncore_type nhmex_uncore_rbox = {
	.name			= "rbox",
	.num_counters		= 8,
	.num_boxes		= 2,
	.perf_ctr_bits		= 48,
	.event_ctl		= NHMEX_R_MSR_PMON_CTL0,
	.perf_ctr		= NHMEX_R_MSR_PMON_CNT0,
	.event_mask		= NHMEX_R_PMON_RAW_EVENT_MASK,
	.box_ctl		= NHMEX_R_MSR_GLOBAL_CTL,
	.msr_offset		= NHMEX_R_MSR_OFFSET,
	.pair_ctr_ctl		= 1,
	.num_shared_regs	= 20,
	.event_descs		= nhmex_uncore_rbox_events,
	.ops			= &nhmex_uncore_rbox_ops,
	.format_group		= &nhmex_uncore_rbox_format_group
};

static struct intel_uncore_type *nhmex_msr_uncores[] = {
	&nhmex_uncore_ubox,
	&nhmex_uncore_cbox,
	&nhmex_uncore_bbox,
	&nhmex_uncore_sbox,
	&nhmex_uncore_mbox,
	&nhmex_uncore_rbox,
	&nhmex_uncore_wbox,
	NULL,
};

void nhmex_uncore_cpu_init(void)
{
	if (boot_cpu_data.x86_vfm == INTEL_NEHALEM_EX)
		uncore_nhmex = true;
	else
		nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
	if (nhmex_uncore_cbox.num_boxes > topology_num_cores_per_package())
		nhmex_uncore_cbox.num_boxes = topology_num_cores_per_package();
	uncore_msr_uncores = nhmex_msr_uncores;
}
/* end of Nehalem-EX uncore support */