TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/perf_event.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC                                  32
#define INTEL_PMC_MAX_FIXED                                    16
#define INTEL_PMC_IDX_FIXED                                    32

#define X86_PMC_IDX_MAX                                        64

#define MSR_ARCH_PERFMON_PERFCTR0                             0xc1
#define MSR_ARCH_PERFMON_PERFCTR1                             0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0                           0x186
#define MSR_ARCH_PERFMON_EVENTSEL1                           0x187

#define ARCH_PERFMON_EVENTSEL_EVENT                     0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK                     0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR                       (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS                        (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE                      (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL               (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT                       (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY                       (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE                    (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV                       (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK                     0xFF000000ULL
#define ARCH_PERFMON_EVENTSEL_BR_CNTR                   (1ULL << 35)
#define ARCH_PERFMON_EVENTSEL_EQ                        (1ULL << 36)
#define ARCH_PERFMON_EVENTSEL_UMASK2                    (0xFFULL << 40)

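/*
 * A minimal, compiled-out sketch (not part of the kernel API, helper name
 * hypothetical): composing a raw event selector from the bit fields above.
 * This encodes the architectural UnHalted Core Cycles event (event=0x3c,
 * umask=0x00), counted in both user and kernel mode, with the counter
 * enabled; the result would be written to MSR_ARCH_PERFMON_EVENTSEL0.
 */
#if 0
static inline u64 example_core_cycles_eventsel(void)
{
        u64 config = 0;

        config |= 0x3c & ARCH_PERFMON_EVENTSEL_EVENT;           /* event code */
        config |= (0x00ULL << 8) & ARCH_PERFMON_EVENTSEL_UMASK; /* unit mask */
        config |= ARCH_PERFMON_EVENTSEL_USR;                    /* count at CPL > 0 */
        config |= ARCH_PERFMON_EVENTSEL_OS;                     /* count at CPL 0 */
        config |= ARCH_PERFMON_EVENTSEL_ENABLE;                 /* enable the counter */

        return config;
}
#endif
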
#define INTEL_FIXED_BITS_MASK                           0xFULL
#define INTEL_FIXED_BITS_STRIDE                         4
#define INTEL_FIXED_0_KERNEL                            (1ULL << 0)
#define INTEL_FIXED_0_USER                              (1ULL << 1)
#define INTEL_FIXED_0_ANYTHREAD                         (1ULL << 2)
#define INTEL_FIXED_0_ENABLE_PMI                        (1ULL << 3)

#define HSW_IN_TX                                       (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED                          (1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE                           (1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE                            (1ULL << 32)

#define intel_fixed_bits_by_idx(_idx, _bits)                    \
        ((_bits) << ((_idx) * INTEL_FIXED_BITS_STRIDE))

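/*
 * A minimal, compiled-out sketch: each fixed counter owns a 4-bit control
 * field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL (defined further down), placed
 * at INTEL_FIXED_BITS_STRIDE bits per index. E.g. enabling fixed counter 1
 * for user+kernel counting with a PMI on overflow:
 */
#if 0
        u64 fixed_ctrl = intel_fixed_bits_by_idx(1, INTEL_FIXED_0_KERNEL |
                                                    INTEL_FIXED_0_USER |
                                                    INTEL_FIXED_0_ENABLE_PMI);
        /* fixed_ctrl == 0xb0: the counter 1 field occupies bits 7:4 */
#endif
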
#define AMD64_EVENTSEL_INT_CORE_ENABLE                  (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY                        (1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY                         (1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT               37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK                \
        (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT    \
        (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK   \
        (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT                            48
#define AMD64_L3_SLICE_MASK                             \
        (0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK                           \
        (0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT                           56
#define AMD64_L3_THREAD_MASK                            \
        (0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK                       \
        (0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES                           BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES                          BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT                           42
#define AMD64_L3_COREID_MASK                            \
        (0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK              \
        (ARCH_PERFMON_EVENTSEL_EVENT |  \
         ARCH_PERFMON_EVENTSEL_UMASK |  \
         ARCH_PERFMON_EVENTSEL_EDGE  |  \
         ARCH_PERFMON_EVENTSEL_INV   |  \
         ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS                     \
        (ARCH_PERFMON_EVENTSEL_EDGE |           \
         ARCH_PERFMON_EVENTSEL_INV |            \
         ARCH_PERFMON_EVENTSEL_CMASK |          \
         ARCH_PERFMON_EVENTSEL_ANY |            \
         ARCH_PERFMON_EVENTSEL_PIN_CONTROL |    \
         HSW_IN_TX |                            \
         HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK            \
        (X86_RAW_EVENT_MASK          |  \
         AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB         \
        (AMD64_EVENTSEL_EVENT        |  \
         ARCH_PERFMON_EVENTSEL_UMASK)

#define AMD64_PERFMON_V2_EVENTSEL_EVENT_NB      \
        (AMD64_EVENTSEL_EVENT   |               \
         GENMASK_ULL(37, 36))

#define AMD64_PERFMON_V2_EVENTSEL_UMASK_NB      \
        (ARCH_PERFMON_EVENTSEL_UMASK    |       \
         GENMASK_ULL(27, 24))

#define AMD64_PERFMON_V2_RAW_EVENT_MASK_NB              \
        (AMD64_PERFMON_V2_EVENTSEL_EVENT_NB     |       \
         AMD64_PERFMON_V2_EVENTSEL_UMASK_NB)

#define AMD64_PERFMON_V2_ENABLE_UMC                     BIT_ULL(31)
#define AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC             GENMASK_ULL(7, 0)
#define AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC          GENMASK_ULL(9, 8)
#define AMD64_PERFMON_V2_RAW_EVENT_MASK_UMC             \
        (AMD64_PERFMON_V2_EVENTSEL_EVENT_UMC    |       \
         AMD64_PERFMON_V2_EVENTSEL_RDWRMASK_UMC)

#define AMD64_NUM_COUNTERS                              4
#define AMD64_NUM_COUNTERS_CORE                         6
#define AMD64_NUM_COUNTERS_NB                           4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL           0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK         (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX         0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
                (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED              6
#define ARCH_PERFMON_EVENTS_COUNT                       7

#define PEBS_DATACFG_MEMINFO    BIT_ULL(0)
#define PEBS_DATACFG_GP         BIT_ULL(1)
#define PEBS_DATACFG_XMMS       BIT_ULL(2)
#define PEBS_DATACFG_LBRS       BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT  24

/* Steal the highest bit of pebs_data_cfg for SW usage */
#define PEBS_UPDATE_DS_SW       BIT_ULL(63)

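/*
 * A minimal, compiled-out sketch: a PEBS data configuration requesting the
 * memory-info and GP-register groups plus 8 LBR entries for adaptive PEBS
 * records (the record layouts appear further down). The LBR field is
 * assumed to hold the entry count minus one, as the PEBS driver encodes it.
 */
#if 0
        u64 pebs_data_cfg = PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP |
                            PEBS_DATACFG_LBRS |
                            ((8ULL - 1) << PEBS_DATACFG_LBR_SHIFT);
#endif
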
/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
        struct {
                unsigned int version_id:8;
                unsigned int num_counters:8;
                unsigned int bit_width:8;
                unsigned int mask_length:8;
        } split;
        unsigned int full;
};

union cpuid10_ebx {
        struct {
                unsigned int no_unhalted_core_cycles:1;
                unsigned int no_instructions_retired:1;
                unsigned int no_unhalted_reference_cycles:1;
                unsigned int no_llc_reference:1;
                unsigned int no_llc_misses:1;
                unsigned int no_branch_instruction_retired:1;
                unsigned int no_branch_misses_retired:1;
        } split;
        unsigned int full;
};

union cpuid10_edx {
        struct {
                unsigned int num_counters_fixed:5;
                unsigned int bit_width_fixed:8;
                unsigned int reserved1:2;
                unsigned int anythread_deprecated:1;
                unsigned int reserved2:16;
        } split;
        unsigned int full;
};

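/*
 * A minimal, compiled-out sketch of how these unions are consumed: read
 * leaf 0xa with the kernel's cpuid() helper (from <asm/processor.h>) and
 * pick the enumeration apart through the bit fields, along the lines of
 * what the Intel PMU driver does during init.
 */
#if 0
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
        union cpuid10_edx edx;
        unsigned int unused;

        cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);

        pr_info("PMU v%u: %u GP counters x %u bits, %u fixed counters\n",
                eax.split.version_id, eax.split.num_counters,
                eax.split.bit_width, edx.split.num_counters_fixed);
#endif
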
/*
 * Intel "Architectural Performance Monitoring extension" CPUID
 * detection/enumeration details:
 */
#define ARCH_PERFMON_EXT_LEAF                   0x00000023
#define ARCH_PERFMON_EXT_UMASK2                 0x1
#define ARCH_PERFMON_EXT_EQ                     0x2
#define ARCH_PERFMON_NUM_COUNTER_LEAF_BIT       0x1
#define ARCH_PERFMON_NUM_COUNTER_LEAF           0x1

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
        struct {
                /* Supported LBR depth values */
                unsigned int    lbr_depth_mask:8;
                unsigned int    reserved:22;
                /* Deep C-state Reset */
                unsigned int    lbr_deep_c_reset:1;
                /* IP values contain LIP */
                unsigned int    lbr_lip:1;
        } split;
        unsigned int            full;
};

union cpuid28_ebx {
        struct {
                /* CPL Filtering Supported */
                unsigned int    lbr_cpl:1;
                /* Branch Filtering Supported */
                unsigned int    lbr_filter:1;
                /* Call-stack Mode Supported */
                unsigned int    lbr_call_stack:1;
        } split;
        unsigned int            full;
};

union cpuid28_ecx {
        struct {
                /* Mispredict Bit Supported */
                unsigned int    lbr_mispred:1;
                /* Timed LBRs Supported */
                unsigned int    lbr_timed_lbr:1;
                /* Branch Type Field Supported */
                unsigned int    lbr_br_type:1;
                unsigned int    reserved:13;
                /* Branch counters (Event Logging) Supported */
                unsigned int    lbr_counters:4;
        } split;
        unsigned int            full;
};

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {
        struct {
                /* Number of Core Performance Counters */
                unsigned int    num_core_pmc:4;
                /* Number of available LBR Stack Entries */
                unsigned int    lbr_v2_stack_sz:6;
                /* Number of Data Fabric Counters */
                unsigned int    num_df_pmc:6;
                /* Number of Unified Memory Controller Counters */
                unsigned int    num_umc_pmc:6;
        } split;
        unsigned int            full;
};

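/*
 * A minimal, compiled-out sketch along the same lines for the AMD leaf:
 * CPUID 0x80000022 (EXT_PERFMON_DEBUG_FEATURES, defined further down)
 * reports the counter counts in EBX, readable via cpuid_ebx() from
 * <asm/processor.h>.
 */
#if 0
        union cpuid_0x80000022_ebx ebx;

        ebx.full = cpuid_ebx(0x80000022);
        pr_info("%u core PMCs, %u DF PMCs, %u UMC PMCs\n",
                ebx.split.num_core_pmc, ebx.split.num_df_pmc,
                ebx.split.num_umc_pmc);
#endif
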
struct x86_pmu_capability {
        int             version;
        int             num_counters_gp;
        int             num_counters_fixed;
        int             bit_width_gp;
        int             bit_width_fixed;
        unsigned int    events_mask;
        int             events_mask_len;
        unsigned int    pebs_ept        :1;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE              (1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS           (1 << 29)

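/*
 * A minimal, compiled-out sketch (helper name hypothetical): with RDPMC,
 * a fixed counter is selected by OR-ing its index into the base above,
 * e.g. fixed counter 0 (Instr_Retired.Any):
 */
#if 0
static inline u64 example_rdpmc_fixed0(void)
{
        u32 lo, hi;

        asm volatile("rdpmc" : "=a" (lo), "=d" (hi)
                             : "c" (INTEL_PMC_FIXED_RDPMC_BASE | 0));
        return ((u64)hi << 32) | lo;
}
#endif
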
/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC, which has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode PMC,
 * e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC, which doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX. The X equals the index of the fixed
 * counter + 1, e.g., the fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0     0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS        (INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1     0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES  (INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2     0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES  (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES  (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3     0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS       (INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS       (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)

/* TOPDOWN_BAD_SPECULATION.ALL: fixed counter 4 (Atom only) */
/* TOPDOWN_FE_BOUND.ALL: fixed counter 5 (Atom only) */
/* TOPDOWN_RETIRING.ALL: fixed counter 6 (Atom only) */

static inline bool use_fixed_pseudo_encoding(u64 code)
{
        return !(code & 0xff);
}

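/*
 * E.g. use_fixed_pseudo_encoding(0x0300) is true: CPU_CLK_Unhalted.Ref on
 * fixed counter 2 has event=0x00 and umask = idx + 1 = 0x3, while a
 * general-purpose encoding such as 0x003c carries a non-zero event code
 * and yields false.
 */
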
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS                 (INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE               (INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING               (INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC               (INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND               (INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND               (INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_TD_HEAVY_OPS              (INTEL_PMC_IDX_METRIC_BASE + 4)
#define INTEL_PMC_IDX_TD_BR_MISPREDICT          (INTEL_PMC_IDX_METRIC_BASE + 5)
#define INTEL_PMC_IDX_TD_FETCH_LAT              (INTEL_PMC_IDX_METRIC_BASE + 6)
#define INTEL_PMC_IDX_TD_MEM_BOUND              (INTEL_PMC_IDX_METRIC_BASE + 7)
#define INTEL_PMC_IDX_METRIC_END                INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_MSK_TOPDOWN                   ((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
                                                INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS                          0x0400  /* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING                0x8000  /* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC                0x8100  /* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND                0x8200  /* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND                0x8300  /* BE bound metric */
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS               0x8400  /* Heavy Operations metric */
#define INTEL_TD_METRIC_BR_MISPREDICT           0x8500  /* Branch Mispredict metric */
#define INTEL_TD_METRIC_FETCH_LAT               0x8600  /* Fetch Latency metric */
#define INTEL_TD_METRIC_MEM_BOUND               0x8700  /* Memory bound metric */

#define INTEL_TD_METRIC_MAX                     INTEL_TD_METRIC_MEM_BOUND
#define INTEL_TD_METRIC_NUM                     8

static inline bool is_metric_idx(int idx)
{
        return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
        return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}

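/*
 * E.g. is_metric_idx(INTEL_PMC_IDX_TD_RETIRING) is true, and so is
 * is_topdown_idx(INTEL_PMC_IDX_FIXED_SLOTS); a general-purpose counter
 * index (below INTEL_PMC_IDX_FIXED) is neither.
 */
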
#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit)       \
                        (~(0x1ull << bit) & INTEL_PMC_MSK_TOPDOWN)

#define GLOBAL_STATUS_COND_CHG                  BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT            62
#define GLOBAL_STATUS_BUFFER_OVF                BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF                   BIT_ULL(61)
#define GLOBAL_STATUS_ASIF                      BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN           BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT           58
#define GLOBAL_STATUS_LBRS_FROZEN               BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT         55
#define GLOBAL_STATUS_TRACE_TOPAPMI             BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT      48

#define GLOBAL_CTRL_EN_PERF_METRICS             48
/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate LBR stack frozen state
 * for architectural perfmon v4, also we unconditionally mask that bit in
 * the handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM),
 * can program the LBR registers on its own, and we don't actually do anything
 * with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR        (GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT  0x1b00

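/*
 * A minimal, compiled-out sketch: classifying a freshly read
 * PERF_GLOBAL_STATUS value with the bits above, roughly the way a PMI
 * handler inspects it before handling individual counter overflows
 * (MSR_CORE_PERF_GLOBAL_STATUS and rdmsrl() come from <asm/msr-index.h>
 * and <asm/msr.h>).
 */
#if 0
        u64 status;

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

        if (status & GLOBAL_STATUS_BUFFER_OVF)
                ;       /* the DS/PEBS buffer needs draining */
        if (status & BIT_ULL(GLOBAL_STATUS_PERF_METRICS_OVF_BIT))
                ;       /* PERF_METRICS overflowed: update the TopDown events */
        if (status & INTEL_PMC_MSK_FIXED_SLOTS)
                ;       /* fixed counter 3 (SLOTS) overflowed */
#endif
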
/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
        u64 format_size;
        u64 ip;
        u64 applicable_counters;
        u64 tsc;
};

struct pebs_meminfo {
        u64 address;
        u64 aux;
        u64 latency;
        u64 tsx_tuning;
};

struct pebs_gprs {
        u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
        u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
        u64 xmm[16*2];  /* two entries for each register */
};

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES              0x80000022

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES              0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL                  (1U<<0)
#define IBS_CAPS_FETCHSAM               (1U<<1)
#define IBS_CAPS_OPSAM                  (1U<<2)
#define IBS_CAPS_RDWROPCNT              (1U<<3)
#define IBS_CAPS_OPCNT                  (1U<<4)
#define IBS_CAPS_BRNTRGT                (1U<<5)
#define IBS_CAPS_OPCNTEXT               (1U<<6)
#define IBS_CAPS_RIPINVALIDCHK          (1U<<7)
#define IBS_CAPS_OPBRNFUSE              (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD           (1U<<9)
#define IBS_CAPS_OPDATA4                (1U<<10)
#define IBS_CAPS_ZEN4                   (1U<<11)

#define IBS_CAPS_DEFAULT                (IBS_CAPS_AVAIL         \
                                         | IBS_CAPS_FETCHSAM    \
                                         | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL                          0x1cc
#define IBSCTL_LVT_OFFSET_VALID         (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK          0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY    (1ULL<<59)
#define IBS_FETCH_RAND_EN       (1ULL<<57)
#define IBS_FETCH_VAL           (1ULL<<49)
#define IBS_FETCH_ENABLE        (1ULL<<48)
#define IBS_FETCH_CNT           0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT       0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT          (0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND     (0x0007FULL<<32)
#define IBS_OP_CNT_CTL          (1ULL<<19)
#define IBS_OP_VAL              (1ULL<<18)
#define IBS_OP_ENABLE           (1ULL<<17)
#define IBS_OP_L3MISSONLY       (1ULL<<16)
#define IBS_OP_MAX_CNT          0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT      0x007FFFFFULL   /* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL<<20)   /* separate upper 7 bits */
#define IBS_RIP_INVALID         (1ULL<<38)

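/*
 * A minimal, compiled-out sketch: an IBS op control value that samples
 * every 0x10000 dispatched micro-ops (IBS_CAPS_OPCNT permitting). The
 * 16-bit IBS_OP_MAX_CNT field is assumed to hold the period divided by
 * 16, as the IBS driver encodes it; with IBS_CAPS_OPCNTEXT the remaining
 * high bits land in IBS_OP_MAX_CNT_EXT_MASK.
 */
#if 0
        u64 period = 0x10000;
        u64 op_ctl = ((period >> 4) & IBS_OP_MAX_CNT) |
                     IBS_OP_CNT_CTL |   /* count dispatched ops, not clock cycles */
                     IBS_OP_ENABLE;
#endif
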
#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
extern int forward_event_to_ibs(struct perf_event *event);
#else
static inline u32 get_ibs_caps(void) { return 0; }
static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
#endif

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT       (1UL << 3)
#define PERF_EFLAGS_VM          (1UL << 5)

struct pt_regs;
struct x86_perf_regs {
        struct pt_regs  regs;
        u64             *xmm_regs;
};

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)   perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)         {       \
        (regs)->ip = (__ip);                                    \
        (regs)->sp = (unsigned long)__builtin_frame_address(0); \
        (regs)->cs = __KERNEL_CS;                               \
        (regs)->flags = 0;                                      \
}

struct perf_guest_switch_msr {
        unsigned msr;
        u64 host, guest;
};

struct x86_pmu_lbr {
        unsigned int    nr;
        unsigned int    from;
        unsigned int    to;
        unsigned int    info;
        bool            has_callstack;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern u64 perf_get_hw_event_config(int hw_event);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
        memset(cap, 0, sizeof(*cap));
}

static inline u64 perf_get_hw_event_config(int hw_event)
{
        return 0;
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
extern void x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data);
static inline void x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
        memset(lbr, 0, sizeof(*lbr));
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{
}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB 1

/*
 * architectural low power callback impacts
 * drivers/acpi/processor_idle.c
 * drivers/acpi/acpi_pad.c
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static __always_inline void perf_lopwr_cb(bool lopwr_in)
{
        static_call_mod(perf_lopwr_cb)(lopwr_in);
}

#endif /* CONFIG_PERF_EVENTS_AMD_BRS */

#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */

