Linux/arch/x86/kvm/pmu.h (linux-6.11-rc3)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |   \
                                          MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) \
        (((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)

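/*
 * For example (a sketch, relying on INTEL_FIXED_BITS_STRIDE == 4 and
 * INTEL_FIXED_BITS_MASK == 0xf as defined in <asm/perf_event.h>): the
 * control field for fixed counter 1 occupies bits 7:4 of
 * IA32_FIXED_CTR_CTRL, so
 *
 *      fixed_ctrl_field(pmu->fixed_ctr_ctrl, 1)
 *
 * evaluates to (pmu->fixed_ctr_ctrl >> 4) & 0xf, i.e. that counter's
 * kernel-enable, user-enable, AnyThread, and PMI-enable bits.
 */
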
#define VMWARE_BACKDOOR_PMC_HOST_TSC            0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME           0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME       0x10002

#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED

struct kvm_pmu_emulated_event_selectors {
        u64 INSTRUCTIONS_RETIRED;
        u64 BRANCH_INSTRUCTIONS_RETIRED;
};

struct kvm_pmu_ops {
        struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
                unsigned int idx, u64 *mask);
        struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
        int (*check_rdpmc_early)(struct kvm_vcpu *vcpu, unsigned int idx);
        bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
        int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
        void (*refresh)(struct kvm_vcpu *vcpu);
        void (*init)(struct kvm_vcpu *vcpu);
        void (*reset)(struct kvm_vcpu *vcpu);
        void (*deliver_pmi)(struct kvm_vcpu *vcpu);
        void (*cleanup)(struct kvm_vcpu *vcpu);

        const u64 EVENTSEL_EVENT;
        const int MAX_NR_GP_COUNTERS;
        const int MIN_NR_GP_COUNTERS;
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
        /*
         * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
         * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
         * greater than zero.  However, KVM only exposes and emulates the MSR
         * to/for the guest if the guest PMU supports at least "Architectural
         * Performance Monitoring Version 2".
         *
         * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
         */
        return pmu->version > 1;
}
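
/*
 * E.g. a guest whose CPUID.0AH:EAX[7:0] reports version 1 is not given
 * IA32_PERF_GLOBAL_CTRL by KVM, even though the SDM nominally allows the
 * MSR for any non-zero PMU version; only v2+ guests see the MSR.
 */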

/*
 * KVM tracks all counters in 64-bit bitmaps, with general purpose counters
 * mapped to bits 31:0 and fixed counters mapped to bits 63:32, e.g. fixed
 * counter 0 is tracked internally via index 32.  On Intel (AMD doesn't
 * support fixed counters), this mirrors how fixed counters are mapped to
 * PERF_GLOBAL_CTRL and similar MSRs, i.e. tracking fixed counters at base
 * index 32 reduces the amount of boilerplate needed to iterate over PMCs
 * *and* simplifies common enable/disable/reset operations.
 *
 * WARNING!  This helper is only for lookups that are initiated by KVM; it is
 * NOT safe for guest lookups, e.g. it will do the wrong thing if passed a raw
 * ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX
 * for RDPMC, not by adding 32 to the fixed counter index).
 */
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
        if (idx < pmu->nr_arch_gp_counters)
                return &pmu->gp_counters[idx];

        idx -= KVM_FIXED_PMC_BASE_IDX;
        if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
                return &pmu->fixed_counters[idx];

        return NULL;
}
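
/*
 * For example (following the WARNING above, with KVM_FIXED_PMC_BASE_IDX of
 * 32): kvm_pmc_idx_to_pmc(pmu, 0) yields gp_counters[0], whereas
 * kvm_pmc_idx_to_pmc(pmu, 32) yields fixed_counters[0].  A guest RDPMC of
 * fixed counter 0 instead sets ECX = BIT(30) | 0, which must be translated
 * via the vendor's .rdpmc_ecx_to_pmc() hook.
 */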

#define kvm_for_each_pmc(pmu, pmc, i, bitmap)                   \
        for_each_set_bit(i, bitmap, X86_PMC_IDX_MAX)            \
                if (!(pmc = kvm_pmc_idx_to_pmc(pmu, i)))        \
                        continue;                               \
                else                                            \

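/*
 * Typical usage (a sketch modeled on kvm_pmu_handle_event() in pmu.c, which
 * iterates over a snapshot of pmu->reprogram_pmi and reprograms each set
 * PMC via a local helper):
 *
 *      DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
 *      struct kvm_pmc *pmc;
 *      int bit;
 *
 *      bitmap_copy(bitmap, pmu->reprogram_pmi, X86_PMC_IDX_MAX);
 *      kvm_for_each_pmc(pmu, pmc, bit, bitmap)
 *              reprogram_counter(pmc);
 *
 * The dangling "else" makes the statement after the macro the else-branch
 * of the NULL check, so the body runs only for valid PMCs and an "else" in
 * the caller's code cannot accidentally pair with the macro's internal "if".
 */
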
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
        u64 counter, enabled, running;

        counter = pmc->counter + pmc->emulated_counter;

        if (pmc->perf_event && !pmc->is_paused)
                counter += perf_event_read_value(pmc->perf_event,
                                                 &enabled, &running);
        /* FIXME: Scaling needed? */
        return counter & pmc_bitmask(pmc);
}

void pmc_write_counter(struct kvm_pmc *pmc, u64 val);

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
        return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
        return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
                                                 u64 data)
{
        return !(pmu->global_ctrl_rsvd & data);
}

/*
 * Returns the general purpose PMC with the specified MSR.  Note that it can
 * be used for both PERFCTRn and EVNTSELn; that is why it accepts the base
 * MSR as a parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
                                         u32 base)
{
        if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
                u32 index = array_index_nospec(msr - base,
                                               pmu->nr_arch_gp_counters);

                return &pmu->gp_counters[index];
        }

        return NULL;
}

/* Returns the fixed PMC with the specified MSR. */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
        int base = MSR_CORE_PERF_FIXED_CTR0;

        if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
                u32 index = array_index_nospec(msr - base,
                                               pmu->nr_arch_fixed_counters);

                return &pmu->fixed_counters[index];
        }

        return NULL;
}

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (pmc_is_fixed(pmc))
                return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
                                        pmc->idx - KVM_FIXED_PMC_BASE_IDX) &
                                        (INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER);

        return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;
extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;

static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
        bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
        int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;

        /*
         * Hybrid PMUs don't play nice with virtualization without careful
         * configuration by userspace, and KVM's APIs for reporting supported
         * vPMU features do not account for hybrid PMUs.  Disable vPMU support
         * for hybrid PMUs until KVM gains a way to let userspace opt-in.
         */
        if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
                enable_pmu = false;

        if (enable_pmu) {
                perf_get_x86_pmu_capability(&kvm_pmu_cap);

                /*
                 * WARN if perf did NOT disable the hardware PMU when the
                 * architecturally required number of GP counters isn't
                 * present, i.e. if there is a non-zero number of counters,
                 * but fewer than what is architecturally required.
                 */
                if (!kvm_pmu_cap.num_counters_gp ||
                    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
                        enable_pmu = false;
                else if (is_intel && !kvm_pmu_cap.version)
                        enable_pmu = false;
        }

        if (!enable_pmu) {
                memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
                return;
        }

        kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
        kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
                                          pmu_ops->MAX_NR_GP_COUNTERS);
        kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
                                             KVM_MAX_NR_FIXED_COUNTERS);

        kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
                perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
        kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
                perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
}
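
/*
 * Note: kvm_init_pmu_capability() runs once at module init, from
 * kvm_x86_vendor_init() in x86.c, with the vendor's pmu_ops:
 *
 *      kvm_init_pmu_capability(ops->pmu_ops);
 */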

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
        set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
        kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
        int bit;

        if (!diff)
                return;

        for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
                set_bit(bit, pmu->reprogram_pmi);
        kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}

/*
 * Check if a PMC is enabled by comparing it against the global_ctrl bits.
 *
 * If the vPMU doesn't have a global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (!kvm_pmu_has_perf_global_ctrl(pmu))
                return true;

        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}
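
/*
 * For example, Intel fixed counter 0 has pmc->idx == 32, so the test above
 * checks bit 32 of the guest's IA32_PERF_GLOBAL_CTRL value, which is
 * architecturally the EN_FIXED_CTR0 bit; GP counter N simply checks bit N.
 */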

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */
