TOMOYO Linux Cross Reference
Linux/arch/x86/kvm/svm/pmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
        PMU_TYPE_COUNTER = 0,
        PMU_TYPE_EVNTSEL,
};

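/*
 * Resolve a guest-controlled counter index to a GP counter.  The explicit
 * bounds check plus the array_index_nospec() clamp keep a mispredicted
 * index from steering a speculative out-of-bounds load.
 */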
static struct kvm_pmc *amd_pmu_get_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        unsigned int num_counters = pmu->nr_arch_gp_counters;

        if (pmc_idx >= num_counters)
                return NULL;

        return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}

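/*
 * Translate an MSR index to its GP counter, verifying both that the MSR
 * is of the requested type (event select vs. counter) and that the
 * underlying counter exists for the guest's PMU configuration.
 */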
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                                             enum pmu_type type)
{
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
        unsigned int idx;

        if (!vcpu->kvm->arch.enable_pmu)
                return NULL;

        switch (msr) {
        case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
                if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
                        return NULL;
                /*
                 * Each PMU counter has a pair of CTL and CTR MSRs. CTLn
                 * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd.
                 */
                idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
                if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
                        return NULL;
                break;
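        /*
         * Unlike the PERF_CTLn/PERF_CTRn pairs above, the legacy K7 MSRs
         * are not interleaved: the four event selects and the four
         * counters occupy two separate contiguous ranges.
         */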
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
                if (type != PMU_TYPE_EVNTSEL)
                        return NULL;
                idx = msr - MSR_K7_EVNTSEL0;
                break;
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
                if (type != PMU_TYPE_COUNTER)
                        return NULL;
                idx = msr - MSR_K7_PERFCTR0;
                break;
        default:
                return NULL;
        }

        return amd_pmu_get_pmc(pmu, idx);
}

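/*
 * Early validation of the RDPMC index (guest ECX); an index at or above
 * the advertised counter count makes the common RDPMC path inject #GP.
 */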
static int amd_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        if (idx >= pmu->nr_arch_gp_counters)
                return -EINVAL;

        return 0;
}

/* idx is the ECX register of the RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
        unsigned int idx, u64 *mask)
{
        return amd_pmu_get_pmc(vcpu_to_pmu(vcpu), idx);
}

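/* A PMU MSR can name either a counter or an event select; try both. */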
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

        return pmc;
}

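/*
 * Gate MSR existence on the PMU version and guest CPUID before falling
 * back to a per-counter lookup.
 */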
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        switch (msr) {
        case MSR_K7_EVNTSEL0 ... MSR_K7_PERFCTR3:
                return pmu->version > 0;
        case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
                return guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
        case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
        case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
                return pmu->version > 1;
        default:
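                /*
                 * A v2 PMU may enumerate additional counters whose
                 * interleaved CTL/CTR pairs sit directly above
                 * MSR_F15H_PERF_CTR5.
                 */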
                if (msr > MSR_F15H_PERF_CTR5 &&
                    msr < MSR_F15H_PERF_CTL0 + 2 * pmu->nr_arch_gp_counters)
                        return pmu->version > 1;
                break;
        }

        return amd_msr_idx_to_pmc(vcpu, msr);
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                msr_info->data = pmc_read_counter(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                msr_info->data = pmc->eventsel;
                return 0;
        }

        return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc_write_counter(pmc, data);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
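                /*
                 * Strip reserved bits and reprogram the backing perf
                 * event only if the event selector actually changed.
                 */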
                data &= ~pmu->reserved_bits;
                if (data != pmc->eventsel) {
                        pmc->eventsel = data;
                        kvm_pmu_request_counter_reprogram(pmc);
                }
                return 0;
        }

        return 1;
}

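/*
 * Recompute the guest PMU model from guest CPUID: PerfMonV2 counters via
 * leaf 0x80000022, PERFCTR_CORE's six counters, or the four legacy ones.
 */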
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        union cpuid_0x80000022_ebx ebx;

        pmu->version = 1;
        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFMON_V2)) {
                pmu->version = 2;
                /*
                 * Note, PERFMON_V2 is also in 0x80000022.0x0, i.e. the guest
                 * CPUID entry is guaranteed to be non-NULL.
                 */
                BUILD_BUG_ON(x86_feature_cpuid(X86_FEATURE_PERFMON_V2).function != 0x80000022 ||
                             x86_feature_cpuid(X86_FEATURE_PERFMON_V2).index);
                ebx.full = kvm_find_cpuid_entry_index(vcpu, 0x80000022, 0)->ebx;
                pmu->nr_arch_gp_counters = ebx.split.num_core_pmc;
        } else if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
        } else {
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
        }

        pmu->nr_arch_gp_counters = min_t(unsigned int, pmu->nr_arch_gp_counters,
                                         kvm_pmu_cap.num_counters_gp);

        if (pmu->version > 1) {
                pmu->global_ctrl_rsvd = ~((1ull << pmu->nr_arch_gp_counters) - 1);
                pmu->global_status_rsvd = pmu->global_ctrl_rsvd;
        }

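        /* AMD GP counters are 48 bits wide. */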
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
        pmu->reserved_bits = 0xfffffff000280000ull;
        pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
        /* Not applicable to AMD; clear them to prevent any fallout. */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->nr_arch_fixed_counters = 0;
        bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

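/*
 * One-time init of every possible GP counter; amd_pmu_refresh() later
 * determines how many of them the guest actually sees.
 */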
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        BUILD_BUG_ON(KVM_MAX_NR_AMD_GP_COUNTERS > AMD64_NUM_COUNTERS_CORE);

        for (i = 0; i < KVM_MAX_NR_AMD_GP_COUNTERS; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
                pmu->gp_counters[i].current_config = 0;
        }
}

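/* AMD/SVM callbacks wired into the common x86 vPMU framework. */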
struct kvm_pmu_ops amd_pmu_ops __initdata = {
        .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
        .msr_idx_to_pmc = amd_msr_idx_to_pmc,
        .check_rdpmc_early = amd_check_rdpmc_early,
        .is_valid_msr = amd_is_valid_msr,
        .get_msr = amd_pmu_get_msr,
        .set_msr = amd_pmu_set_msr,
        .refresh = amd_pmu_refresh,
        .init = amd_pmu_init,
        .EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
        .MAX_NR_GP_COUNTERS = KVM_MAX_NR_AMD_GP_COUNTERS,
        .MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
};