Linux/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * vpmu_counter_access - Test vPMU event counter access
 *
 * Copyright (c) 2023 Google LLC.
 *
 * This test checks if the guest can see the same number of PMU event
 * counters (PMCR_EL0.N) that userspace sets, if the guest can access
 * those counters, and if the guest is prevented from accessing any
 * other counters.
 * It also checks that userspace accesses to the PMU registers honor the
 * PMCR.N value that's set for the guest.
 * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
 */
#include <kvm_util.h>
#include <processor.h>
#include <test_util.h>
#include <vgic.h>
#include <perf/arm_pmuv3.h>
#include <linux/bitfield.h>

/* The max number of the PMU event counters (excluding the cycle counter) */
#define ARMV8_PMU_MAX_GENERAL_COUNTERS  (ARMV8_PMU_MAX_COUNTERS - 1)

/* The cycle counter bit position that's common among the PMU registers */
#define ARMV8_PMU_CYCLE_IDX             31

struct vpmu_vm {
        struct kvm_vm *vm;
        struct kvm_vcpu *vcpu;
        int gic_fd;
};

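/* The VM, vCPU, and GIC state shared by the test cases (rebuilt by create_vpmu_vm() for each test) */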
static struct vpmu_vm vpmu_vm;

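/* The SET and CLR variants of a PMU bitmap register (e.g. PMCNTEN{SET,CLR}_EL0) */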
struct pmreg_sets {
        uint64_t set_reg_id;
        uint64_t clr_reg_id;
};

#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}

static uint64_t get_pmcr_n(uint64_t pmcr)
{
        return FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
}

static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
{
        u64p_replace_bits((__u64 *) pmcr, pmcr_n, ARMV8_PMU_PMCR_N);
}

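/*
 * Return the bitmask of the counters present when PMCR_EL0.N == @n:
 * event counters 0..(@n - 1) plus the cycle counter (bit 31).
 */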
static uint64_t get_counters_mask(uint64_t n)
{
        uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);

        if (n)
                mask |= GENMASK(n - 1, 0);
        return mask;
}

/* Read PMEVCNTR<n>_EL0 through PMXEVCNTR_EL0 */
static inline unsigned long read_sel_evcntr(int sel)
{
        write_sysreg(sel, pmselr_el0);
        isb();
        return read_sysreg(pmxevcntr_el0);
}

/* Write PMEVCNTR<n>_EL0 through PMXEVCNTR_EL0 */
static inline void write_sel_evcntr(int sel, unsigned long val)
{
        write_sysreg(sel, pmselr_el0);
        isb();
        write_sysreg(val, pmxevcntr_el0);
        isb();
}

/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
static inline unsigned long read_sel_evtyper(int sel)
{
        write_sysreg(sel, pmselr_el0);
        isb();
        return read_sysreg(pmxevtyper_el0);
}

/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
static inline void write_sel_evtyper(int sel, unsigned long val)
{
        write_sysreg(sel, pmselr_el0);
        isb();
        write_sysreg(val, pmxevtyper_el0);
        isb();
}

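/* Disable the PMU (clear PMCR_EL0.E) and reset the event counters (set PMCR_EL0.P) */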
static void pmu_disable_reset(void)
{
        uint64_t pmcr = read_sysreg(pmcr_el0);

        /* Reset all counters, disabling them */
        pmcr &= ~ARMV8_PMU_PMCR_E;
        write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
        isb();
}

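/*
 * Direct accessors for PMEV{CNTR,TYPER}<n>_EL0. The register number is part
 * of the instruction encoding, so PMEVN_SWITCH() expands to a switch that
 * picks the register matching @n at runtime.
 */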
#define RETURN_READ_PMEVCNTRN(n) \
        return read_sysreg(pmevcntr##n##_el0)
static unsigned long read_pmevcntrn(int n)
{
        PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
        return 0;
}

#define WRITE_PMEVCNTRN(n) \
        write_sysreg(val, pmevcntr##n##_el0)
static void write_pmevcntrn(int n, unsigned long val)
{
        PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
        isb();
}

#define READ_PMEVTYPERN(n) \
        return read_sysreg(pmevtyper##n##_el0)
static unsigned long read_pmevtypern(int n)
{
        PMEVN_SWITCH(n, READ_PMEVTYPERN);
        return 0;
}

#define WRITE_PMEVTYPERN(n) \
        write_sysreg(val, pmevtyper##n##_el0)
static void write_pmevtypern(int n, unsigned long val)
{
        PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
        isb();
}

/*
 * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
 * accessors that test cases will use. Each of the accessors will either
 * directly read/write PMEV{CNTR,TYPER}<n>_EL0
 * (i.e. {read,write}_pmev{cnt,type}rn()), or read/write them through
 * PMXEV{CNTR,TYPER}_EL0 (i.e. {read,write}_sel_ev{cnt,type}r()).
 *
 * This is used to test that combinations of those accessors provide
 * consistent behavior.
 */
struct pmc_accessor {
        /* A function to be used to read PMEVCNTR<n>_EL0 */
        unsigned long   (*read_cntr)(int idx);
        /* A function to be used to write PMEVCNTR<n>_EL0 */
        void            (*write_cntr)(int idx, unsigned long val);
        /* A function to be used to read PMEVTYPER<n>_EL0 */
        unsigned long   (*read_typer)(int idx);
        /* A function to be used to write PMEVTYPER<n>_EL0 */
        void            (*write_typer)(int idx, unsigned long val);
};

struct pmc_accessor pmc_accessors[] = {
        /* test with all direct accesses */
        { read_pmevcntrn, write_pmevcntrn, read_pmevtypern, write_pmevtypern },
        /* test with all indirect accesses */
        { read_sel_evcntr, write_sel_evcntr, read_sel_evtyper, write_sel_evtyper },
        /* read with direct accesses, and write with indirect accesses */
        { read_pmevcntrn, write_sel_evcntr, read_pmevtypern, write_sel_evtyper },
        /* read with indirect accesses, and write with direct accesses */
        { read_sel_evcntr, write_pmevcntrn, read_sel_evtyper, write_pmevtypern },
};

/*
 * Convert a pointer of pmc_accessor to an index in pmc_accessors[],
 * assuming that the pointer is one of the entries in pmc_accessors[].
 */
#define PMC_ACC_TO_IDX(acc)     (acc - &pmc_accessors[0])

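/*
 * Assert that the @mask bits in the system register @regname are set or
 * clear as indicated by @set_expected.
 */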
#define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected)                     \
{                                                                                \
        uint64_t _tval = read_sysreg(regname);                                   \
                                                                                 \
        if (set_expected)                                                        \
                __GUEST_ASSERT((_tval & mask),                                   \
                                "tval: 0x%lx; mask: 0x%lx; set_expected: %u",    \
                                _tval, mask, set_expected);                      \
        else                                                                     \
                __GUEST_ASSERT(!(_tval & mask),                                  \
                                "tval: 0x%lx; mask: 0x%lx; set_expected: %u",    \
                                _tval, mask, set_expected);                      \
}

/*
 * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers
 * are set or cleared as specified in @set_expected.
 */
static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected)
{
        GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected);
        GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected);
        GUEST_ASSERT_BITMAP_REG(pmintenset_el1, mask, set_expected);
        GUEST_ASSERT_BITMAP_REG(pmintenclr_el1, mask, set_expected);
        GUEST_ASSERT_BITMAP_REG(pmovsset_el0, mask, set_expected);
        GUEST_ASSERT_BITMAP_REG(pmovsclr_el0, mask, set_expected);
}

/*
 * Check if the bit in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers corresponding
 * to the specified counter (@pmc_idx) can be read/written as expected.
 * When @set_op is true, it tries to set the bit for the counter in
 * those registers by writing the SET registers (the bit won't be set
 * if the counter is not implemented though).
 * Otherwise, it tries to clear the bits in the registers by writing
 * the CLR registers.
 * Then, it checks if the values indicated in the registers are as expected.
 */
static void test_bitmap_pmu_regs(int pmc_idx, bool set_op)
{
        uint64_t pmcr_n, test_bit = BIT(pmc_idx);
        bool set_expected = false;

        if (set_op) {
                write_sysreg(test_bit, pmcntenset_el0);
                write_sysreg(test_bit, pmintenset_el1);
                write_sysreg(test_bit, pmovsset_el0);

                /* The bit will be set only if the counter is implemented */
                pmcr_n = get_pmcr_n(read_sysreg(pmcr_el0));
                set_expected = (pmc_idx < pmcr_n) ? true : false;
        } else {
                write_sysreg(test_bit, pmcntenclr_el0);
                write_sysreg(test_bit, pmintenclr_el1);
                write_sysreg(test_bit, pmovsclr_el0);
        }
        check_bitmap_pmu_regs(test_bit, set_expected);
}

/*
 * Tests for reading/writing registers for the (implemented) event counter
 * specified by @pmc_idx.
 */
static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
{
        uint64_t write_data, read_data;

        /* Disable all PMCs and reset all PMCs to zero. */
        pmu_disable_reset();

        /*
         * Tests for reading/writing {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers.
         */

        /* Make sure that the bit in those registers is set to 0 */
        test_bitmap_pmu_regs(pmc_idx, false);
        /* Test if setting the bit in those registers works */
        test_bitmap_pmu_regs(pmc_idx, true);
        /* Test if clearing the bit in those registers works */
        test_bitmap_pmu_regs(pmc_idx, false);

        /*
         * Tests for reading/writing the event type register.
         */

        /*
         * Set the event type register to an arbitrary value just to test
         * reading/writing the register.
         * The Arm ARM says that for events 0x0000 to 0x003F, the value
         * indicated in the PMEVTYPER<n>_EL0.evtCount field is the value
         * written to the field even when the specified event is not
         * supported.
         */
        write_data = (ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMUV3_PERFCTR_INST_RETIRED);
        acc->write_typer(pmc_idx, write_data);
        read_data = acc->read_typer(pmc_idx);
        __GUEST_ASSERT(read_data == write_data,
                       "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
                       pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);

        /*
         * Tests for reading/writing the event count register.
         */

        read_data = acc->read_cntr(pmc_idx);

        /* The count value must be 0, as it is disabled and reset */
        __GUEST_ASSERT(read_data == 0,
                       "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx",
                       pmc_idx, PMC_ACC_TO_IDX(acc), read_data);

        write_data = read_data + pmc_idx + 0x12345;
        acc->write_cntr(pmc_idx, write_data);
        read_data = acc->read_cntr(pmc_idx);
        __GUEST_ASSERT(read_data == write_data,
                       "pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
                       pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);
}

#define INVALID_EC      (-1ul)
uint64_t expected_ec = INVALID_EC;

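/*
 * Guest handler for synchronous exceptions. Check that the exception class
 * matches expected_ec, skip the trapping instruction, and reset expected_ec
 * to INVALID_EC so TEST_EXCEPTION() can tell that the exception was taken.
 */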
static void guest_sync_handler(struct ex_regs *regs)
{
        uint64_t esr, ec;

        esr = read_sysreg(esr_el1);
        ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK;

        __GUEST_ASSERT(expected_ec == ec,
                        "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx; EC expected: 0x%lx",
                        regs->pc, esr, ec, expected_ec);

        /* skip the trapping instruction */
        regs->pc += 4;

        /* Use INVALID_EC to indicate an exception occurred */
        expected_ec = INVALID_EC;
}

/*
 * Run the given operation that should trigger an exception with the
 * given exception class. The exception handler (guest_sync_handler)
 * will reset expected_ec to INVALID_EC and skip the instruction that
 * trapped.
 */
#define TEST_EXCEPTION(ec, ops)                         \
({                                                      \
        GUEST_ASSERT(ec != INVALID_EC);                 \
        WRITE_ONCE(expected_ec, ec);                    \
        dsb(ish);                                       \
        ops;                                            \
        GUEST_ASSERT(expected_ec == INVALID_EC);        \
})

/*
 * Tests for reading/writing registers for the unimplemented event counter
 * specified by @pmc_idx (>= PMCR_EL0.N).
 */
static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
{
        /*
         * Reading/writing the event count/type registers should cause
         * an UNDEFINED exception.
         */
        TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_cntr(pmc_idx));
        TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_cntr(pmc_idx, 0));
        TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_typer(pmc_idx));
        TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_typer(pmc_idx, 0));
        /*
         * The bit corresponding to the (unimplemented) counter in
         * {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers should be RAZ.
         */
        test_bitmap_pmu_regs(pmc_idx, 1);
        test_bitmap_pmu_regs(pmc_idx, 0);
}

/*
 * The guest is configured with PMUv3 with @expected_pmcr_n number of
 * event counters.
 * Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and
 * if reading/writing PMU registers for implemented or unimplemented
 * counters works as expected.
 */
static void guest_code(uint64_t expected_pmcr_n)
{
        uint64_t pmcr, pmcr_n, unimp_mask;
        int i, pmc;

        __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
                        "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%x",
                        expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);

        pmcr = read_sysreg(pmcr_el0);
        pmcr_n = get_pmcr_n(pmcr);

        /* Make sure that PMCR_EL0.N indicates the value userspace set */
        __GUEST_ASSERT(pmcr_n == expected_pmcr_n,
                        "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
                        expected_pmcr_n, pmcr_n);

        /*
         * Make sure that (RAZ) bits corresponding to unimplemented event
         * counters in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers are reset
         * to zero.
         * (NOTE: bits for implemented event counters are reset to UNKNOWN)
         */
        unimp_mask = GENMASK_ULL(ARMV8_PMU_MAX_GENERAL_COUNTERS - 1, pmcr_n);
        check_bitmap_pmu_regs(unimp_mask, false);

        /*
         * Tests for reading/writing PMU registers for implemented counters.
         * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
         */
        for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
                for (pmc = 0; pmc < pmcr_n; pmc++)
                        test_access_pmc_regs(&pmc_accessors[i], pmc);
        }

        /*
         * Tests for reading/writing PMU registers for unimplemented counters.
         * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
         */
        for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
                for (pmc = pmcr_n; pmc < ARMV8_PMU_MAX_GENERAL_COUNTERS; pmc++)
                        test_access_invalid_pmc_regs(&pmc_accessors[i], pmc);
        }

        GUEST_DONE();
}

/* Create a VM that has one vCPU with PMUv3 configured. */
static void create_vpmu_vm(void *guest_code)
{
        struct kvm_vcpu_init init;
        uint8_t pmuver, ec;
        uint64_t dfr0, irq = 23;
        struct kvm_device_attr irq_attr = {
                .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
                .addr = (uint64_t)&irq,
        };
        struct kvm_device_attr init_attr = {
                .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                .attr = KVM_ARM_VCPU_PMU_V3_INIT,
        };

        /* The test creates the vpmu_vm multiple times. Ensure a clean state */
        memset(&vpmu_vm, 0, sizeof(vpmu_vm));

        vpmu_vm.vm = vm_create(1);
        vm_init_descriptor_tables(vpmu_vm.vm);
        for (ec = 0; ec < ESR_EC_NUM; ec++) {
                vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
                                        guest_sync_handler);
        }

        /* Create vCPU with PMUv3 */
        vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
        init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
        vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
        vcpu_init_descriptor_tables(vpmu_vm.vcpu);
        vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64);
        __TEST_REQUIRE(vpmu_vm.gic_fd >= 0,
                       "Failed to create vgic-v3, skipping");

        /* Make sure that PMUv3 support is indicated in the ID register */
        vcpu_get_reg(vpmu_vm.vcpu,
                     KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
        pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
        TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
                    pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
                    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);

        /* Initialize vPMU */
        vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
        vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
}

static void destroy_vpmu_vm(void)
{
        close(vpmu_vm.gic_fd);
        kvm_vm_free(vpmu_vm.vm);
}

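/* Run the vCPU with @pmcr_n as the guest_code() argument and handle its ucalls */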
static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
{
        struct ucall uc;

        vcpu_args_set(vcpu, 1, pmcr_n);
        vcpu_run(vcpu);
        switch (get_ucall(vcpu, &uc)) {
        case UCALL_ABORT:
                REPORT_GUEST_ASSERT(uc);
                break;
        case UCALL_DONE:
                break;
        default:
                TEST_FAIL("Unknown ucall %lu", uc.cmd);
                break;
        }
}

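/*
 * Create a VM with PMUv3 and attempt to set PMCR_EL0.N to @pmcr_n from
 * userspace. When @expect_fail is true, verify that KVM leaves the field
 * unchanged; otherwise, verify that the update takes effect.
 */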
static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
{
        struct kvm_vcpu *vcpu;
        uint64_t pmcr, pmcr_orig;

        create_vpmu_vm(guest_code);
        vcpu = vpmu_vm.vcpu;

        vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
        pmcr = pmcr_orig;

        /*
         * Update PMCR.N from userspace. The write should succeed, but setting
         * a value larger than the host limit must not modify the field.
         */
        set_pmcr_n(&pmcr, pmcr_n);
        vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
        vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);

        if (expect_fail)
                TEST_ASSERT(pmcr_orig == pmcr,
                            "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx",
                            pmcr, pmcr_n);
        else
                TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr),
                            "Failed to update PMCR.N to %lu (received: %lu)",
                            pmcr_n, get_pmcr_n(pmcr));
}

/*
 * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
 * and run the test.
 */
static void run_access_test(uint64_t pmcr_n)
{
        uint64_t sp;
        struct kvm_vcpu *vcpu;
        struct kvm_vcpu_init init;

        pr_debug("Test with pmcr_n %lu\n", pmcr_n);

        test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
        vcpu = vpmu_vm.vcpu;

        /* Save the initial sp to restore it later to run the guest again */
        vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);

        run_vcpu(vcpu, pmcr_n);

        /*
         * Reset and re-initialize the vCPU, and run the guest code again to
         * check if PMCR_EL0.N is preserved.
         */
        vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
        init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
        aarch64_vcpu_setup(vcpu, &init);
        vcpu_init_descriptor_tables(vcpu);
        vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
        vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);

        run_vcpu(vcpu, pmcr_n);

        destroy_vpmu_vm();
}

static struct pmreg_sets validity_check_reg_sets[] = {
        PMREG_SET(SYS_PMCNTENSET_EL0, SYS_PMCNTENCLR_EL0),
        PMREG_SET(SYS_PMINTENSET_EL1, SYS_PMINTENCLR_EL1),
        PMREG_SET(SYS_PMOVSSET_EL0, SYS_PMOVSCLR_EL0),
};

/*
 * Create a VM, and check if KVM handles userspace accesses to the PMU
 * register sets in @validity_check_reg_sets[] correctly.
 */
static void run_pmregs_validity_test(uint64_t pmcr_n)
{
        int i;
        struct kvm_vcpu *vcpu;
        uint64_t set_reg_id, clr_reg_id, reg_val;
        uint64_t valid_counters_mask, max_counters_mask;

        test_create_vpmu_vm_with_pmcr_n(pmcr_n, false);
        vcpu = vpmu_vm.vcpu;

        valid_counters_mask = get_counters_mask(pmcr_n);
        max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS);

        for (i = 0; i < ARRAY_SIZE(validity_check_reg_sets); i++) {
                set_reg_id = validity_check_reg_sets[i].set_reg_id;
                clr_reg_id = validity_check_reg_sets[i].clr_reg_id;

                /*
                 * Test if the 'set' and 'clr' variants of the registers
                 * are initialized based on the number of valid counters.
                 */
                vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
                TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
                            "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
                            KVM_ARM64_SYS_REG(set_reg_id), reg_val);

                vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
                TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
                            "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
                            KVM_ARM64_SYS_REG(clr_reg_id), reg_val);

                /*
                 * Using the 'set' variant, force-set the register to the
                 * max number of possible counters and test if KVM discards
                 * the bits for unimplemented counters as it should.
                 */
                vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), max_counters_mask);

                vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
                TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
                            "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
                            KVM_ARM64_SYS_REG(set_reg_id), reg_val);

                vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
                TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
                            "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
                            KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
        }

        destroy_vpmu_vm();
}

/*
 * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for
 * the vCPU to @pmcr_n, which is larger than the host value.
 * The attempt should fail as @pmcr_n is too big to set for the vCPU.
 */
static void run_error_test(uint64_t pmcr_n)
{
        pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);

        test_create_vpmu_vm_with_pmcr_n(pmcr_n, true);
        destroy_vpmu_vm();
}

/*
 * Return the default number of implemented PMU event counters excluding
 * the cycle counter (i.e. PMCR_EL0.N value) for the guest.
 */
static uint64_t get_pmcr_n_limit(void)
{
        uint64_t pmcr;

        create_vpmu_vm(guest_code);
        vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
        destroy_vpmu_vm();
        return get_pmcr_n(pmcr);
}

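/*
 * Probe the default PMCR_EL0.N for the guest, run the access and register
 * validity tests for every PMCR.N value from 0 up to that limit, and then
 * verify that values larger than the limit cannot be set.
 */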
int main(void)
{
        uint64_t i, pmcr_n;

        TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));

        pmcr_n = get_pmcr_n_limit();
        for (i = 0; i <= pmcr_n; i++) {
                run_access_test(i);
                run_pmregs_validity_test(i);
        }

        for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++)
                run_error_test(i);

        return 0;
}
