~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/tools/testing/selftests/kvm/aarch64/set_id_regs.c

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * set_id_regs - Test for setting ID registers from userspace.
  4  *
  5  * Copyright (c) 2023 Google LLC.
  6  *
  7  *
  8  * Test that KVM supports setting ID registers from userspace and handles the
  9  * feature set correctly.
 10  */
 11 
 12 #include <stdint.h>
 13 #include "kvm_util.h"
 14 #include "processor.h"
 15 #include "test_util.h"
 16 #include <linux/bitfield.h>
 17 
/*
 * How a feature field's value may be changed while remaining "safe",
 * mirroring the arm64 kernel's ftr_type classification.
 */
enum ftr_type {
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
	FTR_END,			/* Mark the last ftr bits */
};
 25 
 26 #define FTR_SIGNED      true    /* Value should be treated as signed */
 27 #define FTR_UNSIGNED    false   /* Value should be treated as unsigned */
 28 
/* Describes one feature field of an ID register and how to test it. */
struct reg_ftr_bits {
	char *name;		/* human-readable field name for test output */
	bool sign;		/* FTR_SIGNED or FTR_UNSIGNED */
	enum ftr_type type;	/* how the field may safely change (see above) */
	uint8_t shift;		/* bit position of the field's LSB */
	uint64_t mask;		/* in-place bitmask covering the field */
	/*
	 * For FTR_EXACT, safe_val is used as the exact safe value.
	 * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
	 */
	int64_t safe_val;
};
 41 
/* Binds an ID register encoding to its table of testable fields. */
struct test_feature_reg {
	uint32_t reg;				/* SYS_* register encoding */
	const struct reg_ftr_bits *ftr_bits;	/* FTR_END-terminated table */
};
 46 
/*
 * Build one reg_ftr_bits entry; NAME is stringified for test reporting.
 * SHIFT and MASK are expected to resolve to the <reg>_<field>_SHIFT/_MASK
 * definitions the callers paste together below.
 */
#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL)	\
	{								\
		.name = #NAME,						\
		.sign = SIGNED,						\
		.type = TYPE,						\
		.shift = SHIFT,						\
		.mask = MASK,						\
		.safe_val = SAFE_VAL,					\
	}

/* Entry for an unsigned feature field. */
#define REG_FTR_BITS(type, reg, field, safe_val) \
	__REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
		       reg##_##field##_MASK, safe_val)

/* Entry for a signed feature field. */
#define S_REG_FTR_BITS(type, reg, field, safe_val) \
	__REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \
		       reg##_##field##_MASK, safe_val)

/* Sentinel terminating every table below. */
#define REG_FTR_END					\
	{						\
		.type = FTR_END,			\
	}
/*
 * Per-register tables of feature fields to exercise. Each entry names a
 * field and how its value may safely change; every table ends with
 * REG_FTR_END so iteration stops at .type == FTR_END.
 */
static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, ID_AA64DFR0_EL1_DebugVer_IMP),
	REG_FTR_END,
};

/* AArch32 debug feature register (RAZ/WI on AArch64-only systems). */
static const struct reg_ftr_bits ftr_id_dfr0_el1[] = {
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, ID_DFR0_EL1_PerfMon_PMUv3),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, ID_DFR0_EL1_CopDbg_Armv8),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RNDR, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TLB, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, FHM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, DP, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM4, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RDM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TME, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, ATOMIC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, CRC32, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA1, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar1_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LS64, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, XS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, I8MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DGH, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, BF16, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SPECRES, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SB, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FRINTTS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LRCPC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FCMA, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, JSCVT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DPB, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, BC, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, RPRES, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, WFxT, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 0),
	REG_FTR_END,
};

/* TGRAN4/TGRAN64 are signed fields (larger-is-better with -1 = absent). */
static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0),
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN4, 0),
	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN64, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN16, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ASIDBITS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
	REG_FTR_END,
};

/* SpecSEI is the one FTR_HIGHER_SAFE field exercised by this test. */
static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0),
	REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HPDS, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HAFDBS, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64mmfr2_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, E0PD, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, BBM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, TTL, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, AT, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, ST, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, VARange, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, IESB, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, LSM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, UAO, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, CnP, 0),
	REG_FTR_END,
};

static const struct reg_ftr_bits ftr_id_aa64zfr0_el1[] = {
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F64MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F32MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, I8MM, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SM4, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SHA3, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BF16, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BitPerm, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, AES, 0),
	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SVEver, 0),
	REG_FTR_END,
};
188 
/* Pair a register encoding with its feature-field table. */
#define TEST_REG(id, table)			\
	{					\
		.reg = id,			\
		.ftr_bits = &((table)[0]),	\
	}

/* All ID registers the host-side test iterates over. */
static struct test_feature_reg test_regs[] = {
	TEST_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0_el1),
	TEST_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0_el1),
	TEST_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0_el1),
	TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
	TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
	TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
	TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
	TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
	TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
	TEST_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0_el1),
};
207 
208 #define GUEST_REG_SYNC(id) GUEST_SYNC_ARGS(0, id, read_sysreg_s(id), 0, 0);
209 
210 static void guest_code(void)
211 {
212         GUEST_REG_SYNC(SYS_ID_AA64DFR0_EL1);
213         GUEST_REG_SYNC(SYS_ID_DFR0_EL1);
214         GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1);
215         GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1);
216         GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
217         GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
218         GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
219         GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
220         GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
221         GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
222         GUEST_REG_SYNC(SYS_CTR_EL0);
223 
224         GUEST_DONE();
225 }
226 
/*
 * Return a value for the field described by ftr_bits that KVM must accept,
 * derived from the field's current value @ftr (already shifted down to
 * bit 0). For signed fields currently holding all-ones (i.e. -1, which
 * typically encodes "not implemented") the value is returned unchanged.
 */
uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
{
	/* All-ones for a 4-bit feature field. */
	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);

	if (ftr_bits->sign == FTR_UNSIGNED) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = ftr_bits->safe_val;
			break;
		case FTR_LOWER_SAFE:
			/* Step down, but never below the minimal safe value. */
			if (ftr > ftr_bits->safe_val)
				ftr--;
			break;
		case FTR_HIGHER_SAFE:
			/* Step up unless already at the field maximum. */
			if (ftr < ftr_max)
				ftr++;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			/* 0 ranks above ftr_max; step up otherwise. */
			if (ftr == ftr_max)
				ftr = 0;
			else if (ftr != 0)
				ftr++;
			break;
		default:
			break;
		}
	} else if (ftr != ftr_max) {
		/* Signed field not at -1: positive values only below. */
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = ftr_bits->safe_val;
			break;
		case FTR_LOWER_SAFE:
			if (ftr > ftr_bits->safe_val)
				ftr--;
			break;
		case FTR_HIGHER_SAFE:
			/* ftr_max would be -1 when signed, so cap one below. */
			if (ftr < ftr_max - 1)
				ftr++;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			if (ftr != 0 && ftr != ftr_max - 1)
				ftr++;
			break;
		default:
			break;
		}
	}

	return ftr;
}
278 
/*
 * Return a value for the field described by ftr_bits that KVM must reject,
 * derived from the field's current value @ftr (already shifted down to
 * bit 0). Mirror image of get_safe_value().
 */
uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
{
	/* All-ones for a 4-bit feature field. */
	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);

	if (ftr_bits->sign == FTR_UNSIGNED) {
		switch (ftr_bits->type) {
		case FTR_EXACT:
			/* Anything strictly above both safe_val and ftr. */
			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
			break;
		case FTR_LOWER_SAFE:
			ftr++;
			break;
		case FTR_HIGHER_SAFE:
			ftr--;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			/* 0 is the top rank, so "above 0" wraps to ftr_max. */
			if (ftr == 0)
				ftr = ftr_max;
			else
				ftr--;
			break;
		default:
			break;
		}
	} else if (ftr != ftr_max) {
		/* Signed field not currently at -1. */
		switch (ftr_bits->type) {
		case FTR_EXACT:
			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
			break;
		case FTR_LOWER_SAFE:
			ftr++;
			break;
		case FTR_HIGHER_SAFE:
			ftr--;
			break;
		case FTR_HIGHER_OR_ZERO_SAFE:
			/* Avoid ftr_max (-1 when signed): use one below it. */
			if (ftr == 0)
				ftr = ftr_max - 1;
			else
				ftr--;
			break;
		default:
			break;
		}
	} else {
		/* Field is -1 (signed): 0 is treated as invalid. */
		ftr = 0;
	}

	return ftr;
}
330 
331 static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
332                                      const struct reg_ftr_bits *ftr_bits)
333 {
334         uint8_t shift = ftr_bits->shift;
335         uint64_t mask = ftr_bits->mask;
336         uint64_t val, new_val, ftr;
337 
338         vcpu_get_reg(vcpu, reg, &val);
339         ftr = (val & mask) >> shift;
340 
341         ftr = get_safe_value(ftr_bits, ftr);
342 
343         ftr <<= shift;
344         val &= ~mask;
345         val |= ftr;
346 
347         vcpu_set_reg(vcpu, reg, val);
348         vcpu_get_reg(vcpu, reg, &new_val);
349         TEST_ASSERT_EQ(new_val, val);
350 
351         return new_val;
352 }
353 
354 static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
355                               const struct reg_ftr_bits *ftr_bits)
356 {
357         uint8_t shift = ftr_bits->shift;
358         uint64_t mask = ftr_bits->mask;
359         uint64_t val, old_val, ftr;
360         int r;
361 
362         vcpu_get_reg(vcpu, reg, &val);
363         ftr = (val & mask) >> shift;
364 
365         ftr = get_invalid_value(ftr_bits, ftr);
366 
367         old_val = val;
368         ftr <<= shift;
369         val &= ~mask;
370         val |= ftr;
371 
372         r = __vcpu_set_reg(vcpu, reg, val);
373         TEST_ASSERT(r < 0 && errno == EINVAL,
374                     "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
375 
376         vcpu_get_reg(vcpu, reg, &val);
377         TEST_ASSERT_EQ(val, old_val);
378 }
379 
/* Values written to each register, indexed by feature-ID range slot. */
static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];

/* Map a sysreg encoding to its slot in the feature-ID range arrays. */
#define encoding_to_range_idx(encoding)							\
	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(encoding), sys_reg_Op1(encoding),	\
				     sys_reg_CRn(encoding), sys_reg_CRm(encoding),	\
				     sys_reg_Op2(encoding))
386 
387 
/*
 * Exercise every field of every register in test_regs: confirm KVM
 * advertises the field as writable, that invalid values are rejected,
 * and that safe values stick. Records each committed register value in
 * test_reg_vals for the guest-read and reset phases.
 */
static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
{
	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
	struct reg_mask_range range = {
		.addr = (__u64)masks,
	};
	int ret;

	/* KVM should return error when reserved field is not zero */
	range.reserved[0] = 1;
	ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
	TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");

	/* Get writable masks for feature ID registers */
	memset(range.reserved, 0, sizeof(range.reserved));
	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);

	for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
		const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
		uint32_t reg_id = test_regs[i].reg;
		uint64_t reg = KVM_ARM64_SYS_REG(reg_id);
		int idx;

		/* Get the index to masks array for the idreg */
		idx = encoding_to_range_idx(reg_id);

		for (int j = 0;  ftr_bits[j].type != FTR_END; j++) {
			/* Skip aarch32 reg on aarch64 only system, since they are RAZ/WI. */
			if (aarch64_only && sys_reg_CRm(reg_id) < 4) {
				ksft_test_result_skip("%s on AARCH64 only system\n",
						      ftr_bits[j].name);
				continue;
			}

			/* Make sure the feature field is writable */
			TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);

			test_reg_set_fail(vcpu, reg, &ftr_bits[j]);

			/* Last successful write wins for this register's slot. */
			test_reg_vals[idx] = test_reg_set_success(vcpu, reg,
								  &ftr_bits[j]);

			ksft_test_result_pass("%s\n", ftr_bits[j].name);
		}
	}
}
434 
435 static void test_guest_reg_read(struct kvm_vcpu *vcpu)
436 {
437         bool done = false;
438         struct ucall uc;
439 
440         while (!done) {
441                 vcpu_run(vcpu);
442 
443                 switch (get_ucall(vcpu, &uc)) {
444                 case UCALL_ABORT:
445                         REPORT_GUEST_ASSERT(uc);
446                         break;
447                 case UCALL_SYNC:
448                         /* Make sure the written values are seen by guest */
449                         TEST_ASSERT_EQ(test_reg_vals[encoding_to_range_idx(uc.args[2])],
450                                        uc.args[3]);
451                         break;
452                 case UCALL_DONE:
453                         done = true;
454                         break;
455                 default:
456                         TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
457                 }
458         }
459 }
460 
/* Politely lifted from arch/arm64/include/asm/cache.h */
/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
#define CLIDR_CTYPE_SHIFT(level)	(3 * (level - 1))
#define CLIDR_CTYPE_MASK(level)		(7 << CLIDR_CTYPE_SHIFT(level))
/* Extract the 3-bit cache-type field for a given level (1..7). */
#define CLIDR_CTYPE(clidr, level)	\
	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
467 
468 static void test_clidr(struct kvm_vcpu *vcpu)
469 {
470         uint64_t clidr;
471         int level;
472 
473         vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), &clidr);
474 
475         /* find the first empty level in the cache hierarchy */
476         for (level = 1; level < 7; level++) {
477                 if (!CLIDR_CTYPE(clidr, level))
478                         break;
479         }
480 
481         /*
482          * If you have a mind-boggling 7 levels of cache, congratulations, you
483          * get to fix this.
484          */
485         TEST_ASSERT(level <= 7, "can't find an empty level in cache hierarchy");
486 
487         /* stick in a unified cache level */
488         clidr |= BIT(2) << CLIDR_CTYPE_SHIFT(level);
489 
490         vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), clidr);
491         test_reg_vals[encoding_to_range_idx(SYS_CLIDR_EL1)] = clidr;
492 }
493 
/*
 * Modify CTR_EL0 to a different-but-valid value, write it back, and
 * record the value for the reset check.
 */
static void test_ctr(struct kvm_vcpu *vcpu)
{
	u64 ctr;

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), &ctr);
	/* Clear DIC; presumably claiming less capability is always legal. */
	ctr &= ~CTR_EL0_DIC_MASK;
	/* Shrink IminLine if nonzero so the written value differs — NOTE(review):
	 * decrementing the whole register only touches IminLine because it
	 * occupies the low bits; confirm against the CTR_EL0 layout. */
	if (ctr & CTR_EL0_IminLine_MASK)
		ctr--;

	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), ctr);
	test_reg_vals[encoding_to_range_idx(SYS_CTR_EL0)] = ctr;
}
506 
507 static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
508 {
509         u64 val;
510 
511         test_clidr(vcpu);
512         test_ctr(vcpu);
513 
514         vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &val);
515         val++;
516         vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), val);
517 
518         test_reg_vals[encoding_to_range_idx(SYS_MPIDR_EL1)] = val;
519         ksft_test_result_pass("%s\n", __func__);
520 }
521 
522 static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encoding)
523 {
524         size_t idx = encoding_to_range_idx(encoding);
525         uint64_t observed;
526 
527         vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding), &observed);
528         TEST_ASSERT_EQ(test_reg_vals[idx], observed);
529 }
530 
531 static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
532 {
533         /*
534          * Calls KVM_ARM_VCPU_INIT behind the scenes, which will do an
535          * architectural reset of the vCPU.
536          */
537         aarch64_vcpu_setup(vcpu, NULL);
538 
539         for (int i = 0; i < ARRAY_SIZE(test_regs); i++)
540                 test_assert_id_reg_unchanged(vcpu, test_regs[i].reg);
541 
542         test_assert_id_reg_unchanged(vcpu, SYS_MPIDR_EL1);
543         test_assert_id_reg_unchanged(vcpu, SYS_CLIDR_EL1);
544         test_assert_id_reg_unchanged(vcpu, SYS_CTR_EL0);
545 
546         ksft_test_result_pass("%s\n", __func__);
547 }
548 
/*
 * Entry point: requires KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES; runs the
 * writable-mask/field tests, the per-vCPU register tests, the in-guest
 * readback check, and finally the reset-preservation check.
 */
int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	bool aarch64_only;
	uint64_t val, el0;
	int test_cnt;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	/* Check for AARCH64 only system */
	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
	el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
	aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

	ksft_print_header();

	/*
	 * One ksft result per field: each table contributes its entry count
	 * minus the REG_FTR_END sentinel, hence the subtraction of one per
	 * table (ARRAY_SIZE(test_regs) tables). The +2 covers the single
	 * results from test_vcpu_ftr_id_regs() and
	 * test_reset_preserves_id_regs().
	 */
	test_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) +
		   ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) +
		   ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
		   ARRAY_SIZE(ftr_id_aa64mmfr0_el1) + ARRAY_SIZE(ftr_id_aa64mmfr1_el1) +
		   ARRAY_SIZE(ftr_id_aa64mmfr2_el1) + ARRAY_SIZE(ftr_id_aa64zfr0_el1) -
		   ARRAY_SIZE(test_regs) + 2;

	ksft_set_plan(test_cnt);

	test_vm_ftr_id_regs(vcpu, aarch64_only);
	test_vcpu_ftr_id_regs(vcpu);

	test_guest_reg_read(vcpu);

	test_reset_preserves_id_regs(vcpu);

	kvm_vm_free(vm);

	ksft_finished();
}
588 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php