TOMOYO Linux Cross Reference
Linux/arch/arm64/kvm/sys_regs.c

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * Copyright (C) 2012,2013 - ARM Ltd
  4  * Author: Marc Zyngier <marc.zyngier@arm.com>
  5  *
  6  * Derived from arch/arm/kvm/coproc.c:
  7  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  8  * Authors: Rusty Russell <rusty@rustcorp.com.au>
  9  *          Christoffer Dall <c.dall@virtualopensystems.com>
 10  */
 11 
 12 #include <linux/bitfield.h>
 13 #include <linux/bsearch.h>
 14 #include <linux/cacheinfo.h>
 15 #include <linux/debugfs.h>
 16 #include <linux/kvm_host.h>
 17 #include <linux/mm.h>
 18 #include <linux/printk.h>
 19 #include <linux/uaccess.h>
 20 
 21 #include <asm/cacheflush.h>
 22 #include <asm/cputype.h>
 23 #include <asm/debug-monitors.h>
 24 #include <asm/esr.h>
 25 #include <asm/kvm_arm.h>
 26 #include <asm/kvm_emulate.h>
 27 #include <asm/kvm_hyp.h>
 28 #include <asm/kvm_mmu.h>
 29 #include <asm/kvm_nested.h>
 30 #include <asm/perf_event.h>
 31 #include <asm/sysreg.h>
 32 
 33 #include <trace/events/kvm.h>
 34 
 35 #include "sys_regs.h"
 36 #include "vgic/vgic.h"
 37 
 38 #include "trace.h"
 39 
 40 /*
 41  * For AArch32, we only take care of what is being trapped. Anything
 42  * that has to do with init and userspace access has to go via the
 43  * 64bit interface.
 44  */
 45 
 46 static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
 47 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 48                       u64 val);
 49 
 50 static bool bad_trap(struct kvm_vcpu *vcpu,
 51                      struct sys_reg_params *params,
 52                      const struct sys_reg_desc *r,
 53                      const char *msg)
 54 {
 55         WARN_ONCE(1, "Unexpected %s\n", msg);
 56         print_sys_reg_instr(params);
 57         kvm_inject_undefined(vcpu);
 58         return false;
 59 }
 60 
 61 static bool read_from_write_only(struct kvm_vcpu *vcpu,
 62                                  struct sys_reg_params *params,
 63                                  const struct sys_reg_desc *r)
 64 {
 65         return bad_trap(vcpu, params, r,
 66                         "sys_reg read to write-only register");
 67 }
 68 
 69 static bool write_to_read_only(struct kvm_vcpu *vcpu,
 70                                struct sys_reg_params *params,
 71                                const struct sys_reg_desc *r)
 72 {
 73         return bad_trap(vcpu, params, r,
 74                         "sys_reg write to read-only register");
 75 }
 76 
 77 #define PURE_EL2_SYSREG(el2)                                            \
 78         case el2: {                                                     \
 79                 *el1r = el2;                                            \
 80                 return true;                                            \
 81         }
 82 
 83 #define MAPPED_EL2_SYSREG(el2, el1, fn)                                 \
 84         case el2: {                                                     \
 85                 *xlate = fn;                                            \
 86                 *el1r = el1;                                            \
 87                 return true;                                            \
 88         }
 89 
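    /*
     * For illustration (not in the original macro comments): the
     * MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1, translate_sctlr_el2_to_sctlr_el1)
     * entry below expands to roughly:
     *
     *      case SCTLR_EL2: {
     *              *xlate = translate_sctlr_el2_to_sctlr_el1;
     *              *el1r = SCTLR_EL1;
     *              return true;
     *      }
     *
     * i.e. the lookup tells the caller which EL1 register shadows the
     * EL2 one and, when needed, how to translate between the two formats.
     */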
 90 static bool get_el2_to_el1_mapping(unsigned int reg,
 91                                    unsigned int *el1r, u64 (**xlate)(u64))
 92 {
 93         switch (reg) {
 94                 PURE_EL2_SYSREG(  VPIDR_EL2     );
 95                 PURE_EL2_SYSREG(  VMPIDR_EL2    );
 96                 PURE_EL2_SYSREG(  ACTLR_EL2     );
 97                 PURE_EL2_SYSREG(  HCR_EL2       );
 98                 PURE_EL2_SYSREG(  MDCR_EL2      );
 99                 PURE_EL2_SYSREG(  HSTR_EL2      );
100                 PURE_EL2_SYSREG(  HACR_EL2      );
101                 PURE_EL2_SYSREG(  VTTBR_EL2     );
102                 PURE_EL2_SYSREG(  VTCR_EL2      );
103                 PURE_EL2_SYSREG(  RVBAR_EL2     );
104                 PURE_EL2_SYSREG(  TPIDR_EL2     );
105                 PURE_EL2_SYSREG(  HPFAR_EL2     );
106                 PURE_EL2_SYSREG(  CNTHCTL_EL2   );
107                 MAPPED_EL2_SYSREG(SCTLR_EL2,   SCTLR_EL1,
108                                   translate_sctlr_el2_to_sctlr_el1           );
109                 MAPPED_EL2_SYSREG(CPTR_EL2,    CPACR_EL1,
110                                   translate_cptr_el2_to_cpacr_el1            );
111                 MAPPED_EL2_SYSREG(TTBR0_EL2,   TTBR0_EL1,
112                                   translate_ttbr0_el2_to_ttbr0_el1           );
113                 MAPPED_EL2_SYSREG(TTBR1_EL2,   TTBR1_EL1,   NULL             );
114                 MAPPED_EL2_SYSREG(TCR_EL2,     TCR_EL1,
115                                   translate_tcr_el2_to_tcr_el1               );
116                 MAPPED_EL2_SYSREG(VBAR_EL2,    VBAR_EL1,    NULL             );
117                 MAPPED_EL2_SYSREG(AFSR0_EL2,   AFSR0_EL1,   NULL             );
118                 MAPPED_EL2_SYSREG(AFSR1_EL2,   AFSR1_EL1,   NULL             );
119                 MAPPED_EL2_SYSREG(ESR_EL2,     ESR_EL1,     NULL             );
120                 MAPPED_EL2_SYSREG(FAR_EL2,     FAR_EL1,     NULL             );
121                 MAPPED_EL2_SYSREG(MAIR_EL2,    MAIR_EL1,    NULL             );
122                 MAPPED_EL2_SYSREG(AMAIR_EL2,   AMAIR_EL1,   NULL             );
123                 MAPPED_EL2_SYSREG(ELR_EL2,     ELR_EL1,     NULL             );
124                 MAPPED_EL2_SYSREG(SPSR_EL2,    SPSR_EL1,    NULL             );
125                 MAPPED_EL2_SYSREG(ZCR_EL2,     ZCR_EL1,     NULL             );
126         default:
127                 return false;
128         }
129 }
130 
131 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
132 {
133         u64 val = 0x8badf00d8badf00d;
134         u64 (*xlate)(u64) = NULL;
135         unsigned int el1r;
136 
137         if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
138                 goto memory_read;
139 
140         if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
141                 if (!is_hyp_ctxt(vcpu))
142                         goto memory_read;
143 
144                 /*
145                  * If this register does not have an EL1 counterpart,
146                  * then read the stored EL2 version.
147                  */
148                 if (reg == el1r)
149                         goto memory_read;
150 
151                 /*
152                  * If we have a non-VHE guest and the sysreg
153                  * requires translation to be used at EL1, use the
154                  * in-memory copy instead.
155                  */
156                 if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
157                         goto memory_read;
158 
159                 /* Get the current version of the EL1 counterpart. */
160                 WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
161                 return val;
162         }
163 
164         /* EL1 register can't be on the CPU if the guest is in vEL2. */
165         if (unlikely(is_hyp_ctxt(vcpu)))
166                 goto memory_read;
167 
168         if (__vcpu_read_sys_reg_from_cpu(reg, &val))
169                 return val;
170 
171 memory_read:
172         return __vcpu_sys_reg(vcpu, reg);
173 }
174 
175 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
176 {
177         u64 (*xlate)(u64) = NULL;
178         unsigned int el1r;
179 
180         if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
181                 goto memory_write;
182 
183         if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
184                 if (!is_hyp_ctxt(vcpu))
185                         goto memory_write;
186 
187                 /*
188                  * Always store a copy of the write to memory to avoid having
189                  * to reverse-translate virtual EL2 system registers for a
190                  * non-VHE guest hypervisor.
191                  */
192                 __vcpu_sys_reg(vcpu, reg) = val;
193 
194                 /* No EL1 counterpart? We're done here. */
195                 if (reg == el1r)
196                         return;
197 
198                 if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
199                         val = xlate(val);
200 
201                 /* Redirect this to the EL1 version of the register. */
202                 WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
203                 return;
204         }
205 
206         /* EL1 register can't be on the CPU if the guest is in vEL2. */
207         if (unlikely(is_hyp_ctxt(vcpu)))
208                 goto memory_write;
209 
210         if (__vcpu_write_sys_reg_to_cpu(val, reg))
211                 return;
212 
213 memory_write:
214          __vcpu_sys_reg(vcpu, reg) = val;
215 }
216 
217 /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
218 #define CSSELR_MAX 14
219 
220 /*
221  * Returns the minimum line size for the selected cache, expressed as
222  * Log2(bytes).
223  */
224 static u8 get_min_cache_line_size(bool icache)
225 {
226         u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
227         u8 field;
228 
229         if (icache)
230                 field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
231         else
232                 field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);
233 
234         /*
235          * Cache line size is represented as Log2(words) in CTR_EL0.
236          * Log2(bytes) can be derived with the following:
237          *
238          * Log2(words) + 2 = Log2(bytes / 4) + 2
239          *                 = Log2(bytes) - 2 + 2
240          *                 = Log2(bytes)
241          */
242         return field + 2;
243 }
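    /*
     * Worked example (for illustration): a DminLine field of 4 means
     * 2^4 = 16 words = 64 bytes per line, so the function returns
     * 4 + 2 = 6 == Log2(64).
     */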
244 
245 /* Which cache CCSIDR represents depends on CSSELR value. */
246 static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
247 {
248         u8 line_size;
249 
250         if (vcpu->arch.ccsidr)
251                 return vcpu->arch.ccsidr[csselr];
252 
253         line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);
254 
255         /*
256          * Fabricate a CCSIDR value as the overriding value does not exist.
257          * The real CCSIDR value will not be used as it can vary by the
258          * physical CPU which the vcpu currently resides in.
259          *
260          * The line size is determined with get_min_cache_line_size(), which
261          * should be valid for all CPUs even if they have different cache
262          * configuration.
263          *
264          * The associativity bits are cleared, meaning the geometry of all data
265          * and unified caches (which are guaranteed to be PIPT and thus
266          * non-aliasing) is 1 set and 1 way.
267          * Guests should not be doing cache operations by set/way at all, and
268          * for this reason, we trap them and attempt to infer the intent, so
269          * that we can flush the entire guest's address space at the appropriate
270          * time. The exposed geometry minimizes the number of traps.
271          * [If guests should attempt to infer aliasing properties from the
272          * geometry (which is not permitted by the architecture), they would
273          * only do so for virtually indexed caches.]
274          *
275          * We don't check if the cache level exists as it is allowed to return
276          * an UNKNOWN value if not.
277          */
278         return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
279 }
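    /*
     * For illustration: with 64-byte lines, line_size is 6 and the
     * fabricated CCSIDR_EL1.LineSize field is 6 - 4 = 2, i.e.
     * Log2(bytes per line) - 4. set_ccsidr() below applies the
     * inverse (+ 4) conversion when userspace writes the register.
     */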
280 
281 static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
282 {
283         u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
284         u32 *ccsidr = vcpu->arch.ccsidr;
285         u32 i;
286 
287         if ((val & CCSIDR_EL1_RES0) ||
288             line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
289                 return -EINVAL;
290 
291         if (!ccsidr) {
292                 if (val == get_ccsidr(vcpu, csselr))
293                         return 0;
294 
295                 ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
296                 if (!ccsidr)
297                         return -ENOMEM;
298 
299                 for (i = 0; i < CSSELR_MAX; i++)
300                         ccsidr[i] = get_ccsidr(vcpu, i);
301 
302                 vcpu->arch.ccsidr = ccsidr;
303         }
304 
305         ccsidr[csselr] = val;
306 
307         return 0;
308 }
309 
310 static bool access_rw(struct kvm_vcpu *vcpu,
311                       struct sys_reg_params *p,
312                       const struct sys_reg_desc *r)
313 {
314         if (p->is_write)
315                 vcpu_write_sys_reg(vcpu, p->regval, r->reg);
316         else
317                 p->regval = vcpu_read_sys_reg(vcpu, r->reg);
318 
319         return true;
320 }
321 
322 /*
323  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
324  */
325 static bool access_dcsw(struct kvm_vcpu *vcpu,
326                         struct sys_reg_params *p,
327                         const struct sys_reg_desc *r)
328 {
329         if (!p->is_write)
330                 return read_from_write_only(vcpu, p, r);
331 
332         /*
333          * Only track S/W ops if we don't have FWB. It still indicates
334          * that the guest is a bit broken (S/W operations should only
335          * be done by firmware, knowing that there is only a single
336          * CPU left in the system, and certainly not from non-secure
337          * software).
338          */
339         if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
340                 kvm_set_way_flush(vcpu);
341 
342         return true;
343 }
344 
345 static bool access_dcgsw(struct kvm_vcpu *vcpu,
346                          struct sys_reg_params *p,
347                          const struct sys_reg_desc *r)
348 {
349         if (!kvm_has_mte(vcpu->kvm)) {
350                 kvm_inject_undefined(vcpu);
351                 return false;
352         }
353 
354         /* Treat MTE S/W ops as we treat the classic ones: with contempt */
355         return access_dcsw(vcpu, p, r);
356 }
357 
358 static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
359 {
360         switch (r->aarch32_map) {
361         case AA32_LO:
362                 *mask = GENMASK_ULL(31, 0);
363                 *shift = 0;
364                 break;
365         case AA32_HI:
366                 *mask = GENMASK_ULL(63, 32);
367                 *shift = 32;
368                 break;
369         default:
370                 *mask = GENMASK_ULL(63, 0);
371                 *shift = 0;
372                 break;
373         }
374 }
375 
376 /*
377  * Generic accessor for VM registers. Only called as long as HCR_TVM
378  * is set. If the guest enables the MMU, we stop trapping the VM
379  * sys_regs and leave it in complete control of the caches.
380  */
381 static bool access_vm_reg(struct kvm_vcpu *vcpu,
382                           struct sys_reg_params *p,
383                           const struct sys_reg_desc *r)
384 {
385         bool was_enabled = vcpu_has_cache_enabled(vcpu);
386         u64 val, mask, shift;
387 
388         if (reg_to_encoding(r) == SYS_TCR2_EL1 &&
389             !kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, TCRX, IMP)) {
390                 kvm_inject_undefined(vcpu);
391                 return false;
392         }
393 
394         BUG_ON(!p->is_write);
395 
396         get_access_mask(r, &mask, &shift);
397 
398         if (~mask) {
399                 val = vcpu_read_sys_reg(vcpu, r->reg);
400                 val &= ~mask;
401         } else {
402                 val = 0;
403         }
404 
405         val |= (p->regval & (mask >> shift)) << shift;
406         vcpu_write_sys_reg(vcpu, val, r->reg);
407 
408         kvm_toggle_cache(vcpu, was_enabled);
409         return true;
410 }
411 
412 static bool access_actlr(struct kvm_vcpu *vcpu,
413                          struct sys_reg_params *p,
414                          const struct sys_reg_desc *r)
415 {
416         u64 mask, shift;
417 
418         if (p->is_write)
419                 return ignore_write(vcpu, p);
420 
421         get_access_mask(r, &mask, &shift);
422         p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;
423 
424         return true;
425 }
426 
427 /*
428  * Trap handler for the GICv3 SGI generation system register.
429  * Forward the request to the VGIC emulation.
430  * The cp15_64 code makes sure this automatically works
431  * for both AArch64 and AArch32 accesses.
432  */
433 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
434                            struct sys_reg_params *p,
435                            const struct sys_reg_desc *r)
436 {
437         bool g1;
438 
439         if (!kvm_has_gicv3(vcpu->kvm)) {
440                 kvm_inject_undefined(vcpu);
441                 return false;
442         }
443 
444         if (!p->is_write)
445                 return read_from_write_only(vcpu, p, r);
446 
447         /*
448          * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
449          * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
450          * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
451          * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
452          * group.
453          */
454         if (p->Op0 == 0) {              /* AArch32 */
455                 switch (p->Op1) {
456                 default:                /* Keep GCC quiet */
457                 case 0:                 /* ICC_SGI1R */
458                         g1 = true;
459                         break;
460                 case 1:                 /* ICC_ASGI1R */
461                 case 2:                 /* ICC_SGI0R */
462                         g1 = false;
463                         break;
464                 }
465         } else {                        /* AArch64 */
466                 switch (p->Op2) {
467                 default:                /* Keep GCC quiet */
468                 case 5:                 /* ICC_SGI1R_EL1 */
469                         g1 = true;
470                         break;
471                 case 6:                 /* ICC_ASGI1R_EL1 */
472                 case 7:                 /* ICC_SGI0R_EL1 */
473                         g1 = false;
474                         break;
475                 }
476         }
477 
478         vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
479 
480         return true;
481 }
482 
483 static bool access_gic_sre(struct kvm_vcpu *vcpu,
484                            struct sys_reg_params *p,
485                            const struct sys_reg_desc *r)
486 {
487         if (p->is_write)
488                 return ignore_write(vcpu, p);
489 
490         p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
491         return true;
492 }
493 
494 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
495                         struct sys_reg_params *p,
496                         const struct sys_reg_desc *r)
497 {
498         if (p->is_write)
499                 return ignore_write(vcpu, p);
500         else
501                 return read_zero(vcpu, p);
502 }
503 
504 static bool trap_undef(struct kvm_vcpu *vcpu,
505                        struct sys_reg_params *p,
506                        const struct sys_reg_desc *r)
507 {
508         kvm_inject_undefined(vcpu);
509         return false;
510 }
511 
512 /*
513  * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
514  * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
515  * system, these registers should UNDEF. LORID_EL1 being a RO register, we
516  * treat it separately.
517  */
518 static bool trap_loregion(struct kvm_vcpu *vcpu,
519                           struct sys_reg_params *p,
520                           const struct sys_reg_desc *r)
521 {
522         u32 sr = reg_to_encoding(r);
523 
524         if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) {
525                 kvm_inject_undefined(vcpu);
526                 return false;
527         }
528 
529         if (p->is_write && sr == SYS_LORID_EL1)
530                 return write_to_read_only(vcpu, p, r);
531 
532         return trap_raz_wi(vcpu, p, r);
533 }
534 
535 static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
536                            struct sys_reg_params *p,
537                            const struct sys_reg_desc *r)
538 {
539         u64 oslsr;
540 
541         if (!p->is_write)
542                 return read_from_write_only(vcpu, p, r);
543 
544         /* Forward the OSLK bit to OSLSR */
545         oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~OSLSR_EL1_OSLK;
546         if (p->regval & OSLAR_EL1_OSLK)
547                 oslsr |= OSLSR_EL1_OSLK;
548 
549         __vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
550         return true;
551 }
552 
553 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
554                            struct sys_reg_params *p,
555                            const struct sys_reg_desc *r)
556 {
557         if (p->is_write)
558                 return write_to_read_only(vcpu, p, r);
559 
560         p->regval = __vcpu_sys_reg(vcpu, r->reg);
561         return true;
562 }
563 
564 static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
565                          u64 val)
566 {
567         /*
568          * The only modifiable bit is the OSLK bit. Refuse the write if
569          * userspace attempts to change any other bit in the register.
570          */
571         if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
572                 return -EINVAL;
573 
574         __vcpu_sys_reg(vcpu, rd->reg) = val;
575         return 0;
576 }
577 
578 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
579                                    struct sys_reg_params *p,
580                                    const struct sys_reg_desc *r)
581 {
582         if (p->is_write) {
583                 return ignore_write(vcpu, p);
584         } else {
585                 p->regval = read_sysreg(dbgauthstatus_el1);
586                 return true;
587         }
588 }
589 
590 /*
591  * We want to avoid world-switching all the DBG registers all the
592  * time:
593  *
594  * - If we've touched any debug register, it is likely that we're
595  *   going to touch more of them. It then makes sense to disable the
596  *   traps and start doing the save/restore dance
597  * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
598  *   then mandatory to save/restore the registers, as the guest
599  *   depends on them.
600  *
601  * For this, we use a DIRTY bit, indicating the guest has modified the
602  * debug registers, used as follows:
603  *
604  * On guest entry:
605  * - If the dirty bit is set (because we're coming back from trapping),
606  *   disable the traps, save host registers, restore guest registers.
607  * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
608  *   set the dirty bit, disable the traps, save host registers,
609  *   restore guest registers.
610  * - Otherwise, enable the traps
611  *
612  * On guest exit:
613  * - If the dirty bit is set, save guest registers, restore host
614  *   registers and clear the dirty bit. This ensures that the host can
615  *   now use the debug registers.
616  */
617 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
618                             struct sys_reg_params *p,
619                             const struct sys_reg_desc *r)
620 {
621         access_rw(vcpu, p, r);
622         if (p->is_write)
623                 vcpu_set_flag(vcpu, DEBUG_DIRTY);
624 
625         trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
626 
627         return true;
628 }
629 
630 /*
631  * reg_to_dbg/dbg_to_reg
632  *
633  * A 32 bit write to a debug register leaves the top bits alone
634  * A 32 bit read from a debug register only returns the bottom bits
635  *
636  * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
637  * switches between host and guest values in future.
638  */
639 static void reg_to_dbg(struct kvm_vcpu *vcpu,
640                        struct sys_reg_params *p,
641                        const struct sys_reg_desc *rd,
642                        u64 *dbg_reg)
643 {
644         u64 mask, shift, val;
645 
646         get_access_mask(rd, &mask, &shift);
647 
648         val = *dbg_reg;
649         val &= ~mask;
650         val |= (p->regval & (mask >> shift)) << shift;
651         *dbg_reg = val;
652 
653         vcpu_set_flag(vcpu, DEBUG_DIRTY);
654 }
655 
656 static void dbg_to_reg(struct kvm_vcpu *vcpu,
657                        struct sys_reg_params *p,
658                        const struct sys_reg_desc *rd,
659                        u64 *dbg_reg)
660 {
661         u64 mask, shift;
662 
663         get_access_mask(rd, &mask, &shift);
664         p->regval = (*dbg_reg & mask) >> shift;
665 }
666 
667 static bool trap_bvr(struct kvm_vcpu *vcpu,
668                      struct sys_reg_params *p,
669                      const struct sys_reg_desc *rd)
670 {
671         u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
672 
673         if (p->is_write)
674                 reg_to_dbg(vcpu, p, rd, dbg_reg);
675         else
676                 dbg_to_reg(vcpu, p, rd, dbg_reg);
677 
678         trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
679 
680         return true;
681 }
682 
683 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
684                    u64 val)
685 {
686         vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
687         return 0;
688 }
689 
690 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
691                    u64 *val)
692 {
693         *val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
694         return 0;
695 }
696 
697 static u64 reset_bvr(struct kvm_vcpu *vcpu,
698                       const struct sys_reg_desc *rd)
699 {
700         vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
701         return rd->val;
702 }
703 
704 static bool trap_bcr(struct kvm_vcpu *vcpu,
705                      struct sys_reg_params *p,
706                      const struct sys_reg_desc *rd)
707 {
708         u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
709 
710         if (p->is_write)
711                 reg_to_dbg(vcpu, p, rd, dbg_reg);
712         else
713                 dbg_to_reg(vcpu, p, rd, dbg_reg);
714 
715         trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
716 
717         return true;
718 }
719 
720 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
721                    u64 val)
722 {
723         vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
724         return 0;
725 }
726 
727 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
728                    u64 *val)
729 {
730         *val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
731         return 0;
732 }
733 
734 static u64 reset_bcr(struct kvm_vcpu *vcpu,
735                       const struct sys_reg_desc *rd)
736 {
737         vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
738         return rd->val;
739 }
740 
741 static bool trap_wvr(struct kvm_vcpu *vcpu,
742                      struct sys_reg_params *p,
743                      const struct sys_reg_desc *rd)
744 {
745         u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
746 
747         if (p->is_write)
748                 reg_to_dbg(vcpu, p, rd, dbg_reg);
749         else
750                 dbg_to_reg(vcpu, p, rd, dbg_reg);
751 
752         trace_trap_reg(__func__, rd->CRm, p->is_write,
753                 vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
754 
755         return true;
756 }
757 
758 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
759                    u64 val)
760 {
761         vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
762         return 0;
763 }
764 
765 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
766                    u64 *val)
767 {
768         *val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
769         return 0;
770 }
771 
772 static u64 reset_wvr(struct kvm_vcpu *vcpu,
773                       const struct sys_reg_desc *rd)
774 {
775         vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
776         return rd->val;
777 }
778 
779 static bool trap_wcr(struct kvm_vcpu *vcpu,
780                      struct sys_reg_params *p,
781                      const struct sys_reg_desc *rd)
782 {
783         u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
784 
785         if (p->is_write)
786                 reg_to_dbg(vcpu, p, rd, dbg_reg);
787         else
788                 dbg_to_reg(vcpu, p, rd, dbg_reg);
789 
790         trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
791 
792         return true;
793 }
794 
795 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
796                    u64 val)
797 {
798         vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
799         return 0;
800 }
801 
802 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
803                    u64 *val)
804 {
805         *val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
806         return 0;
807 }
808 
809 static u64 reset_wcr(struct kvm_vcpu *vcpu,
810                       const struct sys_reg_desc *rd)
811 {
812         vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
813         return rd->val;
814 }
815 
816 static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
817 {
818         u64 amair = read_sysreg(amair_el1);
819         vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
820         return amair;
821 }
822 
823 static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
824 {
825         u64 actlr = read_sysreg(actlr_el1);
826         vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
827         return actlr;
828 }
829 
830 static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
831 {
832         u64 mpidr;
833 
834         /*
835          * Map the vcpu_id into the first three affinity level fields of
836          * the MPIDR. We limit the number of VCPUs in affinity level 0 to
837          * 16, because the ICC_SGIxR registers of the GICv3 can only
838          * address 16 CPUs at that level when sending IPIs to each CPU
839          * directly.
840          */
841         mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
842         mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
843         mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
844         mpidr |= (1ULL << 31);
845         vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);
846 
847         return mpidr;
848 }
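    /*
     * For illustration: vcpu_id 20 maps to Aff0 = 20 & 0xf = 4 and
     * Aff1 = (20 >> 4) & 0xff = 1, so the guest reads back an MPIDR
     * with Aff2:Aff1:Aff0 = 0:1:4 and bit 31 set.
     */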
849 
850 static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
851                                    const struct sys_reg_desc *r)
852 {
853         if (kvm_vcpu_has_pmu(vcpu))
854                 return 0;
855 
856         return REG_HIDDEN;
857 }
858 
859 static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
860 {
861         u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
862         u8 n = vcpu->kvm->arch.pmcr_n;
863 
864         if (n)
865                 mask |= GENMASK(n - 1, 0);
866 
867         reset_unknown(vcpu, r);
868         __vcpu_sys_reg(vcpu, r->reg) &= mask;
869 
870         return __vcpu_sys_reg(vcpu, r->reg);
871 }
872 
873 static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
874 {
875         reset_unknown(vcpu, r);
876         __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
877 
878         return __vcpu_sys_reg(vcpu, r->reg);
879 }
880 
881 static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
882 {
883         /* This thing will UNDEF, who cares about the reset value? */
884         if (!kvm_vcpu_has_pmu(vcpu))
885                 return 0;
886 
887         reset_unknown(vcpu, r);
888         __vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm);
889 
890         return __vcpu_sys_reg(vcpu, r->reg);
891 }
892 
893 static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
894 {
895         reset_unknown(vcpu, r);
896         __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
897 
898         return __vcpu_sys_reg(vcpu, r->reg);
899 }
900 
901 static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
902 {
903         u64 pmcr = 0;
904 
905         if (!kvm_supports_32bit_el0())
906                 pmcr |= ARMV8_PMU_PMCR_LC;
907 
908         /*
909          * The value of PMCR.N field is included when the
910          * vCPU register is read via kvm_vcpu_read_pmcr().
911          */
912         __vcpu_sys_reg(vcpu, r->reg) = pmcr;
913 
914         return __vcpu_sys_reg(vcpu, r->reg);
915 }
916 
917 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
918 {
919         u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
920         bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
921 
922         if (!enabled)
923                 kvm_inject_undefined(vcpu);
924 
925         return !enabled;
926 }
927 
928 static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
929 {
930         return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
931 }
932 
933 static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
934 {
935         return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
936 }
937 
938 static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
939 {
940         return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
941 }
942 
943 static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
944 {
945         return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
946 }
947 
948 static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
949                         const struct sys_reg_desc *r)
950 {
951         u64 val;
952 
953         if (pmu_access_el0_disabled(vcpu))
954                 return false;
955 
956         if (p->is_write) {
957                 /*
958                  * Only update writeable bits of PMCR (continuing into
959                  * kvm_pmu_handle_pmcr() as well)
960                  */
961                 val = kvm_vcpu_read_pmcr(vcpu);
962                 val &= ~ARMV8_PMU_PMCR_MASK;
963                 val |= p->regval & ARMV8_PMU_PMCR_MASK;
964                 if (!kvm_supports_32bit_el0())
965                         val |= ARMV8_PMU_PMCR_LC;
966                 kvm_pmu_handle_pmcr(vcpu, val);
967         } else {
968                 /* PMCR.P & PMCR.C are RAZ */
969                 val = kvm_vcpu_read_pmcr(vcpu)
970                       & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
971                 p->regval = val;
972         }
973 
974         return true;
975 }
976 
977 static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
978                           const struct sys_reg_desc *r)
979 {
980         if (pmu_access_event_counter_el0_disabled(vcpu))
981                 return false;
982 
983         if (p->is_write)
984                 __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
985         else
986                 /* return PMSELR.SEL field */
987                 p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
988                             & ARMV8_PMU_COUNTER_MASK;
989 
990         return true;
991 }
992 
993 static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
994                           const struct sys_reg_desc *r)
995 {
996         u64 pmceid, mask, shift;
997 
998         BUG_ON(p->is_write);
999 
1000         if (pmu_access_el0_disabled(vcpu))
1001                 return false;
1002 
1003         get_access_mask(r, &mask, &shift);
1004 
1005         pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
1006         pmceid &= mask;
1007         pmceid >>= shift;
1008 
1009         p->regval = pmceid;
1010 
1011         return true;
1012 }
1013 
1014 static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
1015 {
1016         u64 pmcr, val;
1017 
1018         pmcr = kvm_vcpu_read_pmcr(vcpu);
1019         val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
1020         if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
1021                 kvm_inject_undefined(vcpu);
1022                 return false;
1023         }
1024 
1025         return true;
1026 }
1027 
1028 static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1029                           u64 *val)
1030 {
1031         u64 idx;
1032 
1033         if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1034                 /* PMCCNTR_EL0 */
1035                 idx = ARMV8_PMU_CYCLE_IDX;
1036         else
1037                 /* PMEVCNTRn_EL0 */
1038                 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1039 
1040         *val = kvm_pmu_get_counter_value(vcpu, idx);
1041         return 0;
1042 }
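    /*
     * For illustration (using the architectural encoding of these
     * registers): PMEVCNTR10_EL0 has CRm = 0b1001 and Op2 = 0b010, so
     * ((CRm & 3) << 3) | (Op2 & 7) = (1 << 3) | 2 = 10, recovering the
     * event counter index.
     */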
1043 
1044 static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
1045                               struct sys_reg_params *p,
1046                               const struct sys_reg_desc *r)
1047 {
1048         u64 idx = ~0UL;
1049 
1050         if (r->CRn == 9 && r->CRm == 13) {
1051                 if (r->Op2 == 2) {
1052                         /* PMXEVCNTR_EL0 */
1053                         if (pmu_access_event_counter_el0_disabled(vcpu))
1054                                 return false;
1055 
1056                         idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
1057                               & ARMV8_PMU_COUNTER_MASK;
1058                 } else if (r->Op2 == 0) {
1059                         /* PMCCNTR_EL0 */
1060                         if (pmu_access_cycle_counter_el0_disabled(vcpu))
1061                                 return false;
1062 
1063                         idx = ARMV8_PMU_CYCLE_IDX;
1064                 }
1065         } else if (r->CRn == 0 && r->CRm == 9) {
1066                 /* PMCCNTR */
1067                 if (pmu_access_event_counter_el0_disabled(vcpu))
1068                         return false;
1069 
1070                 idx = ARMV8_PMU_CYCLE_IDX;
1071         } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
1072                 /* PMEVCNTRn_EL0 */
1073                 if (pmu_access_event_counter_el0_disabled(vcpu))
1074                         return false;
1075 
1076                 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1077         }
1078 
1079         /* Catch any decoding mistake */
1080         WARN_ON(idx == ~0UL);
1081 
1082         if (!pmu_counter_idx_valid(vcpu, idx))
1083                 return false;
1084 
1085         if (p->is_write) {
1086                 if (pmu_access_el0_disabled(vcpu))
1087                         return false;
1088 
1089                 kvm_pmu_set_counter_value(vcpu, idx, p->regval);
1090         } else {
1091                 p->regval = kvm_pmu_get_counter_value(vcpu, idx);
1092         }
1093 
1094         return true;
1095 }
1096 
1097 static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1098                                const struct sys_reg_desc *r)
1099 {
1100         u64 idx, reg;
1101 
1102         if (pmu_access_el0_disabled(vcpu))
1103                 return false;
1104 
1105         if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
1106                 /* PMXEVTYPER_EL0 */
1107                 idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
1108                 reg = PMEVTYPER0_EL0 + idx;
1109         } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
1110                 idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1111                 if (idx == ARMV8_PMU_CYCLE_IDX)
1112                         reg = PMCCFILTR_EL0;
1113                 else
1114                         /* PMEVTYPERn_EL0 */
1115                         reg = PMEVTYPER0_EL0 + idx;
1116         } else {
1117                 BUG();
1118         }
1119 
1120         if (!pmu_counter_idx_valid(vcpu, idx))
1121                 return false;
1122 
1123         if (p->is_write) {
1124                 kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
1125                 kvm_vcpu_pmu_restore_guest(vcpu);
1126         } else {
1127                 p->regval = __vcpu_sys_reg(vcpu, reg);
1128         }
1129 
1130         return true;
1131 }
1132 
1133 static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
1134 {
1135         bool set;
1136 
1137         val &= kvm_pmu_valid_counter_mask(vcpu);
1138 
1139         switch (r->reg) {
1140         case PMOVSSET_EL0:
1141                 /* CRm[1] being set indicates a SET register, and CLR otherwise */
1142                 set = r->CRm & 2;
1143                 break;
1144         default:
1145                 /* Op2[0] being set indicates a SET register, and CLR otherwise */
1146                 set = r->Op2 & 1;
1147                 break;
1148         }
1149 
1150         if (set)
1151                 __vcpu_sys_reg(vcpu, r->reg) |= val;
1152         else
1153                 __vcpu_sys_reg(vcpu, r->reg) &= ~val;
1154 
1155         return 0;
1156 }
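    /*
     * For illustration (using the architectural encodings): PMOVSSET_EL0
     * (CRm = 0b1110) has CRm[1] set while PMOVSCLR_EL0 (CRm = 0b1100)
     * does not; likewise PMCNTENSET_EL0 (Op2 = 1) has Op2[0] set while
     * PMCNTENCLR_EL0 (Op2 = 2) does not.
     */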
1157 
1158 static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
1159 {
1160         u64 mask = kvm_pmu_valid_counter_mask(vcpu);
1161 
1162         *val = __vcpu_sys_reg(vcpu, r->reg) & mask;
1163         return 0;
1164 }
1165 
1166 static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1167                            const struct sys_reg_desc *r)
1168 {
1169         u64 val, mask;
1170 
1171         if (pmu_access_el0_disabled(vcpu))
1172                 return false;
1173 
1174         mask = kvm_pmu_valid_counter_mask(vcpu);
1175         if (p->is_write) {
1176                 val = p->regval & mask;
1177                 if (r->Op2 & 0x1) {
1178                         /* accessing PMCNTENSET_EL0 */
1179                         __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
1180                         kvm_pmu_enable_counter_mask(vcpu, val);
1181                         kvm_vcpu_pmu_restore_guest(vcpu);
1182                 } else {
1183                         /* accessing PMCNTENCLR_EL0 */
1184                         __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
1185                         kvm_pmu_disable_counter_mask(vcpu, val);
1186                 }
1187         } else {
1188                 p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
1189         }
1190 
1191         return true;
1192 }
1193 
1194 static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1195                            const struct sys_reg_desc *r)
1196 {
1197         u64 mask = kvm_pmu_valid_counter_mask(vcpu);
1198 
1199         if (check_pmu_access_disabled(vcpu, 0))
1200                 return false;
1201 
1202         if (p->is_write) {
1203                 u64 val = p->regval & mask;
1204 
1205                 if (r->Op2 & 0x1)
1206                         /* accessing PMINTENSET_EL1 */
1207                         __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
1208                 else
1209                         /* accessing PMINTENCLR_EL1 */
1210                         __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
1211         } else {
1212                 p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
1213         }
1214 
1215         return true;
1216 }
1217 
1218 static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1219                          const struct sys_reg_desc *r)
1220 {
1221         u64 mask = kvm_pmu_valid_counter_mask(vcpu);
1222 
1223         if (pmu_access_el0_disabled(vcpu))
1224                 return false;
1225 
1226         if (p->is_write) {
1227                 if (r->CRm & 0x2)
1228                         /* accessing PMOVSSET_EL0 */
1229                         __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
1230                 else
1231                         /* accessing PMOVSCLR_EL0 */
1232                         __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
1233         } else {
1234                 p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
1235         }
1236 
1237         return true;
1238 }
1239 
1240 static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1241                            const struct sys_reg_desc *r)
1242 {
1243         u64 mask;
1244 
1245         if (!p->is_write)
1246                 return read_from_write_only(vcpu, p, r);
1247 
1248         if (pmu_write_swinc_el0_disabled(vcpu))
1249                 return false;
1250 
1251         mask = kvm_pmu_valid_counter_mask(vcpu);
1252         kvm_pmu_software_increment(vcpu, p->regval & mask);
1253         return true;
1254 }
1255 
1256 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1257                              const struct sys_reg_desc *r)
1258 {
1259         if (p->is_write) {
1260                 if (!vcpu_mode_priv(vcpu)) {
1261                         kvm_inject_undefined(vcpu);
1262                         return false;
1263                 }
1264 
1265                 __vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
1266                                p->regval & ARMV8_PMU_USERENR_MASK;
1267         } else {
1268                 p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
1269                             & ARMV8_PMU_USERENR_MASK;
1270         }
1271 
1272         return true;
1273 }
1274 
1275 static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1276                     u64 *val)
1277 {
1278         *val = kvm_vcpu_read_pmcr(vcpu);
1279         return 0;
1280 }
1281 
1282 static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1283                     u64 val)
1284 {
1285         u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
1286         struct kvm *kvm = vcpu->kvm;
1287 
1288         mutex_lock(&kvm->arch.config_lock);
1289 
1290         /*
1291          * The vCPU can't have more counters than the PMU hardware
1292          * implements. Ignore this error to maintain compatibility
1293          * with the existing KVM behavior.
1294          */
1295         if (!kvm_vm_has_ran_once(kvm) &&
1296             new_n <= kvm_arm_pmu_get_max_counters(kvm))
1297                 kvm->arch.pmcr_n = new_n;
1298 
1299         mutex_unlock(&kvm->arch.config_lock);
1300 
1301         /*
1302          * Ignore writes to RES0 bits, read only bits that are cleared on
1303          * vCPU reset, and writable bits that KVM doesn't support yet.
1304          * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
1305          * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
1306          * But, we leave the bit as it is here, as the vCPU's PMUver might
1307          * be changed later (NOTE: the bit will be cleared on first vCPU run
1308          * if necessary).
1309          */
1310         val &= ARMV8_PMU_PMCR_MASK;
1311 
1312         /* The LC bit is RES1 when AArch32 is not supported */
1313         if (!kvm_supports_32bit_el0())
1314                 val |= ARMV8_PMU_PMCR_LC;
1315 
1316         __vcpu_sys_reg(vcpu, r->reg) = val;
1317         return 0;
1318 }
1319 
1320 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
1321 #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                      \
1322         { SYS_DESC(SYS_DBGBVRn_EL1(n)),                                 \
1323           trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },                \
1324         { SYS_DESC(SYS_DBGBCRn_EL1(n)),                                 \
1325           trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },                \
1326         { SYS_DESC(SYS_DBGWVRn_EL1(n)),                                 \
1327           trap_wvr, reset_wvr, 0, 0,  get_wvr, set_wvr },               \
1328         { SYS_DESC(SYS_DBGWCRn_EL1(n)),                                 \
1329           trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }
1330 
1331 #define PMU_SYS_REG(name)                                               \
1332         SYS_DESC(SYS_##name), .reset = reset_pmu_reg,                   \
1333         .visibility = pmu_visibility
1334 
1335 /* Macro to expand the PMEVCNTRn_EL0 register */
1336 #define PMU_PMEVCNTR_EL0(n)                                             \
1337         { PMU_SYS_REG(PMEVCNTRn_EL0(n)),                                \
1338           .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,          \
1339           .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
1340 
1341 /* Macro to expand the PMEVTYPERn_EL0 register */
1342 #define PMU_PMEVTYPER_EL0(n)                                            \
1343         { PMU_SYS_REG(PMEVTYPERn_EL0(n)),                               \
1344           .reset = reset_pmevtyper,                                     \
1345           .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
1346 
1347 static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1348                          const struct sys_reg_desc *r)
1349 {
1350         kvm_inject_undefined(vcpu);
1351 
1352         return false;
1353 }
1354 
1355 /* Macro to expand the AMU counter and type registers */
1356 #define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
1357 #define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
1358 #define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
1359 #define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
1360 
1361 static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1362                         const struct sys_reg_desc *rd)
1363 {
1364         return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
1365 }
1366 
1367 /*
1368  * If we land here on a PtrAuth access, that is because we didn't
1369  * fixup the access on exit by allowing the PtrAuth sysregs. The only
1370  * way this happens is when the guest does not have PtrAuth support
1371  * enabled.
1372  */
1373 #define __PTRAUTH_KEY(k)                                                \
1374         { SYS_DESC(SYS_## k), undef_access, reset_unknown, k,           \
1375         .visibility = ptrauth_visibility}
1376 
1377 #define PTRAUTH_KEY(k)                                                  \
1378         __PTRAUTH_KEY(k ## KEYLO_EL1),                                  \
1379         __PTRAUTH_KEY(k ## KEYHI_EL1)
1380 
1381 static bool access_arch_timer(struct kvm_vcpu *vcpu,
1382                               struct sys_reg_params *p,
1383                               const struct sys_reg_desc *r)
1384 {
1385         enum kvm_arch_timers tmr;
1386         enum kvm_arch_timer_regs treg;
1387         u64 reg = reg_to_encoding(r);
1388 
1389         switch (reg) {
1390         case SYS_CNTP_TVAL_EL0:
1391         case SYS_AARCH32_CNTP_TVAL:
1392                 tmr = TIMER_PTIMER;
1393                 treg = TIMER_REG_TVAL;
1394                 break;
1395         case SYS_CNTP_CTL_EL0:
1396         case SYS_AARCH32_CNTP_CTL:
1397                 tmr = TIMER_PTIMER;
1398                 treg = TIMER_REG_CTL;
1399                 break;
1400         case SYS_CNTP_CVAL_EL0:
1401         case SYS_AARCH32_CNTP_CVAL:
1402                 tmr = TIMER_PTIMER;
1403                 treg = TIMER_REG_CVAL;
1404                 break;
1405         case SYS_CNTPCT_EL0:
1406         case SYS_CNTPCTSS_EL0:
1407         case SYS_AARCH32_CNTPCT:
1408                 tmr = TIMER_PTIMER;
1409                 treg = TIMER_REG_CNT;
1410                 break;
1411         default:
1412                 print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
1413                 kvm_inject_undefined(vcpu);
1414                 return false;
1415         }
1416 
1417         if (p->is_write)
1418                 kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
1419         else
1420                 p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
1421 
1422         return true;
1423 }
1424 
1425 static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
1426                                     s64 new, s64 cur)
1427 {
1428         struct arm64_ftr_bits kvm_ftr = *ftrp;
1429 
1430         /* Some features have different safe value type in KVM than host features */
1431         switch (id) {
1432         case SYS_ID_AA64DFR0_EL1:
1433                 switch (kvm_ftr.shift) {
1434                 case ID_AA64DFR0_EL1_PMUVer_SHIFT:
1435                         kvm_ftr.type = FTR_LOWER_SAFE;
1436                         break;
1437                 case ID_AA64DFR0_EL1_DebugVer_SHIFT:
1438                         kvm_ftr.type = FTR_LOWER_SAFE;
1439                         break;
1440                 }
1441                 break;
1442         case SYS_ID_DFR0_EL1:
1443                 if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
1444                         kvm_ftr.type = FTR_LOWER_SAFE;
1445                 break;
1446         }
1447 
1448         return arm64_ftr_safe_value(&kvm_ftr, new, cur);
1449 }
1450 
1451 /*
1452  * arm64_check_features() - Check if a feature register value constitutes
1453  * a subset of features indicated by the idreg's KVM sanitised limit.
1454  *
1455  * This function will check if each feature field of @val is the "safe" value
1456  * against the idreg's KVM sanitised limit returned from the reset() callback.
1457  * If a field value in @val is the same as the one in limit, it is always
1458  * considered the safe value regardless. For register fields that are not
1459  * writable, only the value in limit is considered the safe value.
1460  *
1461  * Return: 0 if all the fields are safe. Otherwise, return negative errno.
1462  */
1463 static int arm64_check_features(struct kvm_vcpu *vcpu,
1464                                 const struct sys_reg_desc *rd,
1465                                 u64 val)
1466 {
1467         const struct arm64_ftr_reg *ftr_reg;
1468         const struct arm64_ftr_bits *ftrp = NULL;
1469         u32 id = reg_to_encoding(rd);
1470         u64 writable_mask = rd->val;
1471         u64 limit = rd->reset(vcpu, rd);
1472         u64 mask = 0;
1473 
1474         /*
1475          * Hidden and unallocated ID registers may not have a corresponding
1476          * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
1477          * only safe value is 0.
1478          */
1479         if (sysreg_visible_as_raz(vcpu, rd))
1480                 return val ? -E2BIG : 0;
1481 
1482         ftr_reg = get_arm64_ftr_reg(id);
1483         if (!ftr_reg)
1484                 return -EINVAL;
1485 
1486         ftrp = ftr_reg->ftr_bits;
1487 
1488         for (; ftrp && ftrp->width; ftrp++) {
1489                 s64 f_val, f_lim, safe_val;
1490                 u64 ftr_mask;
1491 
1492                 ftr_mask = arm64_ftr_mask(ftrp);
1493                 if ((ftr_mask & writable_mask) != ftr_mask)
1494                         continue;
1495 
1496                 f_val = arm64_ftr_value(ftrp, val);
1497                 f_lim = arm64_ftr_value(ftrp, limit);
1498                 mask |= ftr_mask;
1499 
1500                 if (f_val == f_lim)
1501                         safe_val = f_val;
1502                 else
1503                         safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);
1504 
1505                 if (safe_val != f_val)
1506                         return -E2BIG;
1507         }
1508 
1509         /* For fields that are not writable, values in limit are the safe values. */
1510         if ((val & ~mask) != (limit & ~mask))
1511                 return -E2BIG;
1512 
1513         return 0;
1514 }
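
     /*
      * For illustration: a userspace write to ID_AA64DFR0_EL1 that lowers
      * PMUVer but leaves every other field identical to the limit passes the
      * per-field checks above; flipping any bit outside the accumulated
      * writable mask makes the final comparison fail with -E2BIG.
      */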
1515 
1516 static u8 pmuver_to_perfmon(u8 pmuver)
1517 {
1518         switch (pmuver) {
1519         case ID_AA64DFR0_EL1_PMUVer_IMP:
1520                 return ID_DFR0_EL1_PerfMon_PMUv3;
1521         case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
1522                 return ID_DFR0_EL1_PerfMon_IMPDEF;
1523         default:
1524                 /* Anything ARMv8.1+ and NI have the same value. For now. */
1525                 return pmuver;
1526         }
1527 }
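
     /*
      * For reference (architected encodings): PMUVer IMP (0b0001) maps to
      * PerfMon PMUv3 (0b0011) and PMUVer IMP_DEF (0b1111) to PerfMon IMPDEF
      * (0b1111). From ARMv8.1 onwards the two fields share encodings (e.g.
      * 0b0100 for PMUv3p1), as does NI (0b0000), hence the pass-through.
      */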
1528 
1529 /* Read a sanitised cpufeature ID register by sys_reg_desc */
1530 static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
1531                                        const struct sys_reg_desc *r)
1532 {
1533         u32 id = reg_to_encoding(r);
1534         u64 val;
1535 
1536         if (sysreg_visible_as_raz(vcpu, r))
1537                 return 0;
1538 
1539         val = read_sanitised_ftr_reg(id);
1540 
1541         switch (id) {
1542         case SYS_ID_AA64PFR1_EL1:
1543                 if (!kvm_has_mte(vcpu->kvm))
1544                         val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
1545 
1546                 val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
1547                 break;
1548         case SYS_ID_AA64ISAR1_EL1:
1549                 if (!vcpu_has_ptrauth(vcpu))
1550                         val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
1551                                  ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
1552                                  ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
1553                                  ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
1554                 break;
1555         case SYS_ID_AA64ISAR2_EL1:
1556                 if (!vcpu_has_ptrauth(vcpu))
1557                         val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
1558                                  ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
1559                 if (!cpus_have_final_cap(ARM64_HAS_WFXT))
1560                         val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
1561                 break;
1562         case SYS_ID_AA64MMFR2_EL1:
1563                 val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
1564                 break;
1565         case SYS_ID_MMFR4_EL1:
1566                 val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
1567                 break;
1568         }
1569 
1570         return val;
1571 }
1572 
1573 static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
1574                                      const struct sys_reg_desc *r)
1575 {
1576         return __kvm_read_sanitised_id_reg(vcpu, r);
1577 }
1578 
1579 static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1580 {
1581         return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
1582 }
1583 
1584 static bool is_feature_id_reg(u32 encoding)
1585 {
1586         return (sys_reg_Op0(encoding) == 3 &&
1587                 (sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
1588                 sys_reg_CRn(encoding) == 0 &&
1589                 sys_reg_CRm(encoding) <= 7);
1590 }
1591 
1592 /*
1593  * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
1594  * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, the range of ID registers
1595  * KVM maintains on a per-VM basis; CTR_EL0 is also handled per-VM here.
1596  */
1597 static inline bool is_vm_ftr_id_reg(u32 id)
1598 {
1599         if (id == SYS_CTR_EL0)
1600                 return true;
1601 
1602         return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1603                 sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1604                 sys_reg_CRm(id) < 8);
1605 }
1606 
1607 static inline bool is_vcpu_ftr_id_reg(u32 id)
1608 {
1609         return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
1610 }
1611 
1612 static inline bool is_aa32_id_reg(u32 id)
1613 {
1614         return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1615                 sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1616                 sys_reg_CRm(id) <= 3);
1617 }
1618 
1619 static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
1620                                   const struct sys_reg_desc *r)
1621 {
1622         u32 id = reg_to_encoding(r);
1623 
1624         switch (id) {
1625         case SYS_ID_AA64ZFR0_EL1:
1626                 if (!vcpu_has_sve(vcpu))
1627                         return REG_RAZ;
1628                 break;
1629         }
1630 
1631         return 0;
1632 }
1633 
1634 static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
1635                                        const struct sys_reg_desc *r)
1636 {
1637         /*
1638          * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
1639          * EL. Promote to RAZ/WI in order to guarantee consistency between
1640          * systems.
1641          */
1642         if (!kvm_supports_32bit_el0())
1643                 return REG_RAZ | REG_USER_WI;
1644 
1645         return id_visibility(vcpu, r);
1646 }
1647 
1648 static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
1649                                    const struct sys_reg_desc *r)
1650 {
1651         return REG_RAZ;
1652 }
1653 
1654 /* cpufeature ID register access trap handlers */
1655 
1656 static bool access_id_reg(struct kvm_vcpu *vcpu,
1657                           struct sys_reg_params *p,
1658                           const struct sys_reg_desc *r)
1659 {
1660         if (p->is_write)
1661                 return write_to_read_only(vcpu, p, r);
1662 
1663         p->regval = read_id_reg(vcpu, r);
1664 
1665         return true;
1666 }
1667 
1668 /* Visibility overrides for SVE-specific control registers */
1669 static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1670                                    const struct sys_reg_desc *rd)
1671 {
1672         if (vcpu_has_sve(vcpu))
1673                 return 0;
1674 
1675         return REG_HIDDEN;
1676 }
1677 
1678 static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
1679                                           const struct sys_reg_desc *rd)
1680 {
1681         u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1682 
1683         if (!vcpu_has_sve(vcpu))
1684                 val &= ~ID_AA64PFR0_EL1_SVE_MASK;
1685 
1686         /*
1687          * The default is to expose CSV2 == 1 if the HW isn't affected.
1688          * Although this is a per-CPU feature, we make it global because
1689          * asymmetric systems are just a nuisance.
1690          *
1691          * Userspace can override this as long as it doesn't promise
1692          * the impossible.
1693          */
1694         if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
1695                 val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
1696                 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
1697         }
1698         if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
1699                 val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
1700                 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
1701         }
1702 
1703         if (kvm_vgic_global_state.type == VGIC_V3) {
1704                 val &= ~ID_AA64PFR0_EL1_GIC_MASK;
1705                 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
1706         }
1707 
1708         val &= ~ID_AA64PFR0_EL1_AMU_MASK;
1709 
1710         return val;
1711 }
1712 
1713 #define ID_REG_LIMIT_FIELD_ENUM(val, reg, field, limit)                        \
1714 ({                                                                             \
1715         u64 __f_val = FIELD_GET(reg##_##field##_MASK, val);                    \
1716         (val) &= ~reg##_##field##_MASK;                                        \
1717         (val) |= FIELD_PREP(reg##_##field##_MASK,                              \
1718                             min(__f_val,                                       \
1719                                 (u64)SYS_FIELD_VALUE(reg, field, limit)));     \
1720         (val);                                                                 \
1721 })
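
     /*
      * For illustration: ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1,
      * DebugVer, V8P8) extracts the DebugVer field from val, clamps it to
      * the V8P8 encoding if it is higher, and folds the (possibly lowered)
      * field back into val.
      */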
1722 
1723 static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
1724                                           const struct sys_reg_desc *rd)
1725 {
1726         u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
1727 
1728         val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
1729 
1730         /*
1731          * Only initialize the PMU version if the vCPU was configured with one.
1732          */
1733         val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
1734         if (kvm_vcpu_has_pmu(vcpu))
1735                 val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
1736                                       kvm_arm_pmu_get_pmuver_limit());
1737 
1738         /* Hide SPE from guests */
1739         val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
1740 
1741         return val;
1742 }
1743 
1744 static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
1745                                const struct sys_reg_desc *rd,
1746                                u64 val)
1747 {
1748         u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
1749         u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
1750 
1751         /*
1752          * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
1753          * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
1754          * exposed an IMP_DEF PMU to userspace and the guest on systems w/
1755          * non-architectural PMUs. Of course, PMUv3 is the only game in town for
1756          * PMU virtualization, so the IMP_DEF value was rather user-hostile.
1757          *
1758          * At minimum, we're on the hook to allow values that were given to
1759          * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
1760          * with a more sensible NI. The value of an ID register changing under
1761          * the nose of the guest is unfortunate, but is certainly no more
1762          * surprising than an ill-guided PMU driver poking at impdef system
1763          * registers that end in an UNDEF...
1764          */
1765         if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
1766                 val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
1767 
1768         /*
1769          * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
1770          * nonzero minimum safe value.
1771          */
1772         if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
1773                 return -EINVAL;
1774 
1775         return set_id_reg(vcpu, rd, val);
1776 }
1777 
1778 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
1779                                       const struct sys_reg_desc *rd)
1780 {
1781         u8 perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
1782         u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
1783 
1784         val &= ~ID_DFR0_EL1_PerfMon_MASK;
1785         if (kvm_vcpu_has_pmu(vcpu))
1786                 val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
1787 
1788         val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
1789 
1790         return val;
1791 }
1792 
1793 static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
1794                            const struct sys_reg_desc *rd,
1795                            u64 val)
1796 {
1797         u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
1798         u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);
1799 
1800         if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
1801                 val &= ~ID_DFR0_EL1_PerfMon_MASK;
1802                 perfmon = 0;
1803         }
1804 
1805         /*
1806          * Allow DFR0_EL1.PerfMon to be set from userspace as long as
1807          * it doesn't promise more than what the HW gives us on the
1808          * AArch64 side (as everything is emulated with that), and
1809          * that this is a PMUv3.
1810          */
1811         if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
1812                 return -EINVAL;
1813 
1814         if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
1815                 return -EINVAL;
1816 
1817         return set_id_reg(vcpu, rd, val);
1818 }
1819 
1820 /*
1821  * cpufeature ID register user accessors
1822  *
1823  * These registers become immutable once the VM has started. Before that,
1824  * set_id_reg() only accepts values that arm64_check_features() deems a
1825  * subset of the KVM sanitised limit.
1826  */
1827 static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1828                       u64 *val)
1829 {
1830         /*
1831          * Avoid locking if the VM has already started, as the ID registers are
1832          * guaranteed to be invariant at that point.
1833          */
1834         if (kvm_vm_has_ran_once(vcpu->kvm)) {
1835                 *val = read_id_reg(vcpu, rd);
1836                 return 0;
1837         }
1838 
1839         mutex_lock(&vcpu->kvm->arch.config_lock);
1840         *val = read_id_reg(vcpu, rd);
1841         mutex_unlock(&vcpu->kvm->arch.config_lock);
1842 
1843         return 0;
1844 }
1845 
1846 static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1847                       u64 val)
1848 {
1849         u32 id = reg_to_encoding(rd);
1850         int ret;
1851 
1852         mutex_lock(&vcpu->kvm->arch.config_lock);
1853 
1854         /*
1855          * Once the VM has started the ID registers are immutable. Reject any
1856          * write that does not match the final register value.
1857          */
1858         if (kvm_vm_has_ran_once(vcpu->kvm)) {
1859                 if (val != read_id_reg(vcpu, rd))
1860                         ret = -EBUSY;
1861                 else
1862                         ret = 0;
1863 
1864                 mutex_unlock(&vcpu->kvm->arch.config_lock);
1865                 return ret;
1866         }
1867 
1868         ret = arm64_check_features(vcpu, rd, val);
1869         if (!ret)
1870                 kvm_set_vm_id_reg(vcpu->kvm, id, val);
1871 
1872         mutex_unlock(&vcpu->kvm->arch.config_lock);
1873 
1874         /*
1875          * arm64_check_features() returns -E2BIG to indicate the register's
1876          * feature set is a superset of the maximally-allowed register value.
1877          * While it would be nice to precisely describe this to userspace, the
1878          * existing UAPI for KVM_SET_ONE_REG has it that invalid register
1879          * writes return -EINVAL.
1880          */
1881         if (ret == -E2BIG)
1882                 ret = -EINVAL;
1883         return ret;
1884 }
1885 
1886 void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
1887 {
1888         u64 *p = __vm_id_reg(&kvm->arch, reg);
1889 
1890         lockdep_assert_held(&kvm->arch.config_lock);
1891 
1892         if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
1893                 return;
1894 
1895         *p = val;
1896 }
1897 
1898 static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1899                        u64 *val)
1900 {
1901         *val = 0;
1902         return 0;
1903 }
1904 
1905 static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1906                       u64 val)
1907 {
1908         return 0;
1909 }
1910 
1911 static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1912                        const struct sys_reg_desc *r)
1913 {
1914         if (p->is_write)
1915                 return write_to_read_only(vcpu, p, r);
1916 
1917         p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0);
1918         return true;
1919 }
1920 
1921 static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1922                          const struct sys_reg_desc *r)
1923 {
1924         if (p->is_write)
1925                 return write_to_read_only(vcpu, p, r);
1926 
1927         p->regval = __vcpu_sys_reg(vcpu, r->reg);
1928         return true;
1929 }
1930 
1931 /*
1932  * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
1933  * by the physical CPU which the vcpu currently resides in.
1934  */
1935 static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1936 {
1937         u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
1938         u64 clidr;
1939         u8 loc;
1940 
1941         if ((ctr_el0 & CTR_EL0_IDC)) {
1942                 /*
1943                  * Data cache clean to the PoU is not required so LoUU and LoUIS
1944                  * will not be set and a unified cache, which will be marked as
1945                  * LoC, will be added.
1946                  *
1947                  * If not DIC, make the unified cache L2 so that an instruction
1948                  * cache can be added as L1 later.
1949                  */
1950                 loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
1951                 clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
1952         } else {
1953                 /*
1954                  * Data cache clean to the PoU is required so let L1 have a data
1955                  * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
1956                  * it can be marked as LoC too.
1957                  */
1958                 loc = 1;
1959                 clidr = 1 << CLIDR_LOUU_SHIFT;
1960                 clidr |= 1 << CLIDR_LOUIS_SHIFT;
1961                 clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
1962         }
1963 
1964         /*
1965          * Instruction cache invalidation to the PoU is required so let L1 have
1966          * an instruction cache. If L1 already has a data cache, it will be
1967          * CACHE_TYPE_SEPARATE.
1968          */
1969         if (!(ctr_el0 & CTR_EL0_DIC))
1970                 clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);
1971 
1972         clidr |= loc << CLIDR_LOC_SHIFT;
1973 
1974         /*
1975          * Add a tag cache unified with the data cache. Allocation tags and
1976          * data share a cache line, so this looks valid even if there is only
1977          * one cache line.
1978          */
1979         if (kvm_has_mte(vcpu->kvm))
1980                 clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);
1981 
1982         __vcpu_sys_reg(vcpu, r->reg) = clidr;
1983 
1984         return __vcpu_sys_reg(vcpu, r->reg);
1985 }
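
     /*
      * For illustration: on a host with CTR_EL0.IDC=1 and DIC=0 this
      * fabricates a unified L2 cache (LoC=2), an L1 instruction cache and
      * LoUU=LoUIS=0; with MTE enabled, Ttype at the LoC level additionally
      * reports allocation tags unified with data.
      */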
1986 
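     /*
      * Accept a userspace-provided CLIDR_EL1 only if no RES0 bits are set
      * and, when the host lacks CTR_EL0.IDC, the value does not claim that
      * no data cache clean to the PoU is needed (i.e. it must report an LoC
      * and at least one of LoUU/LoUIS).
      */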
1987 static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
1988                       u64 val)
1989 {
1990         u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
1991         u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));
1992 
1993         if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
1994                 return -EINVAL;
1995 
1996         __vcpu_sys_reg(vcpu, rd->reg) = val;
1997 
1998         return 0;
1999 }
2000 
2001 static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2002                           const struct sys_reg_desc *r)
2003 {
2004         int reg = r->reg;
2005 
2006         if (p->is_write)
2007                 vcpu_write_sys_reg(vcpu, p->regval, reg);
2008         else
2009                 p->regval = vcpu_read_sys_reg(vcpu, reg);
2010         return true;
2011 }
2012 
2013 static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2014                           const struct sys_reg_desc *r)
2015 {
2016         u32 csselr;
2017 
2018         if (p->is_write)
2019                 return write_to_read_only(vcpu, p, r);
2020 
2021         csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
2022         csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
2023         if (csselr < CSSELR_MAX)
2024                 p->regval = get_ccsidr(vcpu, csselr);
2025 
2026         return true;
2027 }
2028 
2029 static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
2030                                    const struct sys_reg_desc *rd)
2031 {
2032         if (kvm_has_mte(vcpu->kvm))
2033                 return 0;
2034 
2035         return REG_HIDDEN;
2036 }
2037 
2038 #define MTE_REG(name) {                         \
2039         SYS_DESC(SYS_##name),                   \
2040         .access = undef_access,                 \
2041         .reset = reset_unknown,                 \
2042         .reg = name,                            \
2043         .visibility = mte_visibility,           \
2044 }
2045 
2046 static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
2047                                    const struct sys_reg_desc *rd)
2048 {
2049         if (vcpu_has_nv(vcpu))
2050                 return 0;
2051 
2052         return REG_HIDDEN;
2053 }
2054 
2055 static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
2056                           struct sys_reg_params *p,
2057                           const struct sys_reg_desc *r)
2058 {
2059         /*
2060          * We really shouldn't be here, and this is likely the result
2061          * of a misconfigured trap, as this register should target the
2062          * VNCR page, and nothing else.
2063          */
2064         return bad_trap(vcpu, p, r,
2065                         "trap of VNCR-backed register");
2066 }
2067 
2068 static bool bad_redir_trap(struct kvm_vcpu *vcpu,
2069                            struct sys_reg_params *p,
2070                            const struct sys_reg_desc *r)
2071 {
2072         /*
2073          * We really shouldn't be here, and this is likely the result
2074          * of a misconfigured trap, as this register should target the
2075          * corresponding EL1, and nothing else.
2076          */
2077         return bad_trap(vcpu, p, r,
2078                         "trap of EL2 register redirected to EL1");
2079 }
2080 
2081 #define EL2_REG(name, acc, rst, v) {            \
2082         SYS_DESC(SYS_##name),                   \
2083         .access = acc,                          \
2084         .reset = rst,                           \
2085         .reg = name,                            \
2086         .visibility = el2_visibility,           \
2087         .val = v,                               \
2088 }
2089 
2090 #define EL2_REG_VNCR(name, rst, v)      EL2_REG(name, bad_vncr_trap, rst, v)
2091 #define EL2_REG_REDIR(name, rst, v)     EL2_REG(name, bad_redir_trap, rst, v)
2092 
2093 /*
2094  * EL{0,1}2 registers are the EL2 view on an EL0 or EL1 register when
2095  * HCR_EL2.E2H==1, and are only in the sysreg table for convenience of
2096  * handling traps. Given that, they are always hidden from userspace.
2097  */
2098 static unsigned int hidden_user_visibility(const struct kvm_vcpu *vcpu,
2099                                            const struct sys_reg_desc *rd)
2100 {
2101         return REG_HIDDEN_USER;
2102 }
2103 
2104 #define EL12_REG(name, acc, rst, v) {           \
2105         SYS_DESC(SYS_##name##_EL12),            \
2106         .access = acc,                          \
2107         .reset = rst,                           \
2108         .reg = name##_EL1,                      \
2109         .val = v,                               \
2110         .visibility = hidden_user_visibility,   \
2111 }
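
     /*
      * For instance, EL12_REG(CNTKCTL, ...) describes SYS_CNTKCTL_EL12 but
      * backs it with the vcpu's CNTKCTL_EL1 storage, matching the E2H==1
      * aliasing described above.
      */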
2112 
2113 /*
2114  * The reset() callback and the val field are not otherwise used for idregs,
2115  * so KVM repurposes them for idreg-specific roles.
2116  * The reset() callback returns the KVM sanitised register value, which is the
2117  * same as the host kernel sanitised value if there is no KVM sanitisation.
2118  * The val field is used as a mask indicating the writable fields of the
2119  * idreg: only bits set to 1 are writable from userspace. This mask might
2120  * become unnecessary once all ID registers are enabled as writable from
2121  * userspace.
2122  */
2123 
2124 #define ID_DESC(name)                           \
2125         SYS_DESC(SYS_##name),                   \
2126         .access = access_id_reg,                \
2127         .get_user = get_id_reg                  \
2128 
2129 /* sys_reg_desc initialiser for known cpufeature ID registers */
2130 #define ID_SANITISED(name) {                    \
2131         ID_DESC(name),                          \
2132         .set_user = set_id_reg,                 \
2133         .visibility = id_visibility,            \
2134         .reset = kvm_read_sanitised_id_reg,     \
2135         .val = 0,                               \
2136 }
2137 
2138 /* sys_reg_desc initialiser for known cpufeature ID registers */
2139 #define AA32_ID_SANITISED(name) {               \
2140         ID_DESC(name),                          \
2141         .set_user = set_id_reg,                 \
2142         .visibility = aa32_id_visibility,       \
2143         .reset = kvm_read_sanitised_id_reg,     \
2144         .val = 0,                               \
2145 }
2146 
2147 /* sys_reg_desc initialiser for writable ID registers */
2148 #define ID_WRITABLE(name, mask) {               \
2149         ID_DESC(name),                          \
2150         .set_user = set_id_reg,                 \
2151         .visibility = id_visibility,            \
2152         .reset = kvm_read_sanitised_id_reg,     \
2153         .val = mask,                            \
2154 }
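
     /*
      * For example, ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0)
      * below makes every non-RES0 field of that register writable from
      * userspace, subject to arm64_check_features().
      */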
2155 
2156 /*
2157  * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
2158  * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
2159  * (1 <= crm < 8, 0 <= Op2 < 8).
2160  */
2161 #define ID_UNALLOCATED(crm, op2) {                      \
2162         Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),     \
2163         .access = access_id_reg,                        \
2164         .get_user = get_id_reg,                         \
2165         .set_user = set_id_reg,                         \
2166         .visibility = raz_visibility,                   \
2167         .reset = kvm_read_sanitised_id_reg,             \
2168         .val = 0,                                       \
2169 }
2170 
2171 /*
2172  * sys_reg_desc initialiser for known ID registers that we hide from guests.
2173  * For now, these are exposed just like unallocated ID regs: they appear
2174  * RAZ for the guest.
2175  */
2176 #define ID_HIDDEN(name) {                       \
2177         ID_DESC(name),                          \
2178         .set_user = set_id_reg,                 \
2179         .visibility = raz_visibility,           \
2180         .reset = kvm_read_sanitised_id_reg,     \
2181         .val = 0,                               \
2182 }
2183 
2184 static bool access_sp_el1(struct kvm_vcpu *vcpu,
2185                           struct sys_reg_params *p,
2186                           const struct sys_reg_desc *r)
2187 {
2188         if (p->is_write)
2189                 __vcpu_sys_reg(vcpu, SP_EL1) = p->regval;
2190         else
2191                 p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
2192 
2193         return true;
2194 }
2195 
2196 static bool access_elr(struct kvm_vcpu *vcpu,
2197                        struct sys_reg_params *p,
2198                        const struct sys_reg_desc *r)
2199 {
2200         if (p->is_write)
2201                 vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
2202         else
2203                 p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);
2204 
2205         return true;
2206 }
2207 
2208 static bool access_spsr(struct kvm_vcpu *vcpu,
2209                         struct sys_reg_params *p,
2210                         const struct sys_reg_desc *r)
2211 {
2212         if (p->is_write)
2213                 __vcpu_sys_reg(vcpu, SPSR_EL1) = p->regval;
2214         else
2215                 p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
2216 
2217         return true;
2218 }
2219 
2220 static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2221 {
2222         u64 val = r->val;
2223 
2224         if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
2225                 val |= HCR_E2H;
2226 
2227         return __vcpu_sys_reg(vcpu, r->reg) = val;
2228 }
2229 
2230 static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
2231                                        const struct sys_reg_desc *rd)
2232 {
2233         unsigned int r;
2234 
2235         r = el2_visibility(vcpu, rd);
2236         if (r)
2237                 return r;
2238 
2239         return sve_visibility(vcpu, rd);
2240 }
2241 
2242 static bool access_zcr_el2(struct kvm_vcpu *vcpu,
2243                            struct sys_reg_params *p,
2244                            const struct sys_reg_desc *r)
2245 {
2246         unsigned int vq;
2247 
2248         if (guest_hyp_sve_traps_enabled(vcpu)) {
2249                 kvm_inject_nested_sve_trap(vcpu);
2250                 return true;
2251         }
2252 
2253         if (!p->is_write) {
2254                 p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2);
2255                 return true;
2256         }
2257 
2258         vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
2259         vq = min(vq, vcpu_sve_max_vq(vcpu));
2260         vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2);
2261         return true;
2262 }
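
     /*
      * For illustration: a guest hypervisor writing ZCR_EL2.LEN=15 (a
      * 2048-bit vector length) on a vcpu whose SVE is limited to 512 bits
      * ends up with ZCR_EL2.LEN=3, as the request is clamped to
      * vcpu_sve_max_vq().
      */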
2263 
2264 /*
2265  * Architected system registers.
2266  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
2267  *
2268  * Debug handling: We do trap most, if not all debug related system
2269  * registers. The implementation is good enough to ensure that a guest
2270  * can use these with minimal performance degradation. The drawback is
2271  * that we don't implement any of the external debug architecture.
2272  * This should be revisited if we ever encounter a more demanding
2273  * guest...
2274  */
2275 static const struct sys_reg_desc sys_reg_descs[] = {
2276         DBG_BCR_BVR_WCR_WVR_EL1(0),
2277         DBG_BCR_BVR_WCR_WVR_EL1(1),
2278         { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
2279         { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
2280         DBG_BCR_BVR_WCR_WVR_EL1(2),
2281         DBG_BCR_BVR_WCR_WVR_EL1(3),
2282         DBG_BCR_BVR_WCR_WVR_EL1(4),
2283         DBG_BCR_BVR_WCR_WVR_EL1(5),
2284         DBG_BCR_BVR_WCR_WVR_EL1(6),
2285         DBG_BCR_BVR_WCR_WVR_EL1(7),
2286         DBG_BCR_BVR_WCR_WVR_EL1(8),
2287         DBG_BCR_BVR_WCR_WVR_EL1(9),
2288         DBG_BCR_BVR_WCR_WVR_EL1(10),
2289         DBG_BCR_BVR_WCR_WVR_EL1(11),
2290         DBG_BCR_BVR_WCR_WVR_EL1(12),
2291         DBG_BCR_BVR_WCR_WVR_EL1(13),
2292         DBG_BCR_BVR_WCR_WVR_EL1(14),
2293         DBG_BCR_BVR_WCR_WVR_EL1(15),
2294 
2295         { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
2296         { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
2297         { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
2298                 OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
2299         { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
2300         { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
2301         { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
2302         { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
2303         { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
2304 
2305         { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
2306         { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
2307         // DBGDTR[TR]X_EL0 share the same encoding
2308         { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
2309 
2310         { SYS_DESC(SYS_DBGVCR32_EL2), trap_undef, reset_val, DBGVCR32_EL2, 0 },
2311 
2312         { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
2313 
2314         /*
2315          * ID regs: all ID_SANITISED() entries here must have corresponding
2316          * entries in arm64_ftr_regs[].
2317          */
2318 
2319         /* AArch64 mappings of the AArch32 ID registers */
2320         /* CRm=1 */
2321         AA32_ID_SANITISED(ID_PFR0_EL1),
2322         AA32_ID_SANITISED(ID_PFR1_EL1),
2323         { SYS_DESC(SYS_ID_DFR0_EL1),
2324           .access = access_id_reg,
2325           .get_user = get_id_reg,
2326           .set_user = set_id_dfr0_el1,
2327           .visibility = aa32_id_visibility,
2328           .reset = read_sanitised_id_dfr0_el1,
2329           .val = ID_DFR0_EL1_PerfMon_MASK |
2330                  ID_DFR0_EL1_CopDbg_MASK, },
2331         ID_HIDDEN(ID_AFR0_EL1),
2332         AA32_ID_SANITISED(ID_MMFR0_EL1),
2333         AA32_ID_SANITISED(ID_MMFR1_EL1),
2334         AA32_ID_SANITISED(ID_MMFR2_EL1),
2335         AA32_ID_SANITISED(ID_MMFR3_EL1),
2336 
2337         /* CRm=2 */
2338         AA32_ID_SANITISED(ID_ISAR0_EL1),
2339         AA32_ID_SANITISED(ID_ISAR1_EL1),
2340         AA32_ID_SANITISED(ID_ISAR2_EL1),
2341         AA32_ID_SANITISED(ID_ISAR3_EL1),
2342         AA32_ID_SANITISED(ID_ISAR4_EL1),
2343         AA32_ID_SANITISED(ID_ISAR5_EL1),
2344         AA32_ID_SANITISED(ID_MMFR4_EL1),
2345         AA32_ID_SANITISED(ID_ISAR6_EL1),
2346 
2347         /* CRm=3 */
2348         AA32_ID_SANITISED(MVFR0_EL1),
2349         AA32_ID_SANITISED(MVFR1_EL1),
2350         AA32_ID_SANITISED(MVFR2_EL1),
2351         ID_UNALLOCATED(3,3),
2352         AA32_ID_SANITISED(ID_PFR2_EL1),
2353         ID_HIDDEN(ID_DFR1_EL1),
2354         AA32_ID_SANITISED(ID_MMFR5_EL1),
2355         ID_UNALLOCATED(3,7),
2356 
2357         /* AArch64 ID registers */
2358         /* CRm=4 */
2359         { SYS_DESC(SYS_ID_AA64PFR0_EL1),
2360           .access = access_id_reg,
2361           .get_user = get_id_reg,
2362           .set_user = set_id_reg,
2363           .reset = read_sanitised_id_aa64pfr0_el1,
2364           .val = ~(ID_AA64PFR0_EL1_AMU |
2365                    ID_AA64PFR0_EL1_MPAM |
2366                    ID_AA64PFR0_EL1_SVE |
2367                    ID_AA64PFR0_EL1_RAS |
2368                    ID_AA64PFR0_EL1_GIC |
2369                    ID_AA64PFR0_EL1_AdvSIMD |
2370                    ID_AA64PFR0_EL1_FP), },
2371         ID_SANITISED(ID_AA64PFR1_EL1),
2372         ID_UNALLOCATED(4,2),
2373         ID_UNALLOCATED(4,3),
2374         ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
2375         ID_HIDDEN(ID_AA64SMFR0_EL1),
2376         ID_UNALLOCATED(4,6),
2377         ID_UNALLOCATED(4,7),
2378 
2379         /* CRm=5 */
2380         { SYS_DESC(SYS_ID_AA64DFR0_EL1),
2381           .access = access_id_reg,
2382           .get_user = get_id_reg,
2383           .set_user = set_id_aa64dfr0_el1,
2384           .reset = read_sanitised_id_aa64dfr0_el1,
2385           .val = ID_AA64DFR0_EL1_PMUVer_MASK |
2386                  ID_AA64DFR0_EL1_DebugVer_MASK, },
2387         ID_SANITISED(ID_AA64DFR1_EL1),
2388         ID_UNALLOCATED(5,2),
2389         ID_UNALLOCATED(5,3),
2390         ID_HIDDEN(ID_AA64AFR0_EL1),
2391         ID_HIDDEN(ID_AA64AFR1_EL1),
2392         ID_UNALLOCATED(5,6),
2393         ID_UNALLOCATED(5,7),
2394 
2395         /* CRm=6 */
2396         ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
2397         ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
2398                                         ID_AA64ISAR1_EL1_GPA |
2399                                         ID_AA64ISAR1_EL1_API |
2400                                         ID_AA64ISAR1_EL1_APA)),
2401         ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
2402                                         ID_AA64ISAR2_EL1_APA3 |
2403                                         ID_AA64ISAR2_EL1_GPA3)),
2404         ID_UNALLOCATED(6,3),
2405         ID_UNALLOCATED(6,4),
2406         ID_UNALLOCATED(6,5),
2407         ID_UNALLOCATED(6,6),
2408         ID_UNALLOCATED(6,7),
2409 
2410         /* CRm=7 */
2411         ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 |
2412                                         ID_AA64MMFR0_EL1_TGRAN4_2 |
2413                                         ID_AA64MMFR0_EL1_TGRAN64_2 |
2414                                         ID_AA64MMFR0_EL1_TGRAN16_2)),
2415         ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
2416                                         ID_AA64MMFR1_EL1_HCX |
2417                                         ID_AA64MMFR1_EL1_TWED |
2418                                         ID_AA64MMFR1_EL1_XNX |
2419                                         ID_AA64MMFR1_EL1_VH |
2420                                         ID_AA64MMFR1_EL1_VMIDBits)),
2421         ID_WRITABLE(ID_AA64MMFR2_EL1, ~(ID_AA64MMFR2_EL1_RES0 |
2422                                         ID_AA64MMFR2_EL1_EVT |
2423                                         ID_AA64MMFR2_EL1_FWB |
2424                                         ID_AA64MMFR2_EL1_IDS |
2425                                         ID_AA64MMFR2_EL1_NV |
2426                                         ID_AA64MMFR2_EL1_CCIDX)),
2427         ID_SANITISED(ID_AA64MMFR3_EL1),
2428         ID_SANITISED(ID_AA64MMFR4_EL1),
2429         ID_UNALLOCATED(7,5),
2430         ID_UNALLOCATED(7,6),
2431         ID_UNALLOCATED(7,7),
2432 
2433         { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
2434         { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
2435         { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
2436 
2437         MTE_REG(RGSR_EL1),
2438         MTE_REG(GCR_EL1),
2439 
2440         { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
2441         { SYS_DESC(SYS_TRFCR_EL1), undef_access },
2442         { SYS_DESC(SYS_SMPRI_EL1), undef_access },
2443         { SYS_DESC(SYS_SMCR_EL1), undef_access },
2444         { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
2445         { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
2446         { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
2447         { SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0 },
2448 
2449         PTRAUTH_KEY(APIA),
2450         PTRAUTH_KEY(APIB),
2451         PTRAUTH_KEY(APDA),
2452         PTRAUTH_KEY(APDB),
2453         PTRAUTH_KEY(APGA),
2454 
2455         { SYS_DESC(SYS_SPSR_EL1), access_spsr},
2456         { SYS_DESC(SYS_ELR_EL1), access_elr},
2457 
2458         { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
2459         { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
2460         { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
2461 
2462         { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
2463         { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
2464         { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
2465         { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
2466         { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
2467         { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
2468         { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
2469         { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
2470 
2471         MTE_REG(TFSR_EL1),
2472         MTE_REG(TFSRE0_EL1),
2473 
2474         { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
2475         { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
2476 
2477         { SYS_DESC(SYS_PMSCR_EL1), undef_access },
2478         { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
2479         { SYS_DESC(SYS_PMSICR_EL1), undef_access },
2480         { SYS_DESC(SYS_PMSIRR_EL1), undef_access },
2481         { SYS_DESC(SYS_PMSFCR_EL1), undef_access },
2482         { SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
2483         { SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
2484         { SYS_DESC(SYS_PMSIDR_EL1), undef_access },
2485         { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
2486         { SYS_DESC(SYS_PMBPTR_EL1), undef_access },
2487         { SYS_DESC(SYS_PMBSR_EL1), undef_access },
2488         /* PMBIDR_EL1 is not trapped */
2489 
2490         { PMU_SYS_REG(PMINTENSET_EL1),
2491           .access = access_pminten, .reg = PMINTENSET_EL1,
2492           .get_user = get_pmreg, .set_user = set_pmreg },
2493         { PMU_SYS_REG(PMINTENCLR_EL1),
2494           .access = access_pminten, .reg = PMINTENSET_EL1,
2495           .get_user = get_pmreg, .set_user = set_pmreg },
2496         { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
2497 
2498         { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
2499         { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
2500         { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
2501         { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
2502 
2503         { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
2504         { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
2505         { SYS_DESC(SYS_LORN_EL1), trap_loregion },
2506         { SYS_DESC(SYS_LORC_EL1), trap_loregion },
2507         { SYS_DESC(SYS_LORID_EL1), trap_loregion },
2508 
2509         { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
2510         { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
2511 
2512         { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
2513         { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
2514         { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
2515         { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
2516         { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
2517         { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
2518         { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
2519         { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
2520         { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
2521         { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
2522         { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
2523         { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
2524 
2525         { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
2526         { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
2527 
2528         { SYS_DESC(SYS_ACCDATA_EL1), undef_access },
2529 
2530         { SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
2531 
2532         { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
2533 
2534         { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
2535         { SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
2536           .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
2537         { SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
2538         { SYS_DESC(SYS_SMIDR_EL1), undef_access },
2539         { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
2540         ID_WRITABLE(CTR_EL0, CTR_EL0_DIC_MASK |
2541                              CTR_EL0_IDC_MASK |
2542                              CTR_EL0_DminLine_MASK |
2543                              CTR_EL0_IminLine_MASK),
2544         { SYS_DESC(SYS_SVCR), undef_access },
2545 
2546         { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
2547           .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
2548         { PMU_SYS_REG(PMCNTENSET_EL0),
2549           .access = access_pmcnten, .reg = PMCNTENSET_EL0,
2550           .get_user = get_pmreg, .set_user = set_pmreg },
2551         { PMU_SYS_REG(PMCNTENCLR_EL0),
2552           .access = access_pmcnten, .reg = PMCNTENSET_EL0,
2553           .get_user = get_pmreg, .set_user = set_pmreg },
2554         { PMU_SYS_REG(PMOVSCLR_EL0),
2555           .access = access_pmovs, .reg = PMOVSSET_EL0,
2556           .get_user = get_pmreg, .set_user = set_pmreg },
2557         /*
2558          * PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was
2559          * previously (and pointlessly) advertised...
2560          */
2561         { PMU_SYS_REG(PMSWINC_EL0),
2562           .get_user = get_raz_reg, .set_user = set_wi_reg,
2563           .access = access_pmswinc, .reset = NULL },
2564         { PMU_SYS_REG(PMSELR_EL0),
2565           .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
2566         { PMU_SYS_REG(PMCEID0_EL0),
2567           .access = access_pmceid, .reset = NULL },
2568         { PMU_SYS_REG(PMCEID1_EL0),
2569           .access = access_pmceid, .reset = NULL },
2570         { PMU_SYS_REG(PMCCNTR_EL0),
2571           .access = access_pmu_evcntr, .reset = reset_unknown,
2572           .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr},
2573         { PMU_SYS_REG(PMXEVTYPER_EL0),
2574           .access = access_pmu_evtyper, .reset = NULL },
2575         { PMU_SYS_REG(PMXEVCNTR_EL0),
2576           .access = access_pmu_evcntr, .reset = NULL },
2577         /*
2578          * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
2579          * in 32bit mode. Here we choose to reset it as zero for consistency.
2580          */
2581         { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
2582           .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
2583         { PMU_SYS_REG(PMOVSSET_EL0),
2584           .access = access_pmovs, .reg = PMOVSSET_EL0,
2585           .get_user = get_pmreg, .set_user = set_pmreg },
2586 
2587         { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
2588         { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
2589         { SYS_DESC(SYS_TPIDR2_EL0), undef_access },
2590 
2591         { SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
2592 
2593         { SYS_DESC(SYS_AMCR_EL0), undef_access },
2594         { SYS_DESC(SYS_AMCFGR_EL0), undef_access },
2595         { SYS_DESC(SYS_AMCGCR_EL0), undef_access },
2596         { SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
2597         { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
2598         { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
2599         { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
2600         { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
2601         AMU_AMEVCNTR0_EL0(0),
2602         AMU_AMEVCNTR0_EL0(1),
2603         AMU_AMEVCNTR0_EL0(2),
2604         AMU_AMEVCNTR0_EL0(3),
2605         AMU_AMEVCNTR0_EL0(4),
2606         AMU_AMEVCNTR0_EL0(5),
2607         AMU_AMEVCNTR0_EL0(6),
2608         AMU_AMEVCNTR0_EL0(7),
2609         AMU_AMEVCNTR0_EL0(8),
2610         AMU_AMEVCNTR0_EL0(9),
2611         AMU_AMEVCNTR0_EL0(10),
2612         AMU_AMEVCNTR0_EL0(11),
2613         AMU_AMEVCNTR0_EL0(12),
2614         AMU_AMEVCNTR0_EL0(13),
2615         AMU_AMEVCNTR0_EL0(14),
2616         AMU_AMEVCNTR0_EL0(15),
2617         AMU_AMEVTYPER0_EL0(0),
2618         AMU_AMEVTYPER0_EL0(1),
2619         AMU_AMEVTYPER0_EL0(2),
2620         AMU_AMEVTYPER0_EL0(3),
2621         AMU_AMEVTYPER0_EL0(4),
2622         AMU_AMEVTYPER0_EL0(5),
2623         AMU_AMEVTYPER0_EL0(6),
2624         AMU_AMEVTYPER0_EL0(7),
2625         AMU_AMEVTYPER0_EL0(8),
2626         AMU_AMEVTYPER0_EL0(9),
2627         AMU_AMEVTYPER0_EL0(10),
2628         AMU_AMEVTYPER0_EL0(11),
2629         AMU_AMEVTYPER0_EL0(12),
2630         AMU_AMEVTYPER0_EL0(13),
2631         AMU_AMEVTYPER0_EL0(14),
2632         AMU_AMEVTYPER0_EL0(15),
2633         AMU_AMEVCNTR1_EL0(0),
2634         AMU_AMEVCNTR1_EL0(1),
2635         AMU_AMEVCNTR1_EL0(2),
2636         AMU_AMEVCNTR1_EL0(3),
2637         AMU_AMEVCNTR1_EL0(4),
2638         AMU_AMEVCNTR1_EL0(5),
2639         AMU_AMEVCNTR1_EL0(6),
2640         AMU_AMEVCNTR1_EL0(7),
2641         AMU_AMEVCNTR1_EL0(8),
2642         AMU_AMEVCNTR1_EL0(9),
2643         AMU_AMEVCNTR1_EL0(10),
2644         AMU_AMEVCNTR1_EL0(11),
2645         AMU_AMEVCNTR1_EL0(12),
2646         AMU_AMEVCNTR1_EL0(13),
2647         AMU_AMEVCNTR1_EL0(14),
2648         AMU_AMEVCNTR1_EL0(15),
2649         AMU_AMEVTYPER1_EL0(0),
2650         AMU_AMEVTYPER1_EL0(1),
2651         AMU_AMEVTYPER1_EL0(2),
2652         AMU_AMEVTYPER1_EL0(3),
2653         AMU_AMEVTYPER1_EL0(4),
2654         AMU_AMEVTYPER1_EL0(5),
2655         AMU_AMEVTYPER1_EL0(6),
2656         AMU_AMEVTYPER1_EL0(7),
2657         AMU_AMEVTYPER1_EL0(8),
2658         AMU_AMEVTYPER1_EL0(9),
2659         AMU_AMEVTYPER1_EL0(10),
2660         AMU_AMEVTYPER1_EL0(11),
2661         AMU_AMEVTYPER1_EL0(12),
2662         AMU_AMEVTYPER1_EL0(13),
2663         AMU_AMEVTYPER1_EL0(14),
2664         AMU_AMEVTYPER1_EL0(15),
2665 
2666         { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
2667         { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
2668         { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
2669         { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
2670         { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
2671 
2672         /* PMEVCNTRn_EL0 */
2673         PMU_PMEVCNTR_EL0(0),
2674         PMU_PMEVCNTR_EL0(1),
2675         PMU_PMEVCNTR_EL0(2),
2676         PMU_PMEVCNTR_EL0(3),
2677         PMU_PMEVCNTR_EL0(4),
2678         PMU_PMEVCNTR_EL0(5),
2679         PMU_PMEVCNTR_EL0(6),
2680         PMU_PMEVCNTR_EL0(7),
2681         PMU_PMEVCNTR_EL0(8),
2682         PMU_PMEVCNTR_EL0(9),
2683         PMU_PMEVCNTR_EL0(10),
2684         PMU_PMEVCNTR_EL0(11),
2685         PMU_PMEVCNTR_EL0(12),
2686         PMU_PMEVCNTR_EL0(13),
2687         PMU_PMEVCNTR_EL0(14),
2688         PMU_PMEVCNTR_EL0(15),
2689         PMU_PMEVCNTR_EL0(16),
2690         PMU_PMEVCNTR_EL0(17),
2691         PMU_PMEVCNTR_EL0(18),
2692         PMU_PMEVCNTR_EL0(19),
2693         PMU_PMEVCNTR_EL0(20),
2694         PMU_PMEVCNTR_EL0(21),
2695         PMU_PMEVCNTR_EL0(22),
2696         PMU_PMEVCNTR_EL0(23),
2697         PMU_PMEVCNTR_EL0(24),
2698         PMU_PMEVCNTR_EL0(25),
2699         PMU_PMEVCNTR_EL0(26),
2700         PMU_PMEVCNTR_EL0(27),
2701         PMU_PMEVCNTR_EL0(28),
2702         PMU_PMEVCNTR_EL0(29),
2703         PMU_PMEVCNTR_EL0(30),
2704         /* PMEVTYPERn_EL0 */
2705         PMU_PMEVTYPER_EL0(0),
2706         PMU_PMEVTYPER_EL0(1),
2707         PMU_PMEVTYPER_EL0(2),
2708         PMU_PMEVTYPER_EL0(3),
2709         PMU_PMEVTYPER_EL0(4),
2710         PMU_PMEVTYPER_EL0(5),
2711         PMU_PMEVTYPER_EL0(6),
2712         PMU_PMEVTYPER_EL0(7),
2713         PMU_PMEVTYPER_EL0(8),
2714         PMU_PMEVTYPER_EL0(9),
2715         PMU_PMEVTYPER_EL0(10),
2716         PMU_PMEVTYPER_EL0(11),
2717         PMU_PMEVTYPER_EL0(12),
2718         PMU_PMEVTYPER_EL0(13),
2719         PMU_PMEVTYPER_EL0(14),
2720         PMU_PMEVTYPER_EL0(15),
2721         PMU_PMEVTYPER_EL0(16),
2722         PMU_PMEVTYPER_EL0(17),
2723         PMU_PMEVTYPER_EL0(18),
2724         PMU_PMEVTYPER_EL0(19),
2725         PMU_PMEVTYPER_EL0(20),
2726         PMU_PMEVTYPER_EL0(21),
2727         PMU_PMEVTYPER_EL0(22),
2728         PMU_PMEVTYPER_EL0(23),
2729         PMU_PMEVTYPER_EL0(24),
2730         PMU_PMEVTYPER_EL0(25),
2731         PMU_PMEVTYPER_EL0(26),
2732         PMU_PMEVTYPER_EL0(27),
2733         PMU_PMEVTYPER_EL0(28),
2734         PMU_PMEVTYPER_EL0(29),
2735         PMU_PMEVTYPER_EL0(30),
2736         /*
2737          * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
2738          * in 32bit mode. Here we choose to reset it as zero for consistency.
2739          */
2740         { PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
2741           .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
2742 
2743         EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
2744         EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
2745         EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
2746         EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
2747         EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
2748         EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
2749         EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
2750         EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
2751         EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
2752         EL2_REG_VNCR(HFGWTR_EL2, reset_val, 0),
2753         EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
2754         EL2_REG_VNCR(HACR_EL2, reset_val, 0),
2755 
2756         { SYS_DESC(SYS_ZCR_EL2), .access = access_zcr_el2, .reset = reset_val,
2757           .visibility = sve_el2_visibility, .reg = ZCR_EL2 },
2758 
2759         EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
2760 
2761         EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
2762         EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
2763         EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
2764         EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
2765         EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
2766 
2767         { SYS_DESC(SYS_DACR32_EL2), trap_undef, reset_unknown, DACR32_EL2 },
2768         EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0),
2769         EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0),
2770         EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0),
2771         EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
2772         EL2_REG_REDIR(ELR_EL2, reset_val, 0),
2773         { SYS_DESC(SYS_SP_EL1), access_sp_el1},
2774 
2775         /* AArch32 SPSR_* are RES0 if trapped from a NV guest */
2776         { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi,
2777           .visibility = hidden_user_visibility },
2778         { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi,
2779           .visibility = hidden_user_visibility },
2780         { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi,
2781           .visibility = hidden_user_visibility },
2782         { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi,
2783           .visibility = hidden_user_visibility },
2784 
2785         { SYS_DESC(SYS_IFSR32_EL2), trap_undef, reset_unknown, IFSR32_EL2 },
2786         EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
2787         EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
2788         EL2_REG_REDIR(ESR_EL2, reset_val, 0),
2789         { SYS_DESC(SYS_FPEXC32_EL2), trap_undef, reset_val, FPEXC32_EL2, 0x700 },
2790 
2791         EL2_REG_REDIR(FAR_EL2, reset_val, 0),
2792         EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
2793 
2794         EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
2795         EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
2796 
2797         EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
2798         EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
2799         { SYS_DESC(SYS_RMR_EL2), trap_undef },
2800 
2801         EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
2802         EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
2803 
2804         EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
2805         EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
2806 
2807         EL12_REG(CNTKCTL, access_rw, reset_val, 0),
2808 
2809         EL2_REG(SP_EL2, NULL, reset_unknown, 0),
2810 };
2811 
2812 static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vcpu, u32 instr)
2813 {
2814         struct kvm *kvm = vcpu->kvm;
2815         u8 CRm = sys_reg_CRm(instr);
2816 
2817         if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
2818             !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
2819                 return false;
2820 
2821         if (CRm == TLBI_CRm_nROS &&
2822             !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
2823                 return false;
2824 
2825         return true;
2826 }
2827 
2828 static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2829                            const struct sys_reg_desc *r)
2830 {
2831         u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
2832 
2833         if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) {
2834                 kvm_inject_undefined(vcpu);
2835                 return false;
2836         }
2837 
2838         write_lock(&vcpu->kvm->mmu_lock);
2839 
2840         /*
2841          * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
2842          * corresponding VMIDs.
2843          */
2844         kvm_nested_s2_unmap(vcpu->kvm);
2845 
2846         write_unlock(&vcpu->kvm->mmu_lock);
2847 
2848         return true;
2849 }
2850 
2851 static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vcpu, u32 instr)
2852 {
2853         struct kvm *kvm = vcpu->kvm;
2854         u8 CRm = sys_reg_CRm(instr);
2855         u8 Op2 = sys_reg_Op2(instr);
2856 
2857         if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
2858             !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
2859                 return false;
2860 
2861         if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) &&
2862             !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
2863                 return false;
2864 
2865         if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) &&
2866             !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
2867                 return false;
2868 
2869         if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) &&
2870             !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
2871                 return false;
2872 
2873         return true;
2874 }
2875 
2876 /* Only defined here as this is an internal "abstraction" */
2877 union tlbi_info {
2878         struct {
2879                 u64     start;
2880                 u64     size;
2881         } range;
2882 
2883         struct {
2884                 u64     addr;
2885         } ipa;
2886 
2887         struct {
2888                 u64     addr;
2889                 u32     encoding;
2890         } va;
2891 };
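     /*
      * One of these is passed to the per-MMU callback handed to
      * kvm_s2_mmu_iterate_by_vmid() below, so that each TLBI flavour can
      * describe the region it affects.
      */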
2892 
2893 static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
2894                                const union tlbi_info *info)
2895 {
2896         kvm_stage2_unmap_range(mmu, info->range.start, info->range.size);
2897 }
2898 
2899 static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2900                                 const struct sys_reg_desc *r)
2901 {
2902         u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
2903         u64 limit, vttbr;
2904 
2905         if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding)) {
2906                 kvm_inject_undefined(vcpu);
2907                 return false;
2908         }
2909 
2910         vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
2911         limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
2912 
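             /* Unmap the entire IPA space ([0, 1 << PARange)) for the matching VMID */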
2913         kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
2914                                    &(union tlbi_info) {
2915                                            .range = {
2916                                                    .start = 0,
2917                                                    .size = limit,
2918                                            },
2919                                    },
2920                                    s2_mmu_unmap_range);
2921 
2922         return true;
2923 }
2924 
2925 static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2926                               const struct sys_reg_desc *r)
2927 {
2928         u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
2929         u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
2930         u64 base, range, tg, num, scale;
2931         int shift;
2932 
2933         if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) {
2934                 kvm_inject_undefined(vcpu);
2935                 return false;
2936         }
2937 
2938         /*
2939          * Because the shadow S2 structure doesn't necessarily reflect that
2940          * of the guest's S2 (different base granule size, for example), we
2941          * decide to ignore TTL and only use the described range.
2942          */
2943         tg      = FIELD_GET(GENMASK(47, 46), p->regval);
2944         scale   = FIELD_GET(GENMASK(45, 44), p->regval);
2945         num     = FIELD_GET(GENMASK(43, 39), p->regval);
2946         base    = p->regval & GENMASK(36, 0);
2947 
2948         switch (tg) {
2949         case 1:
2950                 shift = 12;
2951                 break;
2952         case 2:
2953                 shift = 14;
2954                 break;
2955         case 3:
2956         default:                /* IMPDEF: handle tg==0 as 64k */
2957                 shift = 16;
2958                 break;
2959         }
2960 
2961         base <<= shift;
2962         range = __TLBI_RANGE_PAGES(num, scale) << shift;
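             /*
              * Illustration (assuming the usual __TLBI_RANGE_PAGES() expansion
              * of (num + 1) << (5 * scale + 1) pages): with a 4kB granule
              * (shift == 12), num == 0 and scale == 0, this amounts to 2 pages,
              * i.e. an 8kB range starting at base.
              */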
2963 
2964         kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
2965                                    &(union tlbi_info) {
2966                                            .range = {
2967                                                    .start = base,
2968                                                    .size = range,
2969                                            },
2970                                    },
2971                                    s2_mmu_unmap_range);
2972 
2973         return true;
2974 }
2975 
2976 static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
2977                              const union tlbi_info *info)
2978 {
2979         unsigned long max_size;
2980         u64 base_addr;
2981 
2982         /*
2983          * We drop a number of things from the supplied value:
2984          *
2985          * - NS bit: we're non-secure only.
2986          *
2987          * - IPA[51:48]: We don't support 52bit IPA just yet...
2988          *
2989  * And of course, shift the IPA field to form an actual byte address.
2990          */
2991         base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
2992         max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
2993         base_addr &= ~(max_size - 1);
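             /*
              * base_addr is aligned down so that the unmap below covers the
              * whole block that may have been cached for this IPA.
              */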
2994 
2995         kvm_stage2_unmap_range(mmu, base_addr, max_size);
2996 }
2997 
2998 static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2999                              const struct sys_reg_desc *r)
3000 {
3001         u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3002         u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3003 
3004         if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding)) {
3005                 kvm_inject_undefined(vcpu);
3006                 return false;
3007         }
3008 
3009         kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3010                                    &(union tlbi_info) {
3011                                            .ipa = {
3012                                                    .addr = p->regval,
3013                                            },
3014                                    },
3015                                    s2_mmu_unmap_ipa);
3016 
3017         return true;
3018 }
3019 
3020 static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
3021                              const union tlbi_info *info)
3022 {
3023         WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
3024 }
3025 
3026 static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3027                             const struct sys_reg_desc *r)
3028 {
3029         u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3030         u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3031 
3032         /*
3033          * If we're here, this is because we've trapped on a EL1 TLBI
3034          * instruction that affects the EL1 translation regime while
3035          * we're running in a context that doesn't allow us to let the
3036          * HW do its thing (aka vEL2):
3037          *
3038          * - HCR_EL2.E2H == 0 : a non-VHE guest
3039          * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
3040          *
3041          * We don't expect these helpers to ever be called when running
3042          * in a vEL1 context.
3043          */
3044 
3045         WARN_ON(!vcpu_is_el2(vcpu));
3046 
3047         if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding)) {
3048                 kvm_inject_undefined(vcpu);
3049                 return false;
3050         }
3051 
3052         kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3053                                    &(union tlbi_info) {
3054                                            .va = {
3055                                                    .addr = p->regval,
3056                                                    .encoding = sys_encoding,
3057                                            },
3058                                    },
3059                                    s2_mmu_tlbi_s1e1);
3060 
3061         return true;
3062 }
3063 
3064 #define SYS_INSN(insn, access_fn)                                       \
3065         {                                                               \
3066                 SYS_DESC(OP_##insn),                                    \
3067                 .access = (access_fn),                                  \
3068         }
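     /*
      * For instance, SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1) expands to a
      * descriptor built from the OP_TLBI_VMALLE1 encoding with its .access
      * handler set to handle_tlbi_el1.
      */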
3069 
3070 static struct sys_reg_desc sys_insn_descs[] = {
3071         { SYS_DESC(SYS_DC_ISW), access_dcsw },
3072         { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
3073         { SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
3074         { SYS_DESC(SYS_DC_CSW), access_dcsw },
3075         { SYS_DESC(SYS_DC_CGSW), access_dcgsw },
3076         { SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
3077         { SYS_DESC(SYS_DC_CISW), access_dcsw },
3078         { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
3079         { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
3080 
3081         SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1),
3082         SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1),
3083         SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1),
3084         SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1),
3085         SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1),
3086         SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1),
3087 
3088         SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1),
3089         SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1),
3090         SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1),
3091         SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1),
3092 
3093         SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
3094         SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
3095         SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
3096         SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
3097         SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
3098         SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),
3099 
3100         SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1),
3101         SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1),
3102         SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1),
3103         SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1),
3104 
3105         SYS_INSN(TLBI_RVAE1, handle_tlbi_el1),
3106         SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1),
3107         SYS_INSN(TLBI_RVALE1, handle_tlbi_el1),
3108         SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1),
3109 
3110         SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
3111         SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
3112         SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
3113         SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
3114         SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
3115         SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
3116 
3117         SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1),
3118         SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1),
3119         SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1),
3120         SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1),
3121         SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1),
3122         SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1),
3123 
3124         SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1),
3125         SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1),
3126         SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1),
3127         SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1),
3128 
3129         SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1),
3130         SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1),
3131         SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1),
3132         SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1),
3133         SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1),
3134         SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1),
3135 
3136         SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1),
3137         SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1),
3138         SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1),
3139         SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1),
3140 
3141         SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1),
3142         SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1),
3143         SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1),
3144         SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1),
3145 
3146         SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1),
3147         SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1),
3148         SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1),
3149         SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1),
3150         SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
3151         SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),
3152 
3153         SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
3154         SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
3155         SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
3156         SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),
3157 
3158         SYS_INSN(TLBI_ALLE2OS, trap_undef),
3159         SYS_INSN(TLBI_VAE2OS, trap_undef),
3160         SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
3161         SYS_INSN(TLBI_VALE2OS, trap_undef),
3162         SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),
3163 
3164         SYS_INSN(TLBI_RVAE2IS, trap_undef),
3165         SYS_INSN(TLBI_RVALE2IS, trap_undef),
3166 
3167         SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
3168         SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
3169         SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
3170         SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
3171         SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is),
3172         SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is),
3173         SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is),
3174         SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
3175         SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
3176         SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
3177         SYS_INSN(TLBI_RVAE2OS, trap_undef),
3178         SYS_INSN(TLBI_RVALE2OS, trap_undef),
3179         SYS_INSN(TLBI_RVAE2, trap_undef),
3180         SYS_INSN(TLBI_RVALE2, trap_undef),
3181         SYS_INSN(TLBI_ALLE1, handle_alle1is),
3182         SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
3183 
3184         SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
3185         SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is),
3186         SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
3187         SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),
3188 
3189         SYS_INSN(TLBI_ALLE2OSNXS, trap_undef),
3190         SYS_INSN(TLBI_VAE2OSNXS, trap_undef),
3191         SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
3192         SYS_INSN(TLBI_VALE2OSNXS, trap_undef),
3193         SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),
3194 
3195         SYS_INSN(TLBI_RVAE2ISNXS, trap_undef),
3196         SYS_INSN(TLBI_RVALE2ISNXS, trap_undef),
3197         SYS_INSN(TLBI_ALLE2ISNXS, trap_undef),
3198         SYS_INSN(TLBI_VAE2ISNXS, trap_undef),
3199 
3200         SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
3201         SYS_INSN(TLBI_VALE2ISNXS, trap_undef),
3202         SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
3203         SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
3204         SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
3205         SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is),
3206         SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is),
3207         SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is),
3208         SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
3209         SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
3210         SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
3211         SYS_INSN(TLBI_RVAE2OSNXS, trap_undef),
3212         SYS_INSN(TLBI_RVALE2OSNXS, trap_undef),
3213         SYS_INSN(TLBI_RVAE2NXS, trap_undef),
3214         SYS_INSN(TLBI_RVALE2NXS, trap_undef),
3215         SYS_INSN(TLBI_ALLE2NXS, trap_undef),
3216         SYS_INSN(TLBI_VAE2NXS, trap_undef),
3217         SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
3218         SYS_INSN(TLBI_VALE2NXS, trap_undef),
3219         SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
3220 };
3221 
3222 static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
3223                         struct sys_reg_params *p,
3224                         const struct sys_reg_desc *r)
3225 {
3226         if (p->is_write) {
3227                 return ignore_write(vcpu, p);
3228         } else {
3229                 u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
3230                 u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
3231 
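                     /*
                      * Synthesise DBGDIDR from ID_AA64DFR0_EL1: WRPs in bits
                      * [31:28], BRPs in [27:24], CTX_CMPs in [23:20], debug
                      * version in [19:16]; bit 15 is RES1, and bits 14/12
                      * (nSUHD_imp/SE_imp) follow EL3 support (descriptive
                      * note, per the AArch32 DBGDIDR layout).
                      */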
3232                 p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
3233                              (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
3234                              (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
3235                              (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
3236                              (1 << 15) | (el3 << 14) | (el3 << 12));
3237                 return true;
3238         }
3239 }
3240 
3241 /*
3242  * AArch32 debug register mappings
3243  *
3244  * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
3245  * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
3246  *
3247  * None of the other registers share their location, so treat them as
3248  * if they were 64bit.
3249  */
3250 #define DBG_BCR_BVR_WCR_WVR(n)                                                \
3251         /* DBGBVRn */                                                         \
3252         { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
3253         /* DBGBCRn */                                                         \
3254         { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },           \
3255         /* DBGWVRn */                                                         \
3256         { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },           \
3257         /* DBGWCRn */                                                         \
3258         { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
3259 
3260 #define DBGBXVR(n)                                                            \
3261         { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
3262 
3263 /*
3264  * Trapped cp14 registers. We generally ignore most of the external
3265  * debug registers, on the principle that they don't really make sense to a
3266  * guest. Revisit this one day, should this principle change.
3267  */
3268 static const struct sys_reg_desc cp14_regs[] = {
3269         /* DBGDIDR */
3270         { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
3271         /* DBGDTRRXext */
3272         { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
3273 
3274         DBG_BCR_BVR_WCR_WVR(0),
3275         /* DBGDSCRint */
3276         { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
3277         DBG_BCR_BVR_WCR_WVR(1),
3278         /* DBGDCCINT */
3279         { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
3280         /* DBGDSCRext */
3281         { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
3282         DBG_BCR_BVR_WCR_WVR(2),
3283         /* DBGDTR[RT]Xint */
3284         { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
3285         /* DBGDTR[RT]Xext */
3286         { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
3287         DBG_BCR_BVR_WCR_WVR(3),
3288         DBG_BCR_BVR_WCR_WVR(4),
3289         DBG_BCR_BVR_WCR_WVR(5),
3290         /* DBGWFAR */
3291         { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
3292         /* DBGOSECCR */
3293         { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
3294         DBG_BCR_BVR_WCR_WVR(6),
3295         /* DBGVCR */
3296         { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
3297         DBG_BCR_BVR_WCR_WVR(7),
3298         DBG_BCR_BVR_WCR_WVR(8),
3299         DBG_BCR_BVR_WCR_WVR(9),
3300         DBG_BCR_BVR_WCR_WVR(10),
3301         DBG_BCR_BVR_WCR_WVR(11),
3302         DBG_BCR_BVR_WCR_WVR(12),
3303         DBG_BCR_BVR_WCR_WVR(13),
3304         DBG_BCR_BVR_WCR_WVR(14),
3305         DBG_BCR_BVR_WCR_WVR(15),
3306 
3307         /* DBGDRAR (32bit) */
3308         { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
3309 
3310         DBGBXVR(0),
3311         /* DBGOSLAR */
3312         { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
3313         DBGBXVR(1),
3314         /* DBGOSLSR */
3315         { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
3316         DBGBXVR(2),
3317         DBGBXVR(3),
3318         /* DBGOSDLR */
3319         { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
3320         DBGBXVR(4),
3321         /* DBGPRCR */
3322         { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
3323         DBGBXVR(5),
3324         DBGBXVR(6),
3325         DBGBXVR(7),
3326         DBGBXVR(8),
3327         DBGBXVR(9),
3328         DBGBXVR(10),
3329         DBGBXVR(11),
3330         DBGBXVR(12),
3331         DBGBXVR(13),
3332         DBGBXVR(14),
3333         DBGBXVR(15),
3334 
3335         /* DBGDSAR (32bit) */
3336         { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
3337 
3338         /* DBGDEVID2 */
3339         { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
3340         /* DBGDEVID1 */
3341         { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
3342         /* DBGDEVID */
3343         { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
3344         /* DBGCLAIMSET */
3345         { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
3346         /* DBGCLAIMCLR */
3347         { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
3348         /* DBGAUTHSTATUS */
3349         { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
3350 };
3351 
3352 /* Trapped cp14 64bit registers */
3353 static const struct sys_reg_desc cp14_64_regs[] = {
3354         /* DBGDRAR (64bit) */
3355         { Op1( 0), CRm( 1), .access = trap_raz_wi },
3356 
3357         /* DBGDSAR (64bit) */
3358         { Op1( 0), CRm( 2), .access = trap_raz_wi },
3359 };
3360 
3361 #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)                  \
3362         AA32(_map),                                                     \
3363         Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),                     \
3364         .visibility = pmu_visibility
3365 
3366 /* Macro to expand the PMEVCNTRn register */
3367 #define PMU_PMEVCNTR(n)                                                 \
3368         { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,                           \
3369           (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),                  \
3370           .access = access_pmu_evcntr }
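     /*
      * e.g. PMU_PMEVCNTR(10) yields CRn == 0b1110, CRm == 0b1001 and
      * Op2 == 0b010, i.e. the AArch32 PMEVCNTR10 encoding.
      */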
3371 
3372 /* Macro to expand the PMEVTYPERn register */
3373 #define PMU_PMEVTYPER(n)                                                \
3374         { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,                           \
3375           (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),                  \
3376           .access = access_pmu_evtyper }
3377 /*
3378  * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
3379  * depending on the way they are accessed (as a 32bit or a 64bit
3380  * register).
3381  */
3382 static const struct sys_reg_desc cp15_regs[] = {
3383         { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
3384         { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
3385         /* ACTLR */
3386         { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
3387         /* ACTLR2 */
3388         { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
3389         { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
3390         { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
3391         /* TTBCR */
3392         { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
3393         /* TTBCR2 */
3394         { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
3395         { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
3396         /* DFSR */
3397         { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
3398         { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
3399         /* ADFSR */
3400         { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
3401         /* AIFSR */
3402         { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
3403         /* DFAR */
3404         { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
3405         /* IFAR */
3406         { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
3407 
3408         /*
3409          * DC{C,I,CI}SW operations:
3410          */
3411         { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
3412         { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
3413         { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
3414 
3415         /* PMU */
3416         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
3417         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
3418         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
3419         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
3420         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
3421         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
3422         { CP15_PMU_SYS_REG(LO,     0, 9, 12, 6), .access = access_pmceid },
3423         { CP15_PMU_SYS_REG(LO,     0, 9, 12, 7), .access = access_pmceid },
3424         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
3425         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
3426         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
3427         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
3428         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
3429         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
3430         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
3431         { CP15_PMU_SYS_REG(HI,     0, 9, 14, 4), .access = access_pmceid },
3432         { CP15_PMU_SYS_REG(HI,     0, 9, 14, 5), .access = access_pmceid },
3433         /* PMMIR */
3434         { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
3435 
3436         /* PRRR/MAIR0 */
3437         { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
3438         /* NMRR/MAIR1 */
3439         { AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
3440         /* AMAIR0 */
3441         { AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
3442         /* AMAIR1 */
3443         { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
3444 
3445         /* ICC_SRE */
3446         { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
3447 
3448         { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
3449 
3450         /* Arch timers */
3451         { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
3452         { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
3453 
3454         /* PMEVCNTRn */
3455         PMU_PMEVCNTR(0),
3456         PMU_PMEVCNTR(1),
3457         PMU_PMEVCNTR(2),
3458         PMU_PMEVCNTR(3),
3459         PMU_PMEVCNTR(4),
3460         PMU_PMEVCNTR(5),
3461         PMU_PMEVCNTR(6),
3462         PMU_PMEVCNTR(7),
3463         PMU_PMEVCNTR(8),
3464         PMU_PMEVCNTR(9),
3465         PMU_PMEVCNTR(10),
3466         PMU_PMEVCNTR(11),
3467         PMU_PMEVCNTR(12),
3468         PMU_PMEVCNTR(13),
3469         PMU_PMEVCNTR(14),
3470         PMU_PMEVCNTR(15),
3471         PMU_PMEVCNTR(16),
3472         PMU_PMEVCNTR(17),
3473         PMU_PMEVCNTR(18),
3474         PMU_PMEVCNTR(19),
3475         PMU_PMEVCNTR(20),
3476         PMU_PMEVCNTR(21),
3477         PMU_PMEVCNTR(22),
3478         PMU_PMEVCNTR(23),
3479         PMU_PMEVCNTR(24),
3480         PMU_PMEVCNTR(25),
3481         PMU_PMEVCNTR(26),
3482         PMU_PMEVCNTR(27),
3483         PMU_PMEVCNTR(28),
3484         PMU_PMEVCNTR(29),
3485         PMU_PMEVCNTR(30),
3486         /* PMEVTYPERn */
3487         PMU_PMEVTYPER(0),
3488         PMU_PMEVTYPER(1),
3489         PMU_PMEVTYPER(2),
3490         PMU_PMEVTYPER(3),
3491         PMU_PMEVTYPER(4),
3492         PMU_PMEVTYPER(5),
3493         PMU_PMEVTYPER(6),
3494         PMU_PMEVTYPER(7),
3495         PMU_PMEVTYPER(8),
3496         PMU_PMEVTYPER(9),
3497         PMU_PMEVTYPER(10),
3498         PMU_PMEVTYPER(11),
3499         PMU_PMEVTYPER(12),
3500         PMU_PMEVTYPER(13),
3501         PMU_PMEVTYPER(14),
3502         PMU_PMEVTYPER(15),
3503         PMU_PMEVTYPER(16),
3504         PMU_PMEVTYPER(17),
3505         PMU_PMEVTYPER(18),
3506         PMU_PMEVTYPER(19),
3507         PMU_PMEVTYPER(20),
3508         PMU_PMEVTYPER(21),
3509         PMU_PMEVTYPER(22),
3510         PMU_PMEVTYPER(23),
3511         PMU_PMEVTYPER(24),
3512         PMU_PMEVTYPER(25),
3513         PMU_PMEVTYPER(26),
3514         PMU_PMEVTYPER(27),
3515         PMU_PMEVTYPER(28),
3516         PMU_PMEVTYPER(29),
3517         PMU_PMEVTYPER(30),
3518         /* PMCCFILTR */
3519         { CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
3520 
3521         { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
3522         { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
3523 
3524         /* CCSIDR2 */
3525         { Op1(1), CRn( 0), CRm( 0),  Op2(2), undef_access },
3526 
3527         { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
3528 };
3529 
3530 static const struct sys_reg_desc cp15_64_regs[] = {
3531         { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
3532         { CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
3533         { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
3534         { SYS_DESC(SYS_AARCH32_CNTPCT),       access_arch_timer },
3535         { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
3536         { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
3537         { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
3538         { SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
3539         { SYS_DESC(SYS_AARCH32_CNTPCTSS),     access_arch_timer },
3540 };
3541 
3542 static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
3543                                bool is_32)
3544 {
3545         unsigned int i;
3546 
3547         for (i = 0; i < n; i++) {
3548                 if (!is_32 && table[i].reg && !table[i].reset) {
3549                         kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
3550                                 &table[i], i, table[i].name);
3551                         return false;
3552                 }
3553 
3554                 if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
3555                         kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
3556                                 &table[i], i, table[i - 1].name, table[i].name);
3557                         return false;
3558                 }
3559         }
3560 
3561         return true;
3562 }
3563 
3564 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
3565 {
3566         kvm_inject_undefined(vcpu);
3567         return 1;
3568 }
3569 
3570 static void perform_access(struct kvm_vcpu *vcpu,
3571                            struct sys_reg_params *params,
3572                            const struct sys_reg_desc *r)
3573 {
3574         trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
3575 
3576         /* Check for regs disabled by runtime config */
3577         if (sysreg_hidden(vcpu, r)) {
3578                 kvm_inject_undefined(vcpu);
3579                 return;
3580         }
3581 
3582         /*
3583          * Not having an accessor means that we have configured a trap
3584          * that we don't know how to handle. This certainly qualifies
3585          * as a gross bug that should be fixed right away.
3586          */
3587         BUG_ON(!r->access);
3588 
3589         /* Advance the PC past the instruction if the handler says so */
3590         if (likely(r->access(vcpu, params, r)))
3591                 kvm_incr_pc(vcpu);
3592 }
3593 
3594 /*
3595  * emulate_cp --  tries to match a sys_reg access in a handling table, and
3596  *                call the corresponding trap handler.
3597  *
3598  * @params: pointer to the descriptor of the access
3599  * @table: array of trap descriptors
3600  * @num: size of the trap descriptor array
3601  *
3602  * Return true if the access has been handled, false if not.
3603  */
3604 static bool emulate_cp(struct kvm_vcpu *vcpu,
3605                        struct sys_reg_params *params,
3606                        const struct sys_reg_desc *table,
3607                        size_t num)
3608 {
3609         const struct sys_reg_desc *r;
3610 
3611         if (!table)
3612                 return false;   /* Not handled */
3613 
3614         r = find_reg(params, table, num);
3615 
3616         if (r) {
3617                 perform_access(vcpu, params, r);
3618                 return true;
3619         }
3620 
3621         /* Not handled */
3622         return false;
3623 }
3624 
3625 static void unhandled_cp_access(struct kvm_vcpu *vcpu,
3626                                 struct sys_reg_params *params)
3627 {
3628         u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
3629         int cp = -1;
3630 
3631         switch (esr_ec) {
3632         case ESR_ELx_EC_CP15_32:
3633         case ESR_ELx_EC_CP15_64:
3634                 cp = 15;
3635                 break;
3636         case ESR_ELx_EC_CP14_MR:
3637         case ESR_ELx_EC_CP14_64:
3638                 cp = 14;
3639                 break;
3640         default:
3641                 WARN_ON(1);
3642         }
3643 
3644         print_sys_reg_msg(params,
3645                           "Unsupported guest CP%d access at: %08lx [%08lx]\n",
3646                           cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
3647         kvm_inject_undefined(vcpu);
3648 }
3649 
3650 /**
3651  * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
3652  * @vcpu: The VCPU pointer
3653  * @global: &struct sys_reg_desc
3654  * @nr_global: size of the @global array
3655  */
3656 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
3657                             const struct sys_reg_desc *global,
3658                             size_t nr_global)
3659 {
3660         struct sys_reg_params params;
3661         u64 esr = kvm_vcpu_get_esr(vcpu);
3662         int Rt = kvm_vcpu_sys_get_rt(vcpu);
3663         int Rt2 = (esr >> 10) & 0x1f;
3664 
3665         params.CRm = (esr >> 1) & 0xf;
3666         params.is_write = ((esr & 1) == 0);
3667 
3668         params.Op0 = 0;
3669         params.Op1 = (esr >> 16) & 0xf;
3670         params.Op2 = 0;
3671         params.CRn = 0;
3672 
3673         /*
3674          * Make a 64-bit value out of Rt and Rt2. As we use the same trap
3675          * backends between AArch32 and AArch64, we get away with it.
3676          */
3677         if (params.is_write) {
3678                 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
3679                 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
3680         }
3681 
3682         /*
3683          * If the table contains a handler, let it handle the access. On a
3684          * read, the 64bit result is then split between Rt and Rt2 before
3685          * returning with success.
3686          */
3687         if (emulate_cp(vcpu, &params, global, nr_global)) {
3688                 /* Split up the value between registers for the read side */
3689                 if (!params.is_write) {
3690                         vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
3691                         vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
3692                 }
3693 
3694                 return 1;
3695         }
3696 
3697         unhandled_cp_access(vcpu, &params);
3698         return 1;
3699 }
3700 
3701 static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
3702 
3703 /*
3704  * The CP10 ID registers are architecturally mapped to AArch64 feature
3705  * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
3706  * from AArch32.
3707  */
3708 static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
3709 {
3710         u8 reg_id = (esr >> 10) & 0xf;
3711         bool valid;
3712 
3713         params->is_write = ((esr & 1) == 0);
3714         params->Op0 = 3;
3715         params->Op1 = 0;
3716         params->CRn = 0;
3717         params->CRm = 3;
3718 
3719         /* CP10 ID registers are read-only */
3720         valid = !params->is_write;
3721 
3722         switch (reg_id) {
3723         /* MVFR0 */
3724         case 0b0111:
3725                 params->Op2 = 0;
3726                 break;
3727         /* MVFR1 */
3728         case 0b0110:
3729                 params->Op2 = 1;
3730                 break;
3731         /* MVFR2 */
3732         case 0b0101:
3733                 params->Op2 = 2;
3734                 break;
3735         default:
3736                 valid = false;
3737         }
3738 
3739         if (valid)
3740                 return true;
3741 
3742         kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
3743                       params->is_write ? "write" : "read", reg_id);
3744         return false;
3745 }
3746 
3747 /**
3748  * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
3749  *                        VFP Register' from AArch32.
3750  * @vcpu: The vCPU pointer
3751  *
3752  * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
3753  * Work out the correct AArch64 system register encoding and reroute to the
3754  * AArch64 system register emulation.
3755  */
3756 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
3757 {
3758         int Rt = kvm_vcpu_sys_get_rt(vcpu);
3759         u64 esr = kvm_vcpu_get_esr(vcpu);
3760         struct sys_reg_params params;
3761 
3762         /* UNDEF on any unhandled register access */
3763         if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
3764                 kvm_inject_undefined(vcpu);
3765                 return 1;
3766         }
3767 
3768         if (emulate_sys_reg(vcpu, &params))
3769                 vcpu_set_reg(vcpu, Rt, params.regval);
3770 
3771         return 1;
3772 }
3773 
3774 /**
3775  * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
3776  *                             CRn=0, which corresponds to the AArch32 feature
3777  *                             registers.
3778  * @vcpu: the vCPU pointer
3779  * @params: the system register access parameters.
3780  *
3781  * Our cp15 system register tables do not enumerate the AArch32 feature
3782  * registers. Conveniently, our AArch64 table does, and the AArch32 system
3783  * register encoding can be trivially remapped into the AArch64 one for the
3784  * feature registers: set op0=3, leaving op1, CRn, CRm, and op2 the same.
3785  *
3786  * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
3787  * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
3788  * range are either UNKNOWN or RES0. Rerouting remains architectural as we
3789  * treat undefined registers in this range as RAZ.
3790  */
3791 static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
3792                                    struct sys_reg_params *params)
3793 {
3794         int Rt = kvm_vcpu_sys_get_rt(vcpu);
3795 
3796         /* Treat impossible writes to RO registers as UNDEFINED */
3797         if (params->is_write) {
3798                 unhandled_cp_access(vcpu, params);
3799                 return 1;
3800         }
3801 
3802         params->Op0 = 3;
3803 
3804         /*
3805          * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
3806          * Avoid conflicting with future expansion of AArch64 feature registers
3807          * and simply treat them as RAZ here.
3808          */
3809         if (params->CRm > 3)
3810                 params->regval = 0;
3811         else if (!emulate_sys_reg(vcpu, params))
3812                 return 1;
3813 
3814         vcpu_set_reg(vcpu, Rt, params->regval);
3815         return 1;
3816 }
3817 
3818 /**
3819  * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
3820  * @vcpu: The VCPU pointer
3821  * @params: &struct sys_reg_params
3822  * @global: &struct sys_reg_desc
3823  * @nr_global: size of the @global array
3824  */
3825 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
3826                             struct sys_reg_params *params,
3827                             const struct sys_reg_desc *global,
3828                             size_t nr_global)
3829 {
3830         int Rt  = kvm_vcpu_sys_get_rt(vcpu);
3831 
3832         params->regval = vcpu_get_reg(vcpu, Rt);
3833 
3834         if (emulate_cp(vcpu, params, global, nr_global)) {
3835                 if (!params->is_write)
3836                         vcpu_set_reg(vcpu, Rt, params->regval);
3837                 return 1;
3838         }
3839 
3840         unhandled_cp_access(vcpu, params);
3841         return 1;
3842 }
3843 
3844 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
3845 {
3846         return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
3847 }
3848 
3849 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
3850 {
3851         struct sys_reg_params params;
3852 
3853         params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
3854 
3855         /*
3856          * Certain AArch32 ID registers are handled by rerouting to the AArch64
3857          * system register table. Registers in the ID range where CRm=0 are
3858          * excluded from this scheme as they do not trivially map into AArch64
3859          * system register encodings.
3860          */
3861         if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
3862                 return kvm_emulate_cp15_id_reg(vcpu, &params);
3863 
3864         return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
3865 }
3866 
3867 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
3868 {
3869         return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
3870 }
3871 
3872 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
3873 {
3874         struct sys_reg_params params;
3875 
3876         params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
3877 
3878         return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
3879 }
3880 
3881 /**
3882  * emulate_sys_reg - Emulate a guest access to an AArch64 system register
3883  * @vcpu: The VCPU pointer
3884  * @params: Decoded system register parameters
3885  *
3886  * Return: true if the system register access was successful, false otherwise.
3887  */
3888 static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
3889                             struct sys_reg_params *params)
3890 {
3891         const struct sys_reg_desc *r;
3892 
3893         r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
3894         if (likely(r)) {
3895                 perform_access(vcpu, params, r);
3896                 return true;
3897         }
3898 
3899         print_sys_reg_msg(params,
3900                           "Unsupported guest sys_reg access at: %lx [%08lx]\n",
3901                           *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
3902         kvm_inject_undefined(vcpu);
3903 
3904         return false;
3905 }
3906 
3907 static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, u8 pos)
3908 {
3909         unsigned long i, idreg_idx = 0;
3910 
3911         for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
3912                 const struct sys_reg_desc *r = &sys_reg_descs[i];
3913 
3914                 if (!is_vm_ftr_id_reg(reg_to_encoding(r)))
3915                         continue;
3916 
3917                 if (idreg_idx == pos)
3918                         return r;
3919 
3920                 idreg_idx++;
3921         }
3922 
3923         return NULL;
3924 }
3925 
3926 static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
3927 {
3928         struct kvm *kvm = s->private;
3929         u8 *iter;
3930 
3931         mutex_lock(&kvm->arch.config_lock);
3932 
3933         iter = &kvm->arch.idreg_debugfs_iter;
3934         if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
3935             *iter == (u8)~0) {
3936                 *iter = *pos;
3937                 if (!idregs_debug_find(kvm, *iter))
3938                         iter = NULL;
3939         } else {
3940                 iter = ERR_PTR(-EBUSY);
3941         }
3942 
3943         mutex_unlock(&kvm->arch.config_lock);
3944 
3945         return iter;
3946 }
3947 
3948 static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
3949 {
3950         struct kvm *kvm = s->private;
3951 
3952         (*pos)++;
3953 
3954         if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) {
3955                 kvm->arch.idreg_debugfs_iter++;
3956 
3957                 return &kvm->arch.idreg_debugfs_iter;
3958         }
3959 
3960         return NULL;
3961 }
3962 
3963 static void idregs_debug_stop(struct seq_file *s, void *v)
3964 {
3965         struct kvm *kvm = s->private;
3966 
3967         if (IS_ERR(v))
3968                 return;
3969 
3970         mutex_lock(&kvm->arch.config_lock);
3971 
3972         kvm->arch.idreg_debugfs_iter = ~0;
3973 
3974         mutex_unlock(&kvm->arch.config_lock);
3975 }
3976 
3977 static int idregs_debug_show(struct seq_file *s, void *v)
3978 {
3979         const struct sys_reg_desc *desc;
3980         struct kvm *kvm = s->private;
3981 
3982         desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter);
3983 
3984         if (!desc->name)
3985                 return 0;
3986 
3987         seq_printf(s, "%20s:\t%016llx\n",
3988                    desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc)));
3989 
3990         return 0;
3991 }
3992 
3993 static const struct seq_operations idregs_debug_sops = {
3994         .start  = idregs_debug_start,
3995         .next   = idregs_debug_next,
3996         .stop   = idregs_debug_stop,
3997         .show   = idregs_debug_show,
3998 };
3999 
4000 DEFINE_SEQ_ATTRIBUTE(idregs_debug);
4001 
4002 void kvm_sys_regs_create_debugfs(struct kvm *kvm)
4003 {
4004         kvm->arch.idreg_debugfs_iter = ~0;
4005 
4006         debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
4007                             &idregs_debug_fops);
4008 }
4009 
4010 static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
4011 {
4012         u32 id = reg_to_encoding(reg);
4013         struct kvm *kvm = vcpu->kvm;
4014 
4015         if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
4016                 return;
4017 
4018         kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
4019 }
4020 
4021 static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
4022                                   const struct sys_reg_desc *reg)
4023 {
4024         if (kvm_vcpu_initialized(vcpu))
4025                 return;
4026 
4027         reg->reset(vcpu, reg);
4028 }
4029 
4030 /**
4031  * kvm_reset_sys_regs - sets system registers to reset value
4032  * @vcpu: The VCPU pointer
4033  *
4034  * This function finds the right table above and sets the registers on the
4035  * virtual CPU struct to their architecturally defined reset values.
4036  */
4037 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
4038 {
4039         struct kvm *kvm = vcpu->kvm;
4040         unsigned long i;
4041 
4042         for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
4043                 const struct sys_reg_desc *r = &sys_reg_descs[i];
4044 
4045                 if (!r->reset)
4046                         continue;
4047 
4048                 if (is_vm_ftr_id_reg(reg_to_encoding(r)))
4049                         reset_vm_ftr_id_reg(vcpu, r);
4050                 else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
4051                         reset_vcpu_ftr_id_reg(vcpu, r);
4052                 else
4053                         r->reset(vcpu, r);
4054         }
4055 
4056         set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
4057 }
4058 
4059 /**
4060  * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
4061  *                       trap on a guest execution
4062  * @vcpu: The VCPU pointer
4063  */
4064 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
4065 {
4066         const struct sys_reg_desc *desc = NULL;
4067         struct sys_reg_params params;
4068         unsigned long esr = kvm_vcpu_get_esr(vcpu);
4069         int Rt = kvm_vcpu_sys_get_rt(vcpu);
4070         int sr_idx;
4071 
4072         trace_kvm_handle_sys_reg(esr);
4073 
4074         if (triage_sysreg_trap(vcpu, &sr_idx))
4075                 return 1;
4076 
4077         params = esr_sys64_to_params(esr);
4078         params.regval = vcpu_get_reg(vcpu, Rt);
4079 
4080         /* System registers have Op0=={2,3}, as per DDI0487 J.a C5.1.2 */
4081         if (params.Op0 == 2 || params.Op0 == 3)
4082                 desc = &sys_reg_descs[sr_idx];
4083         else
4084                 desc = &sys_insn_descs[sr_idx];
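             /* Everything else (notably Op0==1: DC, TLBI, ...) is a system instruction */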
4085 
4086         perform_access(vcpu, &params, desc);
4087 
4088         /* Read from system register? */
4089         if (!params.is_write &&
4090             (params.Op0 == 2 || params.Op0 == 3))
4091                 vcpu_set_reg(vcpu, Rt, params.regval);
4092 
4093         return 1;
4094 }
4095 
4096 /******************************************************************************
4097  * Userspace API
4098  *****************************************************************************/
4099 
4100 static bool index_to_params(u64 id, struct sys_reg_params *params)
4101 {
4102         switch (id & KVM_REG_SIZE_MASK) {
4103         case KVM_REG_SIZE_U64:
4104                 /* Any unused index bits mean it's not valid. */
4105                 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
4106                               | KVM_REG_ARM_COPROC_MASK
4107                               | KVM_REG_ARM64_SYSREG_OP0_MASK
4108                               | KVM_REG_ARM64_SYSREG_OP1_MASK
4109                               | KVM_REG_ARM64_SYSREG_CRN_MASK
4110                               | KVM_REG_ARM64_SYSREG_CRM_MASK
4111                               | KVM_REG_ARM64_SYSREG_OP2_MASK))
4112                         return false;
4113                 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
4114                                >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
4115                 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
4116                                >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
4117                 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
4118                                >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
4119                 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
4120                                >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
4121                 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
4122                                >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
4123                 return true;
4124         default:
4125                 return false;
4126         }
4127 }
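     /*
      * Note: sys_reg_to_index() further down performs the inverse mapping,
      * packing a descriptor's Op0/Op1/CRn/CRm/Op2 back into a userspace index.
      */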
4128 
4129 const struct sys_reg_desc *get_reg_by_id(u64 id,
4130                                          const struct sys_reg_desc table[],
4131                                          unsigned int num)
4132 {
4133         struct sys_reg_params params;
4134 
4135         if (!index_to_params(id, &params))
4136                 return NULL;
4137 
4138         return find_reg(&params, table, num);
4139 }
4140 
4141 /* Decode an index value, and find the sys_reg_desc entry. */
4142 static const struct sys_reg_desc *
4143 id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
4144                    const struct sys_reg_desc table[], unsigned int num)
4145 
4146 {
4147         const struct sys_reg_desc *r;
4148 
4149         /* We only do sys_reg for now. */
4150         if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
4151                 return NULL;
4152 
4153         r = get_reg_by_id(id, table, num);
4154 
4155         /* Not saved in the sys_reg array and not otherwise accessible? */
4156         if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
4157                 r = NULL;
4158 
4159         return r;
4160 }
4161 
4162 /*
4163  * These are the invariant sys_reg registers: we let the guest see the
4164  * host versions of these, so they're part of the guest state.
4165  *
4166  * A future CPU may provide a mechanism to present different values to
4167  * the guest, or a future kvm may trap them.
4168  */
4169 
4170 #define FUNCTION_INVARIANT(reg)                                         \
4171         static u64 reset_##reg(struct kvm_vcpu *v,                      \
4172                                const struct sys_reg_desc *r)            \
4173         {                                                               \
4174                 ((struct sys_reg_desc *)r)->val = read_sysreg(reg);     \
4175                 return ((struct sys_reg_desc *)r)->val;                 \
4176         }
4177 
4178 FUNCTION_INVARIANT(midr_el1)
4179 FUNCTION_INVARIANT(revidr_el1)
4180 FUNCTION_INVARIANT(aidr_el1)
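     /*
      * e.g. FUNCTION_INVARIANT(midr_el1) defines reset_midr_el1(), which
      * snapshots the host's MIDR_EL1 into the descriptor's ->val.
      */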
4181 
4182 /* ->val is filled in by kvm_sys_reg_table_init() */
4183 static struct sys_reg_desc invariant_sys_regs[] __ro_after_init = {
4184         { SYS_DESC(SYS_MIDR_EL1), NULL, reset_midr_el1 },
4185         { SYS_DESC(SYS_REVIDR_EL1), NULL, reset_revidr_el1 },
4186         { SYS_DESC(SYS_AIDR_EL1), NULL, reset_aidr_el1 },
4187 };
4188 
4189 static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
4190 {
4191         const struct sys_reg_desc *r;
4192 
4193         r = get_reg_by_id(id, invariant_sys_regs,
4194                           ARRAY_SIZE(invariant_sys_regs));
4195         if (!r)
4196                 return -ENOENT;
4197 
4198         return put_user(r->val, uaddr);
4199 }
4200 
4201 static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
4202 {
4203         const struct sys_reg_desc *r;
4204         u64 val;
4205 
4206         r = get_reg_by_id(id, invariant_sys_regs,
4207                           ARRAY_SIZE(invariant_sys_regs));
4208         if (!r)
4209                 return -ENOENT;
4210 
4211         if (get_user(val, uaddr))
4212                 return -EFAULT;
4213 
4214         /* This is what we mean by invariant: you can't change it. */
4215         if (r->val != val)
4216                 return -EINVAL;
4217 
4218         return 0;
4219 }
4220 
4221 static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
4222 {
4223         u32 val;
4224         u32 __user *uval = uaddr;
4225 
4226         /* Fail if we have unknown bits set. */
4227         if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
4228                    | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
4229                 return -ENOENT;
4230 
4231         switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
4232         case KVM_REG_ARM_DEMUX_ID_CCSIDR:
4233                 if (KVM_REG_SIZE(id) != 4)
4234                         return -ENOENT;
4235                 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
4236                         >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
4237                 if (val >= CSSELR_MAX)
4238                         return -ENOENT;
4239 
4240                 return put_user(get_ccsidr(vcpu, val), uval);
4241         default:
4242                 return -ENOENT;
4243         }
4244 }
4245 
4246 static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
4247 {
4248         u32 val, newval;
4249         u32 __user *uval = uaddr;
4250 
4251         /* Fail if we have unknown bits set. */
4252         if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
4253                    | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
4254                 return -ENOENT;
4255 
4256         switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
4257         case KVM_REG_ARM_DEMUX_ID_CCSIDR:
4258                 if (KVM_REG_SIZE(id) != 4)
4259                         return -ENOENT;
4260                 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
4261                         >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
4262                 if (val >= CSSELR_MAX)
4263                         return -ENOENT;
4264 
4265                 if (get_user(newval, uval))
4266                         return -EFAULT;
4267 
4268                 return set_ccsidr(vcpu, val, newval);
4269         default:
4270                 return -ENOENT;
4271         }
4272 }
4273 
4274 int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
4275                          const struct sys_reg_desc table[], unsigned int num)
4276 {
4277         u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
4278         const struct sys_reg_desc *r;
4279         u64 val;
4280         int ret;
4281 
4282         r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
4283         if (!r || sysreg_hidden_user(vcpu, r))
4284                 return -ENOENT;
4285 
4286         if (r->get_user) {
4287                 ret = (r->get_user)(vcpu, r, &val);
4288         } else {
4289                 val = __vcpu_sys_reg(vcpu, r->reg);
4290                 ret = 0;
4291         }
4292 
4293         if (!ret)
4294                 ret = put_user(val, uaddr);
4295 
4296         return ret;
4297 }
4298 
4299 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
4300 {
4301         void __user *uaddr = (void __user *)(unsigned long)reg->addr;
4302         int err;
4303 
4304         if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
4305                 return demux_c15_get(vcpu, reg->id, uaddr);
4306 
4307         err = get_invariant_sys_reg(reg->id, uaddr);
4308         if (err != -ENOENT)
4309                 return err;
4310 
4311         return kvm_sys_reg_get_user(vcpu, reg,
4312                                     sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
4313 }
4314 
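/*
 * Example (illustration only): the userspace side of the path that ends in
 * kvm_arm_sys_reg_get_reg() above. The index is built with the uapi
 * ARM64_SYS_REG(Op0, Op1, CRn, CRm, Op2) helper; a vCPU fd is assumed.
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = ARM64_SYS_REG(3, 0, 0, 4, 0),	(ID_AA64PFR0_EL1)
 *		.addr = (__u64)&val,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	(fails with ENOENT for unknown
 *						 or hidden registers)
 */
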
4315 int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
4316                          const struct sys_reg_desc table[], unsigned int num)
4317 {
4318         u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
4319         const struct sys_reg_desc *r;
4320         u64 val;
4321         int ret;
4322 
4323         if (get_user(val, uaddr))
4324                 return -EFAULT;
4325 
4326         r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
4327         if (!r || sysreg_hidden_user(vcpu, r))
4328                 return -ENOENT;
4329 
4330         if (sysreg_user_write_ignore(vcpu, r))
4331                 return 0;
4332 
4333         if (r->set_user) {
4334                 ret = (r->set_user)(vcpu, r, val);
4335         } else {
4336                 __vcpu_sys_reg(vcpu, r->reg) = val;
4337                 ret = 0;
4338         }
4339 
4340         return ret;
4341 }
4342 
4343 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
4344 {
4345         void __user *uaddr = (void __user *)(unsigned long)reg->addr;
4346         int err;
4347 
4348         if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
4349                 return demux_c15_set(vcpu, reg->id, uaddr);
4350 
4351         err = set_invariant_sys_reg(reg->id, uaddr);
4352         if (err != -ENOENT)
4353                 return err;
4354 
4355         return kvm_sys_reg_set_user(vcpu, reg,
4356                                     sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
4357 }
4358 
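/*
 * Example (illustration only, reusing reg and val from the read sketch
 * above): the symmetric write path. KVM_SET_ONE_REG with the same index lands
 * in kvm_arm_sys_reg_set_reg() above; for writable ID registers, values
 * outside the writable mask are rejected by the ->set_user() handler with
 * EINVAL. The SVE field of ID_AA64PFR0_EL1 (bits [35:32]) is used purely as
 * an example.
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	(read the current value)
 *	val &= ~(0xfULL << 32);			(e.g. clear ID_AA64PFR0_EL1.SVE)
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	(EINVAL if the value is not
 *						 allowed for this VM)
 */
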
4359 static unsigned int num_demux_regs(void)
4360 {
4361         return CSSELR_MAX;
4362 }
4363 
4364 static int write_demux_regids(u64 __user *uindices)
4365 {
4366         u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
4367         unsigned int i;
4368 
4369         val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
4370         for (i = 0; i < CSSELR_MAX; i++) {
4371                 if (put_user(val | i, uindices))
4372                         return -EFAULT;
4373                 uindices++;
4374         }
4375         return 0;
4376 }
4377 
4378 static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
4379 {
4380         return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
4381                 KVM_REG_ARM64_SYSREG |
4382                 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
4383                 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
4384                 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
4385                 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
4386                 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
4387 }
4388 
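/*
 * Note (illustration only, assuming the arm64 uapi headers): the packing
 * above mirrors the ARM64_SYS_REG() helper that userspace uses, so both sides
 * agree on the 64-bit index for a given (Op0, Op1, CRn, CRm, Op2) encoding.
 * A compile-time check of that equivalence, for ID_AA64PFR0_EL1:
 *
 *	static_assert(ARM64_SYS_REG(3, 0, 0, 4, 0) ==
 *		      (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG |
 *		       (3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
 *		       (4 << KVM_REG_ARM64_SYSREG_CRM_SHIFT)),
 *		      "index packing matches the uapi helper");
 */
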
4389 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
4390 {
4391         if (!*uind)
4392                 return true;
4393 
4394         if (put_user(sys_reg_to_index(reg), *uind))
4395                 return false;
4396 
4397         (*uind)++;
4398         return true;
4399 }
4400 
4401 static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
4402                             const struct sys_reg_desc *rd,
4403                             u64 __user **uind,
4404                             unsigned int *total)
4405 {
4406         /*
4407          * Ignore registers we trap but don't save,
4408          * and for which no custom user accessor is provided.
4409          */
4410         if (!(rd->reg || rd->get_user))
4411                 return 0;
4412 
4413         if (sysreg_hidden_user(vcpu, rd))
4414                 return 0;
4415 
4416         if (!copy_reg_to_user(rd, uind))
4417                 return -EFAULT;
4418 
4419         (*total)++;
4420         return 0;
4421 }
4422 
4423 /* Assumed ordered tables, see kvm_sys_reg_table_init. */
4424 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
4425 {
4426         const struct sys_reg_desc *i2, *end2;
4427         unsigned int total = 0;
4428         int err;
4429 
4430         i2 = sys_reg_descs;
4431         end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
4432 
4433         while (i2 != end2) {
4434                 err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
4435                 if (err)
4436                         return err;
4437         }
4438         return total;
4439 }
4440 
4441 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
4442 {
4443         return ARRAY_SIZE(invariant_sys_regs)
4444                 + num_demux_regs()
4445                 + walk_sys_regs(vcpu, (u64 __user *)NULL);
4446 }
4447 
4448 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
4449 {
4450         unsigned int i;
4451         int err;
4452 
4453         /* Give userspace the invariant registers' indices first. */
4454         for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
4455                 if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
4456                         return -EFAULT;
4457                 uindices++;
4458         }
4459 
4460         err = walk_sys_regs(vcpu, uindices);
4461         if (err < 0)
4462                 return err;
4463         uindices += err;
4464 
4465         return write_demux_regids(uindices);
4466 }
4467 
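/*
 * Example (illustration only): the consumer of the walks above is the
 * KVM_GET_REG_LIST vCPU ioctl. Userspace asks for the count first, then for
 * the indices themselves; malloc() failure handling is elided.
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	(fails with E2BIG and
 *							 writes the count to .n)
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);		(list->reg[] now holds
 *							 every index)
 */
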
4468 #define KVM_ARM_FEATURE_ID_RANGE_INDEX(r)                       \
4469         KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r),            \
4470                 sys_reg_Op1(r),                                 \
4471                 sys_reg_CRn(r),                                 \
4472                 sys_reg_CRm(r),                                 \
4473                 sys_reg_Op2(r))
4474 
4475 int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
4476 {
4477         const void *zero_page = page_to_virt(ZERO_PAGE(0));
4478         u64 __user *masks = (u64 __user *)range->addr;
4479 
4480         /* Only the feature ID range is supported; reserved[13] must be zero. */
4481         if (range->range ||
4482             memcmp(range->reserved, zero_page, sizeof(range->reserved)))
4483                 return -EINVAL;
4484 
4485         /* Wipe the whole thing first */
4486         if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
4487                 return -EFAULT;
4488 
4489         for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
4490                 const struct sys_reg_desc *reg = &sys_reg_descs[i];
4491                 u32 encoding = reg_to_encoding(reg);
4492                 u64 val;
4493 
4494                 if (!is_feature_id_reg(encoding) || !reg->set_user)
4495                         continue;
4496 
4497                 if (!reg->val ||
4498                     (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) {
4499                         continue;
4500                 }
4501                 val = reg->val;
4502 
4503                 if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
4504                         return -EFAULT;
4505         }
4506 
4507         return 0;
4508 }
4509 
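/*
 * Example (illustration only): the matching userspace call is the
 * KVM_ARM_GET_REG_WRITABLE_MASKS VM ioctl (not a vCPU one). The reserved
 * words must be zero, as checked above; a VM fd is assumed and error handling
 * is elided.
 *
 *	__u64 masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 *	struct reg_mask_range range = {
 *		.addr  = (__u64)masks,
 *		.range = KVM_ARM_FEATURE_ID_RANGE,
 *	};
 *
 *	ioctl(vm_fd, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
 *	(masks[KVM_ARM_FEATURE_ID_RANGE_IDX(3, 0, 0, 4, 0)] then gives the
 *	 writable bits of ID_AA64PFR0_EL1)
 */
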
4510 static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
4511 {
4512         struct kvm *kvm = vcpu->kvm;
4513 
4514         if (has_vhe() || has_hvhe())
4515                 vcpu->arch.hcr_el2 |= HCR_E2H;
4516         if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
4517                 /* route synchronous external abort exceptions to EL2 */
4518                 vcpu->arch.hcr_el2 |= HCR_TEA;
4519                 /* trap error record accesses */
4520                 vcpu->arch.hcr_el2 |= HCR_TERR;
4521         }
4522 
4523         if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
4524                 vcpu->arch.hcr_el2 |= HCR_FWB;
4525 
4526         if (cpus_have_final_cap(ARM64_HAS_EVT) &&
4527             !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
4528             kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
4529                 vcpu->arch.hcr_el2 |= HCR_TID4;
4530         else
4531                 vcpu->arch.hcr_el2 |= HCR_TID2;
4532 
4533         if (vcpu_el1_is_32bit(vcpu))
4534                 vcpu->arch.hcr_el2 &= ~HCR_RW;
4535 
4536         if (kvm_has_mte(vcpu->kvm))
4537                 vcpu->arch.hcr_el2 |= HCR_ATA;
4538 
4539         /*
4540          * In the absence of FGT, we cannot independently trap TLBI
4541          * Range instructions. This isn't great, but trapping all
4542          * TLBIs would be far worse. Live with it...
4543          */
4544         if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
4545                 vcpu->arch.hcr_el2 |= HCR_TTLBOS;
4546 }
4547 
4548 void kvm_calculate_traps(struct kvm_vcpu *vcpu)
4549 {
4550         struct kvm *kvm = vcpu->kvm;
4551 
4552         mutex_lock(&kvm->arch.config_lock);
4553         vcpu_set_hcr(vcpu);
4554 
4555         if (cpus_have_final_cap(ARM64_HAS_HCX)) {
4556                 /*
4557                  * In general, all HCRX_EL2 bits are gated by a feature.
4558                  * The only reason we can set SMPME without checking any
4559                  * feature is that its effects are not directly observable
4560                  * from the guest.
4561                  */
4562                 vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;
4563 
4564                 if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
4565                         vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
4566 
4567                 if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
4568                         vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
4569         }
4570 
4571         if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
4572                 goto out;
4573 
4574         kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1           |
4575                                        HFGxTR_EL2_nMAIR2_EL1            |
4576                                        HFGxTR_EL2_nS2POR_EL1            |
4577                                        HFGxTR_EL2_nPOR_EL1              |
4578                                        HFGxTR_EL2_nPOR_EL0              |
4579                                        HFGxTR_EL2_nACCDATA_EL1          |
4580                                        HFGxTR_EL2_nSMPRI_EL1_MASK       |
4581                                        HFGxTR_EL2_nTPIDR2_EL0_MASK);
4582 
4583         if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
4584                 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS|
4585                                                 HFGITR_EL2_TLBIRVALE1OS |
4586                                                 HFGITR_EL2_TLBIRVAAE1OS |
4587                                                 HFGITR_EL2_TLBIRVAE1OS  |
4588                                                 HFGITR_EL2_TLBIVAALE1OS |
4589                                                 HFGITR_EL2_TLBIVALE1OS  |
4590                                                 HFGITR_EL2_TLBIVAAE1OS  |
4591                                                 HFGITR_EL2_TLBIASIDE1OS |
4592                                                 HFGITR_EL2_TLBIVAE1OS   |
4593                                                 HFGITR_EL2_TLBIVMALLE1OS);
4594 
4595         if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
4596                 kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1  |
4597                                                 HFGITR_EL2_TLBIRVALE1   |
4598                                                 HFGITR_EL2_TLBIRVAAE1   |
4599                                                 HFGITR_EL2_TLBIRVAE1    |
4600                                                 HFGITR_EL2_TLBIRVAALE1IS|
4601                                                 HFGITR_EL2_TLBIRVALE1IS |
4602                                                 HFGITR_EL2_TLBIRVAAE1IS |
4603                                                 HFGITR_EL2_TLBIRVAE1IS  |
4604                                                 HFGITR_EL2_TLBIRVAALE1OS|
4605                                                 HFGITR_EL2_TLBIRVALE1OS |
4606                                                 HFGITR_EL2_TLBIRVAAE1OS |
4607                                                 HFGITR_EL2_TLBIRVAE1OS);
4608 
4609         if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
4610                 kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
4611                                                 HFGxTR_EL2_nPIR_EL1);
4612 
4613         if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
4614                 kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
4615                                                   HAFGRTR_EL2_RES1);
4616 
4617         set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
4618 out:
4619         mutex_unlock(&kvm->arch.config_lock);
4620 }
4621 
4622 int __init kvm_sys_reg_table_init(void)
4623 {
4624         bool valid = true;
4625         unsigned int i;
4626         int ret = 0;
4627 
4628         /* Make sure tables are unique and in order. */
4629         valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
4630         valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
4631         valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
4632         valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
4633         valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
4634         valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
4635         valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
4636 
4637         if (!valid)
4638                 return -EINVAL;
4639 
4640         /* We abuse the reset function to overwrite the table itself. */
4641         for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
4642                 invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
4643 
4644         ret = populate_nv_trap_config();
4645 
4646         for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
4647                 ret = populate_sysreg_config(sys_reg_descs + i, i);
4648 
4649         for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
4650                 ret = populate_sysreg_config(sys_insn_descs + i, i);
4651 
4652         return ret;
4653 }
4654 
