Linux/arch/arm64/kvm/pauth.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 - Google LLC
 * Author: Marc Zyngier <maz@kernel.org>
 *
 * Primitive PAuth emulation for ERETAA/ERETAB.
 *
 * This code assumes that it is run from EL2, and that it is part of
 * the emulation of ERETAx for a guest hypervisor. That's a lot of
 * baked-in assumptions and shortcuts.
 *
 * Do not reuse for anything else!
 */

#include <linux/kvm_host.h>

#include <asm/gpr-num.h>
#include <asm/kvm_emulate.h>
#include <asm/pointer_auth.h>

/* PACGA Xd, Xn, Xm */
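/*
 * The instruction is emitted via .inst rather than by mnemonic:
 * 0x9AC03000 is the PACGA opcode with all register fields zero, and
 * the Rd, Rn and Rm register numbers are OR-ed into bits [4:0], [9:5]
 * and [20:16] respectively, so the assembler does not need to know
 * about the PAuth instructions.
 */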
#define PACGA(d,n,m)                                    \
        asm volatile(__DEFINE_ASM_GPR_NUMS              \
                     ".inst 0x9AC03000          |"      \
                     "(.L__gpr_num_%[Rd] << 0)  |"      \
                     "(.L__gpr_num_%[Rn] << 5)  |"      \
                     "(.L__gpr_num_%[Rm] << 16)\n"      \
                     : [Rd] "=r" ((d))                  \
                     : [Rn] "r" ((n)), [Rm] "r" ((m)))

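/*
 * Compute PACGA over the canonical pointer with the guest's chosen
 * I{A,B} key temporarily installed in the APGA key slot, using the
 * guest hypervisor's SP_EL2 as the modifier (read from the in-memory
 * copy, or from hardware sp_el1 when the sysregs are loaded on the
 * CPU). The live APGA key is saved and restored around the operation,
 * with preemption disabled so the borrowed key cannot be observed by
 * another context, and ISBs so that PACGA uses the intended key.
 */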
static u64 compute_pac(struct kvm_vcpu *vcpu, u64 ptr,
                       struct ptrauth_key ikey)
{
        struct ptrauth_key gkey;
        u64 mod, pac = 0;

        preempt_disable();

        if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
                mod = __vcpu_sys_reg(vcpu, SP_EL2);
        else
                mod = read_sysreg(sp_el1);

        gkey.lo = read_sysreg_s(SYS_APGAKEYLO_EL1);
        gkey.hi = read_sysreg_s(SYS_APGAKEYHI_EL1);

        __ptrauth_key_install_nosync(APGA, ikey);
        isb();

        PACGA(pac, ptr, mod);
        isb();

        __ptrauth_key_install_nosync(APGA, gkey);

        preempt_enable();

        /* PAC in the top 32 bits */
        return pac;
}

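/*
 * Return whether the top byte of the pointer is ignored for this
 * instruction address, i.e. whether the relevant TBIx bit is set and
 * TBIDx is clear for the address space half selected by VA[55]. When
 * true, bits [63:56] do not carry any PAC.
 */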
static bool effective_tbi(struct kvm_vcpu *vcpu, bool bit55)
{
        u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
        bool tbi, tbid;

        /*
         * Since we are authenticating an instruction address, we have
         * to take TBID into account. If E2H==0, ignore VA[55], as
         * TCR_EL2 only has a single TBI/TBID. If VA[55] was set in
         * this case, this is likely a guest bug...
         */
        if (!vcpu_el2_e2h_is_set(vcpu)) {
                tbi = tcr & BIT(20);
                tbid = tcr & BIT(29);
        } else if (bit55) {
                tbi = tcr & TCR_TBI1;
                tbid = tcr & TCR_TBID1;
        } else {
                tbi = tcr & TCR_TBI0;
                tbid = tcr & TCR_TBID0;
        }

        return tbi && !tbid;
}

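/*
 * The lowest PAC bit sits just above the configured VA range, at
 * position 64 - TxSZ for the relevant translation regime. For
 * example, with TxSZ = 25 (a 39-bit VA space), the bottom PAC bit is
 * 39, so the PAC occupies bits [54:39] (plus [63:56] when TBI is not
 * in effect).
 */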
static int compute_bottom_pac(struct kvm_vcpu *vcpu, bool bit55)
{
        static const int maxtxsz = 39; // Revisit these two values once
        static const int mintxsz = 16; // (if) we support TTST/LVA/LVA2
        u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
        int txsz;

        if (!vcpu_el2_e2h_is_set(vcpu) || !bit55)
                txsz = FIELD_GET(TCR_T0SZ_MASK, tcr);
        else
                txsz = FIELD_GET(TCR_T1SZ_MASK, tcr);

        return 64 - clamp(txsz, mintxsz, maxtxsz);
}

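/*
 * Build the mask of pointer bits holding the PAC: bits [54:bottom]
 * always, plus the top byte [63:56] when it is not ignored by TBI.
 * Bit 55 is never part of the PAC, as it selects the address space
 * half.
 */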
static u64 compute_pac_mask(struct kvm_vcpu *vcpu, bool bit55)
{
        int bottom_pac;
        u64 mask;

        bottom_pac = compute_bottom_pac(vcpu, bit55);

        mask = GENMASK(54, bottom_pac);
        if (!effective_tbi(vcpu, bit55))
                mask |= GENMASK(63, 56);

        return mask;
}

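/*
 * Strip the PAC by extending bit 55 into the PAC bits: all ones for
 * an upper-half address, all zeroes for a lower-half one. This
 * rebuilds the address as it was before being signed.
 */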
static u64 to_canonical_addr(struct kvm_vcpu *vcpu, u64 ptr, u64 mask)
{
        bool bit55 = !!(ptr & BIT(55));

        if (bit55)
                return ptr | mask;

        return ptr & ~mask;
}

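/*
 * Without FEAT_PAuth2, a failed authentication plants an error code
 * in the two bits just below the pointer's top bit: [54:53] when the
 * top byte is ignored (top bit 55), [62:61] otherwise (top bit 63).
 * The code is 1 for a key A failure and 2 for a key B failure,
 * matching the ERETAx flavour that trapped.
 */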
static u64 corrupt_addr(struct kvm_vcpu *vcpu, u64 ptr)
{
        bool bit55 = !!(ptr & BIT(55));
        u64 mask, error_code;
        int shift;

        if (effective_tbi(vcpu, bit55)) {
                mask = GENMASK(54, 53);
                shift = 53;
        } else {
                mask = GENMASK(62, 61);
                shift = 61;
        }

        if (esr_iss_is_eretab(kvm_vcpu_get_esr(vcpu)))
                error_code = 2ULL << shift;
        else
                error_code = 1ULL << shift;

        ptr &= ~mask;
        ptr |= error_code;

        return ptr;
}

/*
 * Authenticate an ERETAA/ERETAB instruction, returning true if the
 * authentication succeeded and false otherwise. In all cases, *elr
 * contains the VA to ERET to. Potential exception injection is left
 * to the caller.
 */
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
        u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2);
        u64 esr = kvm_vcpu_get_esr(vcpu);
        u64 ptr, cptr, pac, mask;
        struct ptrauth_key ikey;

        *elr = ptr = vcpu_read_sys_reg(vcpu, ELR_EL2);

        /* We assume we're already in the context of an ERETAx */
        if (esr_iss_is_eretab(esr)) {
                if (!(sctlr & SCTLR_EL1_EnIB))
                        return true;

                ikey.lo = __vcpu_sys_reg(vcpu, APIBKEYLO_EL1);
                ikey.hi = __vcpu_sys_reg(vcpu, APIBKEYHI_EL1);
        } else {
                if (!(sctlr & SCTLR_EL1_EnIA))
                        return true;

                ikey.lo = __vcpu_sys_reg(vcpu, APIAKEYLO_EL1);
                ikey.hi = __vcpu_sys_reg(vcpu, APIAKEYHI_EL1);
        }

        mask = compute_pac_mask(vcpu, !!(ptr & BIT(55)));
        cptr = to_canonical_addr(vcpu, ptr, mask);

        pac = compute_pac(vcpu, cptr, ikey);

        /*
         * Slightly deviate from the pseudocode: if we have a PAC
         * match with the signed pointer, then it must be good.
         * Anything after this point is pure error handling.
         */
        if ((pac & mask) == (ptr & mask)) {
                *elr = cptr;
                return true;
        }

        /*
         * Authentication failed: corrupt the canonical address if
         * PAuth2 isn't implemented, or apply some XORing if it is.
         */
        if (!kvm_has_pauth(vcpu->kvm, PAuth2))
                cptr = corrupt_addr(vcpu, cptr);
        else
                cptr = ptr ^ (pac & mask);

        *elr = cptr;
        return false;
}