~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/arm64/include/asm/kvm_nested.h

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* SPDX-License-Identifier: GPL-2.0 */
  2 #ifndef __ARM64_KVM_NESTED_H
  3 #define __ARM64_KVM_NESTED_H
  4 
  5 #include <linux/bitfield.h>
  6 #include <linux/kvm_host.h>
  7 #include <asm/kvm_emulate.h>
  8 #include <asm/kvm_pgtable.h>
  9 
 10 static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
 11 {
 12         return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
 13                 cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
 14                 vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
 15 }
 16 
 17 /* Translation helpers from non-VHE EL2 to EL1 */
 18 static inline u64 tcr_el2_ps_to_tcr_el1_ips(u64 tcr_el2)
 19 {
 20         return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_IPS_SHIFT;
 21 }
 22 
 23 static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
 24 {
 25         return TCR_EPD1_MASK |                          /* disable TTBR1_EL1 */
 26                ((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) |
 27                tcr_el2_ps_to_tcr_el1_ips(tcr) |
 28                (tcr & TCR_EL2_TG0_MASK) |
 29                (tcr & TCR_EL2_ORGN0_MASK) |
 30                (tcr & TCR_EL2_IRGN0_MASK) |
 31                (tcr & TCR_EL2_T0SZ_MASK);
 32 }
 33 
 34 static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
 35 {
 36         u64 cpacr_el1 = CPACR_ELx_RES1;
 37 
 38         if (cptr_el2 & CPTR_EL2_TTA)
 39                 cpacr_el1 |= CPACR_ELx_TTA;
 40         if (!(cptr_el2 & CPTR_EL2_TFP))
 41                 cpacr_el1 |= CPACR_ELx_FPEN;
 42         if (!(cptr_el2 & CPTR_EL2_TZ))
 43                 cpacr_el1 |= CPACR_ELx_ZEN;
 44 
 45         cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);
 46 
 47         return cpacr_el1;
 48 }
 49 
 50 static inline u64 translate_sctlr_el2_to_sctlr_el1(u64 val)
 51 {
 52         /* Only preserve the minimal set of bits we support */
 53         val &= (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | SCTLR_ELx_SA |
 54                 SCTLR_ELx_I | SCTLR_ELx_IESB | SCTLR_ELx_WXN | SCTLR_ELx_EE);
 55         val |= SCTLR_EL1_RES1;
 56 
 57         return val;
 58 }
 59 
 60 static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
 61 {
 62         /* Clear the ASID field */
 63         return ttbr0 & ~GENMASK_ULL(63, 48);
 64 }
 65 
/*
 * Nested-virt entry points implemented out of line.
 * NOTE(review): one-line summaries below are inferred from the names;
 * confirm against the definitions.
 */
extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu);

/* Opaque parameter block handed to the per-MMU TLBI callback below. */
union tlbi_info;

/*
 * Invoke the callback on stage-2 MMUs of @kvm selected by @vmid,
 * passing @info through unchanged.
 * NOTE(review): exact matching semantics not visible here -- confirm.
 */
extern void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
                                       const union tlbi_info *info,
                                       void (*)(struct kvm_s2_mmu *,
                                                const union tlbi_info *));
extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
 80 
/*
 * Result of walking a nested guest's stage-2 page tables
 * (the @result argument of kvm_walk_nested_s2()).
 */
struct kvm_s2_trans {
        phys_addr_t output;             /* output address of the walk */
        unsigned long block_size;       /* size of the block/page mapping */
        bool writable;                  /* mapping permits writes */
        bool readable;                  /* mapping permits reads */
        int level;                      /* level at which the walk ended */
        u32 esr;                        /* ESR value recorded for the walk */
        u64 upper_attr;                 /* upper attrs; bit 54 set => not executable */
};
 90 
 91 static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
 92 {
 93         return trans->output;
 94 }
 95 
 96 static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
 97 {
 98         return trans->block_size;
 99 }
100 
101 static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
102 {
103         return trans->esr;
104 }
105 
106 static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
107 {
108         return trans->readable;
109 }
110 
111 static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
112 {
113         return trans->writable;
114 }
115 
116 static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
117 {
118         return !(trans->upper_attr & BIT(54));
119 }
120 
/*
 * Nested stage-2 fault handling, implemented out of line.
 * NOTE(review): summaries inferred from names/signatures -- confirm
 * against the definitions.
 */
/* Walk the guest's stage-2 tables for @gipa, filling @result. */
extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
                              struct kvm_s2_trans *result);
/* Handle a permission fault against a previous translation @trans. */
extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
                                    struct kvm_s2_trans *trans);
/* Inject a stage-2 fault with syndrome @esr_el2 into the guest. */
extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
/* Write-protect / unmap / flush all nested stage-2 MMUs of @kvm. */
extern void kvm_nested_s2_wp(struct kvm *kvm);
extern void kvm_nested_s2_unmap(struct kvm *kvm);
extern void kvm_nested_s2_flush(struct kvm *kvm);

/* Range covered by a TLB invalidation, derived from its payload @val. */
unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);
131 
132 static inline bool kvm_supported_tlbi_s1e1_op(struct kvm_vcpu *vpcu, u32 instr)
133 {
134         struct kvm *kvm = vpcu->kvm;
135         u8 CRm = sys_reg_CRm(instr);
136 
137         if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
138               sys_reg_Op1(instr) == TLBI_Op1_EL1))
139                 return false;
140 
141         if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
142               (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
143                kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
144                 return false;
145 
146         if (CRm == TLBI_CRm_nROS &&
147             !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
148                 return false;
149 
150         if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
151              CRm == TLBI_CRm_RNS) &&
152             !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
153                 return false;
154 
155         return true;
156 }
157 
158 static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vpcu, u32 instr)
159 {
160         struct kvm *kvm = vpcu->kvm;
161         u8 CRm = sys_reg_CRm(instr);
162 
163         if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
164               sys_reg_Op1(instr) == TLBI_Op1_EL2))
165                 return false;
166 
167         if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
168               (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
169                kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
170                 return false;
171 
172         if (CRm == TLBI_CRm_IPAIS || CRm == TLBI_CRm_IPAONS)
173                 return false;
174 
175         if (CRm == TLBI_CRm_nROS &&
176             !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
177                 return false;
178 
179         if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
180              CRm == TLBI_CRm_RNS) &&
181             !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
182                 return false;
183 
184         return true;
185 }
186 
187 int kvm_init_nv_sysregs(struct kvm *kvm);
188 
#ifdef CONFIG_ARM64_PTR_AUTH
/*
 * Pointer-auth handling for a nested ERETAx; fills @elr.
 * NOTE(review): exact contract lives with the out-of-line definition.
 */
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
#else
/*
 * Without CONFIG_ARM64_PTR_AUTH this path must be unreachable: warn
 * once, poison @elr so any misuse is obvious, and fail.
 */
static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
        /* We really should never execute this... */
        WARN_ON_ONCE(1);
        *elr = 0xbad9acc0debadbad;
        return false;
}
#endif
200 
/*
 * The two software bits of a stage-2 PTE, used to stash the level at
 * which the guest's translation completed (see
 * kvm_encode_nested_level()).
 */
#define KVM_NV_GUEST_MAP_SZ     (KVM_PGTABLE_PROT_SW1 | KVM_PGTABLE_PROT_SW0)

/* Pack trans->level into the KVM_NV_GUEST_MAP_SZ software bits. */
static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
{
        return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
}
207 
208 #endif /* __ARM64_KVM_NESTED_H */
209 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php