arch/arm64/include/asm/kvm_asm.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/insn.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)     ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)     !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ         0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP        2
#define ARM_EXCEPTION_IL          3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE    HVC_STUB_ERR

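/*
 * Editor's sketch (illustrative, not from this file): the exit code a
 * hypercall such as __kvm_vcpu_run() returns packs a pending-SError flag
 * into bit 31 on top of one of the ARM_EXCEPTION_* codes, so a caller
 * decodes it in two steps:
 *
 *	int exit_code = ...;
 *	if (ARM_SERROR_PENDING(exit_code))
 *		...handle the deferred SError first...
 *	switch (ARM_EXCEPTION_CODE(exit_code)) {
 *	case ARM_EXCEPTION_TRAP:
 *		...
 *	}
 */
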
#define kvm_arm_exception_type                                  \
        {ARM_EXCEPTION_IRQ,             "IRQ"           },      \
        {ARM_EXCEPTION_EL1_SERROR,      "SERROR"        },      \
        {ARM_EXCEPTION_TRAP,            "TRAP"          },      \
        {ARM_EXCEPTION_HYP_GONE,        "HYP_GONE"      }

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE     (2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)                                           \
        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                         \
                           ARM_SMCCC_SMC_64,                            \
                           ARM_SMCCC_OWNER_VENDOR_HYP,                  \
                           (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)

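/*
 * Editor's sketch (illustrative): KVM_HOST_SMCCC_FUNC(name) builds the
 * SMCCC function ID for a host-to-hyp call: a fast SMC64 call owned by
 * the vendor-specific hypervisor service, with the low bits taken from
 * the matching __KVM_HOST_SMCCC_FUNC_##name enumerator below. For example,
 * KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) encodes function number 0.
 */
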
#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init                    0

#ifndef __ASSEMBLY__

#include <linux/mm.h>

enum __kvm_host_smccc_func {
        /* Hypercalls available only prior to pKVM finalisation */
        /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
        __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
        __KVM_HOST_SMCCC_FUNC___pkvm_init,
        __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
        __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
        __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
        __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
        __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
        __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,

        /* Hypercalls available after pKVM finalisation */
        __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
        __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
        __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
        __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
        __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
        __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
        __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
        __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
        __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
        __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
        __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
        __KVM_HOST_SMCCC_FUNC___vgic_v3_save_vmcr_aprs,
        __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
        __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
        __KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
        __KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
        __KVM_HOST_SMCCC_FUNC___pkvm_teardown_vm,
};

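/*
 * Editor's note (hedged): kernel proper does not use these enumerators
 * directly; helpers elsewhere (e.g. kvm_call_hyp_nvhe() in kvm_host.h)
 * paste the target function name onto __KVM_HOST_SMCCC_FUNC_ and issue
 * the resulting ID via an HVC, which handle_trap() dispatches on the
 * hyp side.
 */
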
#define DECLARE_KVM_VHE_SYM(sym)        extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)       extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name but one defined in
 * VHE and the other in nVHE hyp implementations.
 */
#define DECLARE_KVM_HYP_SYM(sym)                \
        DECLARE_KVM_VHE_SYM(sym);               \
        DECLARE_KVM_NVHE_SYM(sym)

#define DECLARE_KVM_VHE_PER_CPU(type, sym)      \
        DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)     \
        DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)      \
        DECLARE_KVM_VHE_PER_CPU(type, sym);     \
        DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute a pointer to a symbol defined in the nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)      per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)                                          \
        ({                                                                      \
                unsigned long base, off;                                        \
                base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];              \
                off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -                    \
                      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);         \
                base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;      \
        })

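/*
 * Usage sketch (editor's illustration; 'irq_counts' is a hypothetical
 * per-CPU variable declared with DECLARE_KVM_NVHE_PER_CPU()):
 *
 *	unsigned long *p = per_cpu_ptr_nvhe_sym(irq_counts, cpu);
 *	if (!p)
 *		return -ENOMEM;	// hyp percpu pages not allocated yet
 *
 * The resulting pointer is the host's view of the nVHE percpu copy,
 * found by adding the symbol's offset from __per_cpu_start to that
 * CPU's base in kvm_arm_hyp_percpu_base[].
 */
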
#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)    sym
#define CHOOSE_HYP_SYM(sym)     CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)             __nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)       (&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)   (&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)     sym
#define CHOOSE_HYP_SYM(sym)     CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)            __vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)       (&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)   (&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)             (is_kernel_in_hyp_mode()        \
                                           ? CHOOSE_VHE_SYM(sym)        \
                                           : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)       (is_kernel_in_hyp_mode()        \
                                           ? this_cpu_ptr(&sym)         \
                                           : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)   (is_kernel_in_hyp_mode()        \
                                           ? per_cpu_ptr(&sym, cpu)     \
                                           : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)     sym
#define CHOOSE_NVHE_SYM(sym)    kvm_nvhe_sym(sym)

#endif

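/*
 * Editor's note (illustrative): from kernel proper, CHOOSE_HYP_SYM(foo)
 * therefore resolves at runtime to the VHE copy 'foo' when the kernel
 * itself runs at EL2, and to the nVHE copy 'kvm_nvhe_sym(foo)' otherwise;
 * hyp code gets its choice fixed at compile time by the #if above.
 */
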
struct kvm_nvhe_init_params {
        unsigned long mair_el2;
        unsigned long tcr_el2;
        unsigned long tpidr_el2;
        unsigned long stack_hyp_va;
        unsigned long stack_pa;
        phys_addr_t pgd_pa;
        unsigned long hcr_el2;
        unsigned long vttbr;
        unsigned long vtcr;
};

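/*
 * Editor's note (hedged): the host fills one of these per CPU with the
 * EL2 register values and stack/page-table addresses to install, then
 * hands its physical address to the __kvm_hyp_init hypercall when
 * initialising the nVHE hypervisor on that CPU.
 */
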
/*
 * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
 * hyp_panic() in non-protected mode.
 *
 * @stack_base:                 hyp VA of the hyp_stack base.
 * @overflow_stack_base:        hyp VA of the hyp_overflow_stack base.
 * @fp:                         hyp FP where the backtrace begins.
 * @pc:                         hyp PC where the backtrace begins.
 */
struct kvm_nvhe_stacktrace_info {
        unsigned long stack_base;
        unsigned long overflow_stack_base;
        unsigned long fp;
        unsigned long pc;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)                                               \
        ({                                                              \
                void *val = (ptr);                                      \
                if (!is_kernel_in_hyp_mode())                           \
                        val = lm_alias((ptr));                          \
                val;                                                    \
        })
#define kvm_ksym_ref_nvhe(sym)  kvm_ksym_ref(kvm_nvhe_sym(sym))

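/*
 * Usage sketch (editor's illustration): kvm_ksym_ref_nvhe(__kvm_hyp_init)
 * gives the host a usable address for the nVHE copy of __kvm_hyp_init.
 * On non-VHE systems that is the linear-map alias of the kernel image
 * address, roughly because the hyp VA mappings are derived from the
 * linear mapping rather than from the image.
 */
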
struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init          CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector        CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs    CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
                                     int level);
extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
                                         phys_addr_t ipa,
                                         int level);
extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
                                        phys_addr_t start, unsigned long pages);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern void __vgic_v3_init_lrs(void);

extern u64 __kvm_get_mdcr_el2(void);

#define __KVM_EXTABLE(from, to)                                         \
        "       .pushsection    __kvm_ex_table, \"a\"\n"                \
        "       .align          3\n"                                    \
        "       .long           (" #from " - .), (" #to " - .)\n"       \
        "       .popsection\n"

#define __kvm_at(at_op, addr)                                           \
( {                                                                     \
        int __kvm_at_err = 0;                                           \
        u64 spsr, elr;                                                  \
        asm volatile(                                                   \
        "       mrs     %1, spsr_el2\n"                                 \
        "       mrs     %2, elr_el2\n"                                  \
        "1:     at      "at_op", %3\n"                                  \
        "       isb\n"                                                  \
        "       b       9f\n"                                           \
        "2:     msr     spsr_el2, %1\n"                                 \
        "       msr     elr_el2, %2\n"                                  \
        "       mov     %w0, %4\n"                                      \
        "9:\n"                                                          \
        __KVM_EXTABLE(1b, 2b)                                           \
        : "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)                \
        : "r" (addr), "i" (-EFAULT));                                   \
        __kvm_at_err;                                                   \
} )

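/*
 * Usage sketch (editor's illustration): hyp code performs a guarded
 * address translation with, e.g.,
 *
 *	if (__kvm_at("s1e1r", far))
 *		...the AT took an exception, result is -EFAULT...
 *
 * The __KVM_EXTABLE entry routes an unexpected exception at the AT
 * instruction (label 1) to the fixup at label 2, which restores
 * SPSR_EL2/ELR_EL2 (clobbered by taking the exception) and reports
 * the failure.
 */
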
asmlinkage void __noreturn hyp_panic(void);
asmlinkage void __noreturn hyp_panic_bad_stack(void);
asmlinkage void kvm_unexpected_el2_exception(void);
struct kvm_cpu_context;
void handle_trap(struct kvm_cpu_context *host_ctxt);
asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on);
void __noreturn __pkvm_init_finalise(void);
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
void kvm_patch_vector_branch(struct alt_instr *alt,
        __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_get_kimage_voffset(struct alt_instr *alt,
        __le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_final_ctr_el0(struct alt_instr *alt,
        __le32 *origptr, __le32 *updptr, int nr_inst);
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
        u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar);

#else /* __ASSEMBLY__ */

.macro get_host_ctxt reg, tmp
        adr_this_cpu \reg, kvm_host_data, \tmp
        add     \reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
        get_host_ctxt \ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
        adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
        adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
        str     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

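/*
 * Usage sketch (editor's illustration): hyp entry paths typically invoke
 *
 *	get_vcpu_ptr x1, x0
 *
 * leaving the current vcpu pointer in x1 and using x0 as scratch for the
 * host context address; both registers are clobbered.
 */
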
/*
 * KVM extable for unexpected exceptions.
 * Creates a struct kvm_exception_table_entry in a section that can be
 * mapped by EL2. The table is not sorted.
 *
 * The caller must ensure that x18 holds the hypervisor value (so that any
 * Shadow-Call-Stack instrumented code can write to it), and that SPSR_EL2
 * and ELR_EL2 are restored by the fixup.
 */
.macro  _kvm_extable, from, to
        .pushsection    __kvm_ex_table, "a"
        .align          3
        .long           (\from - .), (\to - .)
        .popsection
.endm

#define CPU_XREG_OFFSET(x)      (CPU_USER_PT_REGS + 8*(x))
#define CPU_LR_OFFSET           CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET       (CPU_LR_OFFSET + 8)

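/*
 * Editor's note (illustrative): these offsets assume the general purpose
 * registers are stored as consecutive 64-bit slots starting at
 * CPU_USER_PT_REGS within the context, so x30 (lr) lives 8*30 bytes in
 * and sp_el0 is saved in the slot immediately after it.
 */
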
/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
        str     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        stp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        stp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        stp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
        stp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
        stp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
        stp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
        // We require \ctxt is not x18-x28
        ldr     x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
        ldp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
        ldp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
        ldp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
        ldp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
        ldp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
        ldp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
        mrs     \tmp,   sp_el0
        str     \tmp,   [\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
        ldr     \tmp,     [\ctxt, #CPU_SP_EL0_OFFSET]
        msr     sp_el0, \tmp
.endm

#endif /* __ASSEMBLY__ */

#endif /* __ARM_KVM_ASM_H__ */
