Linux/arch/arm64/kvm/hyp/hyp-entry.S


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/mmu.h>
#include <asm/spectre.h>

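/*
 * EL2 exception vectors for KVM: __kvm_hyp_vector handles exceptions taken
 * while a guest is running, and __bp_harden_hyp_vecs holds the Spectre
 * hardened variants of the same vectors.
 */
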
.macro save_caller_saved_regs_vect
        /* x0 and x1 were saved in the vector entry */
        stp     x2, x3,   [sp, #-16]!
        stp     x4, x5,   [sp, #-16]!
        stp     x6, x7,   [sp, #-16]!
        stp     x8, x9,   [sp, #-16]!
        stp     x10, x11, [sp, #-16]!
        stp     x12, x13, [sp, #-16]!
        stp     x14, x15, [sp, #-16]!
        stp     x16, x17, [sp, #-16]!
.endm

.macro restore_caller_saved_regs_vect
        ldp     x16, x17, [sp], #16
        ldp     x14, x15, [sp], #16
        ldp     x12, x13, [sp], #16
        ldp     x10, x11, [sp], #16
        ldp     x8, x9,   [sp], #16
        ldp     x6, x7,   [sp], #16
        ldp     x4, x5,   [sp], #16
        ldp     x2, x3,   [sp], #16
        ldp     x0, x1,   [sp], #16
.endm
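
/*
 * A sketch of the resulting stack layout: the vector preamble pushes x0/x1,
 * then save_caller_saved_regs_vect pushes x2..x17 in pairs, so from the
 * final sp upwards the frame holds x16/x17, x14/x15, ..., x2/x3 and finally
 * the guest's x0/x1 at the top. restore_caller_saved_regs_vect pops one
 * extra pair (x0/x1) because it also undoes the push done by the vector
 * preamble.
 */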

        .text

el1_sync:                               // Guest trapped into EL2

        mrs     x0, esr_el2
        ubfx    x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
        cmp     x0, #ESR_ELx_EC_HVC64
        ccmp    x0, #ESR_ELx_EC_HVC32, #4, ne
        b.ne    el1_trap

        /*
         * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
         * The workaround has already been applied on the host,
         * so let's quickly get back to the guest. We don't bother
         * restoring x1, as it can be clobbered anyway.
         */
        ldr     x1, [sp]                                // Guest's x0
        eor     w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
        cbz     w1, wa_epilogue

        /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
        eor     w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
                          ARM_SMCCC_ARCH_WORKAROUND_2)
        cbz     w1, wa_epilogue

        eor     w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_2 ^ \
                          ARM_SMCCC_ARCH_WORKAROUND_3)
        cbnz    w1, el1_trap
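
        /*
         * A sketch of how the eor chain above works (the function ID values
         * are assumed from the SMCCC spec, they are not defined in this
         * file): after "eor w1, w1, #A", w1 is zero exactly when the guest's
         * x0 was A, and xoring the result with (A ^ B) turns "x0 ^ A" into
         * "x0 ^ B", so each cbz/cbnz tests the next function ID without
         * reloading x1. Assuming ARM_SMCCC_ARCH_WORKAROUND_1 = 0x80008000
         * and ARM_SMCCC_ARCH_WORKAROUND_2 = 0x80007fff, a guest x0 of
         * 0x80007fff survives the first test and takes the second cbz.
         * These constants (and their pairwise xors) are valid logical
         * immediates, whereas a cmp against them would first need the value
         * materialised, e.g.:
         *
         *      mov     w2, #0x8000
         *      movk    w2, #0x8000, lsl #16   // w2 = 0x80008000
         *      cmp     w1, w2
         *      b.eq    wa_epilogue
         */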

wa_epilogue:
        mov     x0, xzr
        add     sp, sp, #16
        eret
        sb

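/*
 * The handlers below all funnel into __guest_exit with the same register
 * convention (a sketch inferred from these call sites; __guest_exit itself
 * is defined elsewhere): x0 carries the ARM_EXCEPTION_* return code, x1 the
 * vcpu pointer loaded by get_vcpu_ptr, and the guest's x0/x1 are still
 * parked on the stack by the vector preamble.
 */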
el1_trap:
        get_vcpu_ptr    x1, x0
        mov     x0, #ARM_EXCEPTION_TRAP
        b       __guest_exit

el1_irq:
el1_fiq:
        get_vcpu_ptr    x1, x0
        mov     x0, #ARM_EXCEPTION_IRQ
        b       __guest_exit

el1_error:
        get_vcpu_ptr    x1, x0
        mov     x0, #ARM_EXCEPTION_EL1_SERROR
        b       __guest_exit

el2_sync:
        /* Check for illegal exception return */
        mrs     x0, spsr_el2
        tbnz    x0, #20, 1f
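        /*
         * Bit 20 of SPSR_EL2 is PSTATE.IL (PSR_IL_BIT == 20 in the arm64
         * headers): if it is set, this synchronous EL2 exception was caused
         * by an illegal exception return, and the code at 1: below attempts
         * recovery by exiting to the host with ARM_EXCEPTION_IL.
         */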

        save_caller_saved_regs_vect
        stp     x29, x30, [sp, #-16]!
        bl      kvm_unexpected_el2_exception
        ldp     x29, x30, [sp], #16
        restore_caller_saved_regs_vect

        eret

1:
        /* Let's attempt a recovery from the illegal exception return */
        get_vcpu_ptr    x1, x0
        mov     x0, #ARM_EXCEPTION_IL
        b       __guest_exit


el2_error:
        save_caller_saved_regs_vect
        stp     x29, x30, [sp, #-16]!

        bl      kvm_unexpected_el2_exception

        ldp     x29, x30, [sp], #16
        restore_caller_saved_regs_vect

        eret
        sb

.macro invalid_vector   label, target = __guest_exit_panic
        .align  2
SYM_CODE_START_LOCAL(\label)
        b \target
SYM_CODE_END(\label)
.endm

        /* None of these should ever happen */
        invalid_vector  el2t_sync_invalid
        invalid_vector  el2t_irq_invalid
        invalid_vector  el2t_fiq_invalid
        invalid_vector  el2t_error_invalid
        invalid_vector  el2h_irq_invalid
        invalid_vector  el2h_fiq_invalid

        .ltorg

        .align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
        .error "KVM vector preamble length mismatch"
.endif
.endm
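
/*
 * A note on KVM_VECTOR_PREAMBLE (defined elsewhere, presumably as two
 * instruction slots / 8 bytes): it has to cover exactly the code between
 * the 661:/662: labels in the vector macros below - the esb (or nop) plus
 * the stp of x0/x1 - because kvm_patch_vector_branch() emits branches that
 * land just past it. check_preamble_length asserts the two stay in sync.
 */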

.macro valid_vect target
        .align 7
661:
        esb
        stp     x0, x1, [sp, #-16]!
662:
        /*
         * The spectre vectors in __bp_harden_hyp_vecs generate br
         * instructions at runtime that jump to offset 8 within the matching
         * __kvm_hyp_vector entry. As the hyp .text section is mapped as
         * guarded pages, that landing point needs a "bti j".
         */
        bti j
        b       \target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
        .align 7
661:
        nop
        stp     x0, x1, [sp, #-16]!
662:
        /* Check valid_vect */
        bti j
        b       \target

check_preamble_length 661b, 662b
.endm

SYM_CODE_START(__kvm_hyp_vector)
        invalid_vect    el2t_sync_invalid       // Synchronous EL2t
        invalid_vect    el2t_irq_invalid        // IRQ EL2t
        invalid_vect    el2t_fiq_invalid        // FIQ EL2t
        invalid_vect    el2t_error_invalid      // Error EL2t

        valid_vect      el2_sync                // Synchronous EL2h
        invalid_vect    el2h_irq_invalid        // IRQ EL2h
        invalid_vect    el2h_fiq_invalid        // FIQ EL2h
        valid_vect      el2_error               // Error EL2h

        valid_vect      el1_sync                // Synchronous 64-bit EL1
        valid_vect      el1_irq                 // IRQ 64-bit EL1
        valid_vect      el1_fiq                 // FIQ 64-bit EL1
        valid_vect      el1_error               // Error 64-bit EL1

        valid_vect      el1_sync                // Synchronous 32-bit EL1
        valid_vect      el1_irq                 // IRQ 32-bit EL1
        valid_vect      el1_fiq                 // FIQ 32-bit EL1
        valid_vect      el1_error               // Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)
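
/*
 * Layout recap (standard ARMv8 VBAR rules rather than anything specific to
 * this file): the table is 2KB aligned (the .align 11 above) and holds 16
 * slots of 128 bytes each (.align 7), grouped as current-EL-with-SP_EL0
 * (EL2t), current-EL-with-SP_ELx (EL2h), lower-EL-AArch64 and
 * lower-EL-AArch32, which is why the last two groups both point at the
 * el1_* handlers.
 */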

.macro spectrev2_smccc_wa1_smc
        sub     sp, sp, #(8 * 4)
        stp     x2, x3, [sp, #(8 * 0)]
        stp     x0, x1, [sp, #(8 * 2)]
        alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_wa3
        /* Patched to mov WA3 when supported */
        mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_1
        alternative_cb_end
        smc     #0
        ldp     x2, x3, [sp, #(8 * 0)]
        add     sp, sp, #(8 * 2)
.endm
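
/*
 * Note the asymmetric stack maths above: 32 bytes are reserved, x2/x3 go at
 * [sp] and the guest's x0/x1 at [sp + 16], but only 16 bytes are released
 * after the SMC. That deliberately leaves x0/x1 parked at the top of the
 * stack, matching the plain "stp x0, x1, [sp, #-16]!" done on the
 * non-spectrev2 path, so both variants hand the same frame to the common
 * vector code they branch into.
 */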

.macro hyp_ventry       indirect, spectrev2
        .align  7
1:      esb
        .if \spectrev2 != 0
        spectrev2_smccc_wa1_smc
        .else
        stp     x0, x1, [sp, #-16]!
        mitigate_spectre_bhb_loop       x0
        mitigate_spectre_bhb_clear_insn
        .endif
        .if \indirect != 0
        alternative_cb ARM64_ALWAYS_SYSTEM, kvm_patch_vector_branch
        /*
         * For ARM64_SPECTRE_V3A configurations, these NOPs get replaced with:
         *
         * movz x0, #(addr & 0xffff)
         * movk x0, #((addr >> 16) & 0xffff), lsl #16
         * movk x0, #((addr >> 32) & 0xffff), lsl #32
         * br   x0
         *
         * Where:
         * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
         * See kvm_patch_vector_branch for details.
         */
        nop
        nop
        nop
        nop
        alternative_cb_end
        .endif
        b       __kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
.endm
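
/*
 * The final branch target is computed per slot: (1b - 0b) is this slot's
 * offset within the 2KB block emitted by generate_vectors (0: is defined
 * there), so adding it to __kvm_hyp_vector plus KVM_VECTOR_PREAMBLE lands
 * in the matching slot of the real vector table, just past its preamble -
 * the esb and the x0/x1 save have already been done here.
 */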

.macro generate_vectors indirect, spectrev2
0:
        .rept 16
        hyp_ventry      \indirect, \spectrev2
        .endr
        .org 0b + SZ_2K         // Safety measure
.endm
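
/*
 * Each generate_vectors invocation therefore emits a complete 16-slot, 2KB
 * vector table; the ".org 0b + SZ_2K" both pads it to exactly 2KB and makes
 * the build fail if the expanded slots ever grow past that size.
 */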

        .align  11
SYM_CODE_START(__bp_harden_hyp_vecs)
        generate_vectors indirect = 0, spectrev2 = 1 // HYP_VECTOR_SPECTRE_DIRECT
        generate_vectors indirect = 1, spectrev2 = 0 // HYP_VECTOR_INDIRECT
        generate_vectors indirect = 1, spectrev2 = 1 // HYP_VECTOR_SPECTRE_INDIRECT
1:      .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
        .org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)
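
/*
 * The closing .org pair is an exact-size assertion: if the three generated
 * tables overrun __BP_HARDEN_HYP_VECS_SZ (presumably one 2KB slot per
 * hardened variant) the first .org fails, and if they come up short the
 * first .org pads forward so that the second ".org 1b" would have to move
 * the location counter backwards, which fails too.
 */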
