TOMOYO Linux Cross Reference
Linux/arch/x86/kvm/svm/vmenter.S

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/frame.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX        (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX        (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX        (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP        (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI        (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI        (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8         (SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9         (SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10        (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11        (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12        (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13        (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14        (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15        (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa   (SVM_vmcb01 + KVM_VMCB_pa)

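The VCPU_* values above are plain byte offsets into struct vcpu_svm: SVM_vcpu_arch_regs is emitted by the kvm-asm-offsets machinery (the offset of vcpu.arch.regs within struct vcpu_svm) and the __VCPU_REGS_* indices come from <asm/kvm_vcpu_regs.h>. The following is a minimal, self-contained C sketch of that arithmetic; the toy struct is a stand-in, not the real vcpu_svm layout.

#include <stdio.h>
#include <stddef.h>

/* Toy stand-in for struct vcpu_svm; only the regs[] slot math matters here. */
struct toy_vcpu_svm {
        char          before_regs[0x100];   /* placeholder for earlier fields */
        unsigned long regs[16];             /* vcpu.arch.regs[] in real KVM   */
};

#define TOY_SVM_vcpu_arch_regs  offsetof(struct toy_vcpu_svm, regs)
#define TOY_VCPU_REGS_RCX       1                 /* __VCPU_REGS_RCX value    */
#define TOY_WORD_SIZE           sizeof(unsigned long)
#define TOY_VCPU_RCX (TOY_SVM_vcpu_arch_regs + TOY_VCPU_REGS_RCX * TOY_WORD_SIZE)

int main(void)
{
        /* "mov VCPU_RCX(%_ASM_DI), %_ASM_CX" loads from this byte offset off @svm. */
        printf("VCPU_RCX offset in the toy struct: %zu\n", (size_t)TOY_VCPU_RCX);
        return 0;
}
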
.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
        /* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
        ALTERNATIVE_2 "", \
                "jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
                "", X86_FEATURE_V_SPEC_CTRL
801:
.endm
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
        /*
         * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
         * host's, write the MSR.  This is kept out-of-line so that the common
         * case does not have to jump.
         *
         * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
         * there must not be any returns or indirect branches between this code
         * and vmentry.
         */
        movl SVM_spec_ctrl(%_ASM_DI), %eax
        cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
        je 801b
        mov $MSR_IA32_SPEC_CTRL, %ecx
        xor %edx, %edx
        wrmsr
        jmp 801b
.endm

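In C terms, the guest-side pair above does roughly the following before VMRUN (and only when X86_FEATURE_MSR_SPEC_CTRL is set without X86_FEATURE_V_SPEC_CTRL, per the ALTERNATIVE_2 gating). This is a hedged sketch; the variable and helper below are stand-ins for the percpu x86_spec_ctrl_current and the raw WRMSR to MSR_IA32_SPEC_CTRL.

#include <stdint.h>

static uint32_t x86_spec_ctrl_current;                  /* percpu in the kernel */

static void write_spec_ctrl_msr(uint32_t val)
{
        (void)val;                                      /* stand-in for wrmsr */
}

/* Rough equivalent of RESTORE_GUEST_SPEC_CTRL(_BODY): only touch the MSR when
 * the guest's value differs from what is already loaded on this CPU. */
static void restore_guest_spec_ctrl(uint32_t guest_spec_ctrl)
{
        if (guest_spec_ctrl == x86_spec_ctrl_current)
                return;                                 /* fast path: no jump */
        write_spec_ctrl_msr(guest_spec_ctrl);
}
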
.macro RESTORE_HOST_SPEC_CTRL
        /* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
        ALTERNATIVE_2 "", \
                "jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
                "", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
900:
        /* Same for after vmexit.  */
        mov $MSR_IA32_SPEC_CTRL, %ecx

        /*
         * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
         * if it was not intercepted during guest execution.
         */
        cmpb $0, \spec_ctrl_intercepted
        jnz 998f
        rdmsr
        movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

        /* Now restore the host value of the MSR if it is different from the guest's.  */
        movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
        cmp SVM_spec_ctrl(%_ASM_DI), %eax
        je 901b
        xor %edx, %edx
        wrmsr
        jmp 901b
.endm

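The host-side counterpart reverses the dance after #VMEXIT. A hedged C sketch of the same logic follows; the helpers are stand-ins for RDMSR/WRMSR on MSR_IA32_SPEC_CTRL and the percpu x86_spec_ctrl_current.

#include <stdbool.h>
#include <stdint.h>

static uint32_t x86_spec_ctrl_current;                  /* percpu in the kernel */

static uint32_t read_spec_ctrl_msr(void) { return 0; }          /* stand-in rdmsr */
static void write_spec_ctrl_msr(uint32_t v) { (void)v; }        /* stand-in wrmsr */

/*
 * Rough equivalent of RESTORE_HOST_SPEC_CTRL(_BODY): if SPEC_CTRL writes were
 * not intercepted, the MSR still holds the guest's value, so read it back into
 * svm->spec_ctrl first; then restore the host value only if it differs.
 */
static void restore_host_spec_ctrl(uint32_t *guest_spec_ctrl, bool intercepted)
{
        if (!intercepted)
                *guest_spec_ctrl = read_spec_ctrl_msr();

        if (x86_spec_ctrl_current != *guest_spec_ctrl)
                write_spec_ctrl_msr(x86_spec_ctrl_current);
}
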

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:        struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
        push %_ASM_BP
        mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
        push %r15
        push %r14
        push %r13
        push %r12
#else
        push %edi
        push %esi
#endif
        push %_ASM_BX

        /*
         * Save variables needed after vmexit on the stack, in reverse
         * order compared to when they are needed.
         */

        /* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
        push %_ASM_ARG2

        /* Needed to restore access to percpu variables.  */
        __ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

        /* Finally save @svm. */
        push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
        /*
         * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
         * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
         */
        mov %_ASM_ARG1, %_ASM_DI
.endif

        /* Clobbers RAX, RCX, RDX.  */
        RESTORE_GUEST_SPEC_CTRL

        /*
         * Use a single vmcb (vmcb01 because it's always valid) for
         * context switching guest state via VMLOAD/VMSAVE, that way
         * the state doesn't need to be copied between vmcb01 and
         * vmcb02 when switching vmcbs for nested virtualization.
         */
        mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:      vmload %_ASM_AX
2:

        /* Get svm->current_vmcb->pa into RAX. */
        mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
        mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

        /* Load guest registers. */
        mov VCPU_RCX(%_ASM_DI), %_ASM_CX
        mov VCPU_RDX(%_ASM_DI), %_ASM_DX
        mov VCPU_RBX(%_ASM_DI), %_ASM_BX
        mov VCPU_RBP(%_ASM_DI), %_ASM_BP
        mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
        mov VCPU_R8 (%_ASM_DI),  %r8
        mov VCPU_R9 (%_ASM_DI),  %r9
        mov VCPU_R10(%_ASM_DI), %r10
        mov VCPU_R11(%_ASM_DI), %r11
        mov VCPU_R12(%_ASM_DI), %r12
        mov VCPU_R13(%_ASM_DI), %r13
        mov VCPU_R14(%_ASM_DI), %r14
        mov VCPU_R15(%_ASM_DI), %r15
#endif
        mov VCPU_RDI(%_ASM_DI), %_ASM_DI

        /* Enter guest mode */
        sti

3:      vmrun %_ASM_AX
4:
        cli

        /* Pop @svm to RAX while it's the only available register. */
        pop %_ASM_AX

        /* Save all guest registers.  */
        mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
        mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
        mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
        mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
        mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
        mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
        mov %r8,  VCPU_R8 (%_ASM_AX)
        mov %r9,  VCPU_R9 (%_ASM_AX)
        mov %r10, VCPU_R10(%_ASM_AX)
        mov %r11, VCPU_R11(%_ASM_AX)
        mov %r12, VCPU_R12(%_ASM_AX)
        mov %r13, VCPU_R13(%_ASM_AX)
        mov %r14, VCPU_R14(%_ASM_AX)
        mov %r15, VCPU_R15(%_ASM_AX)
#endif

        /* @svm can stay in RDI from now on.  */
        mov %_ASM_AX, %_ASM_DI

        mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:      vmsave %_ASM_AX
6:

        /* Restores GSBASE among other things, allowing access to percpu data.  */
        pop %_ASM_AX
7:      vmload %_ASM_AX
8:

        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET!  */
        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

        /* Clobbers RAX, RCX, RDX.  */
        RESTORE_HOST_SPEC_CTRL

        /*
         * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
         * untrained as soon as we exit the VM and we are back to the
         * kernel. This should be done before re-enabling interrupts
         * because interrupt handlers won't sanitize 'ret' if the return is
         * from the kernel.
         */
        UNTRAIN_RET_VM

        /*
         * Clear all general purpose registers except RSP and RAX to prevent
         * speculative use of the guest's values, even those that are reloaded
         * via the stack.  In theory, an L1 cache miss when restoring registers
         * could lead to speculative execution with the guest's values.
         * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
         * free.  RSP and RAX are exempt as they are restored by hardware
         * during VM-Exit.
         */
        xor %ecx, %ecx
        xor %edx, %edx
        xor %ebx, %ebx
        xor %ebp, %ebp
        xor %esi, %esi
        xor %edi, %edi
#ifdef CONFIG_X86_64
        xor %r8d,  %r8d
        xor %r9d,  %r9d
        xor %r10d, %r10d
        xor %r11d, %r11d
        xor %r12d, %r12d
        xor %r13d, %r13d
        xor %r14d, %r14d
        xor %r15d, %r15d
#endif

        /* "Pop" @spec_ctrl_intercepted.  */
        pop %_ASM_BX

        pop %_ASM_BX

#ifdef CONFIG_X86_64
        pop %r12
        pop %r13
        pop %r14
        pop %r15
#else
        pop %esi
        pop %edi
#endif
        pop %_ASM_BP
        RET

        RESTORE_GUEST_SPEC_CTRL_BODY
        RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)

10:     cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 2b
        ud2
30:     cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 4b
        ud2
50:     cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 6b
        ud2
70:     cmpb $0, _ASM_RIP(kvm_rebooting)
        jne 8b
        ud2

        _ASM_EXTABLE(1b, 10b)
        _ASM_EXTABLE(3b, 30b)
        _ASM_EXTABLE(5b, 50b)
        _ASM_EXTABLE(7b, 70b)

SYM_FUNC_END(__svm_vcpu_run)

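For context, __svm_vcpu_run is declared in arch/x86/kvm/svm/svm.h and invoked from the noinstr VM-entry path in svm.c. Below is a hedged sketch of that hand-off; only the __svm_vcpu_run prototype reflects the real declaration, the wrapper name is illustrative and the real svm_vcpu_enter_exit() does considerably more bookkeeping around the call (including dispatching to the SEV-ES variant).

#include <stdbool.h>

struct vcpu_svm;        /* real definition lives in arch/x86/kvm/svm/svm.h */

/* Declaration of the assembly entry point above, as in svm.h. */
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);

/* Hedged sketch of the caller; the wrapper name is made up for illustration. */
static inline void enter_svm_guest_mode_sketch(struct vcpu_svm *svm,
                                               bool spec_ctrl_intercepted)
{
        __svm_vcpu_run(svm, spec_ctrl_intercepted);
}
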
#ifdef CONFIG_KVM_AMD_SEV


#ifdef CONFIG_X86_64
#define SEV_ES_GPRS_BASE 0x300
#define SEV_ES_RBX      (SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
#define SEV_ES_RBP      (SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
#define SEV_ES_RSI      (SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
#define SEV_ES_RDI      (SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
#define SEV_ES_R12      (SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
#define SEV_ES_R13      (SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
#define SEV_ES_R14      (SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
#define SEV_ES_R15      (SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
#endif

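The SEV_ES_* values are byte offsets into the host save area whose address the caller passes in RDX (the "@hostsa" referenced in the comments below): 0x300 is the base of the GPR block and the per-register displacement reuses the __VCPU_REGS_* indices from <asm/kvm_vcpu_regs.h>. A quick, self-contained arithmetic check (the index values are copied here for illustration):

#include <stdio.h>

#define TOY_SEV_ES_GPRS_BASE 0x300
#define TOY_VCPU_REGS_RBX    3          /* __VCPU_REGS_RBX */
#define TOY_VCPU_REGS_R15    15         /* __VCPU_REGS_R15 */
#define TOY_WORD_SIZE        8          /* BITS_PER_LONG / 8 on x86_64 */

int main(void)
{
        /* "mov %rbx, SEV_ES_RBX (%rdx)" stores RBX at 0x318 into @hostsa. */
        printf("SEV_ES_RBX = %#x\n",
               TOY_SEV_ES_GPRS_BASE + TOY_VCPU_REGS_RBX * TOY_WORD_SIZE);
        printf("SEV_ES_R15 = %#x\n",
               TOY_SEV_ES_GPRS_BASE + TOY_VCPU_REGS_R15 * TOY_WORD_SIZE);
        return 0;
}
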
/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:        struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
        FRAME_BEGIN

        /*
         * Save non-volatile (callee-saved) registers to the host save area.
         * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
         * saved on VMRUN.
         */
        mov %rbp, SEV_ES_RBP (%rdx)
        mov %r15, SEV_ES_R15 (%rdx)
        mov %r14, SEV_ES_R14 (%rdx)
        mov %r13, SEV_ES_R13 (%rdx)
        mov %r12, SEV_ES_R12 (%rdx)
        mov %rbx, SEV_ES_RBX (%rdx)

        /*
         * Save volatile registers that hold arguments that are needed after
         * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
         */
        mov %rdi, SEV_ES_RDI (%rdx)
        mov %rsi, SEV_ES_RSI (%rdx)

        /* Clobbers RAX, RCX, RDX (@hostsa). */
        RESTORE_GUEST_SPEC_CTRL

        /* Get svm->current_vmcb->pa into RAX. */
        mov SVM_current_vmcb(%rdi), %rax
        mov KVM_VMCB_pa(%rax), %rax

        /* Enter guest mode */
        sti

1:      vmrun %rax

2:      cli

        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET!  */
        FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

        /* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
        RESTORE_HOST_SPEC_CTRL

        /*
         * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
         * untrained as soon as we exit the VM and we are back to the
         * kernel. This should be done before re-enabling interrupts
         * because interrupt handlers won't sanitize 'ret' if the return is
         * from the kernel.
         */
        UNTRAIN_RET_VM

        FRAME_END
        RET

        RESTORE_GUEST_SPEC_CTRL_BODY
        RESTORE_HOST_SPEC_CTRL_BODY %sil

3:      cmpb $0, kvm_rebooting(%rip)
        jne 2b
        ud2

        _ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
#endif /* CONFIG_KVM_AMD_SEV */
                                                      
