
TOMOYO Linux Cross Reference
Linux/arch/powerpc/kvm/book3s_segment.S


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/* Real mode helpers */

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#if defined(CONFIG_PPC_BOOK3S_64)

#define GET_SHADOW_VCPU(reg)    \
        mr      reg, r13

#elif defined(CONFIG_PPC_BOOK3S_32)

#define GET_SHADOW_VCPU(reg)                            \
        tophys(reg, r2);                                \
        lwz     reg, (THREAD + THREAD_KVM_SVCPU)(reg);  \
        tophys(reg, reg)

#endif

/* Disable for nested KVM */
#define USE_QUICK_LAST_INST


/* Get helper functions for subarch specific functionality */

#if defined(CONFIG_PPC_BOOK3S_64)
#include "book3s_64_slb.S"
#elif defined(CONFIG_PPC_BOOK3S_32)
#include "book3s_32_sr.S"
#endif

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

        /* Required state:
         *
         * MSR = ~IR|DR
         * R1 = host R1
         * R2 = host R2
         * R4 = guest shadow MSR
         * R5 = normal host MSR
         * R6 = current host MSR (EE, IR, DR off)
         * LR = highmem guest exit code
         * all other volatile GPRS = free
         * SVCPU[CR] = guest CR
         * SVCPU[XER] = guest XER
         * SVCPU[CTR] = guest CTR
         * SVCPU[LR] = guest LR
         */

        /* r3 = shadow vcpu */
        GET_SHADOW_VCPU(r3)

        /* Save guest exit handler address and MSR */
        mflr    r0
        PPC_STL r0, HSTATE_VMHANDLER(r3)
        PPC_STL r5, HSTATE_HOST_MSR(r3)

        /* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
        PPC_STL r1, HSTATE_HOST_R1(r3)
        PPC_STL r2, HSTATE_HOST_R2(r3)

        /* Activate guest mode, so faults get handled by KVM */
        li      r11, KVM_GUEST_MODE_GUEST
        stb     r11, HSTATE_IN_GUEST(r3)

        /* Switch to guest segment. This is subarch specific. */
        LOAD_GUEST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
        /* Save host FSCR */
        mfspr   r8, SPRN_FSCR
        std     r8, HSTATE_HOST_FSCR(r13)
        /* Set FSCR during guest execution */
        ld      r9, SVCPU_SHADOW_FSCR(r13)
        mtspr   SPRN_FSCR, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

        /* Some guests may need to have dcbz set to 32 byte length.
         *
         * Usually we ensure that by patching dcbz instructions in the
         * guest to trap, and emulating them in the hypervisor.
         *
         * If we can, we should tell the CPU to use 32 byte dcbz though,
         * because that's a lot faster.
         */
        lbz     r0, HSTATE_RESTORE_HID5(r3)
        cmpwi   r0, 0
        beq     no_dcbz32_on

        mfspr   r0,SPRN_HID5
        ori     r0, r0, 0x80            /* XXX HID5_dcbz32 = 0x80 */
        mtspr   SPRN_HID5,r0
no_dcbz32_on:

#endif /* CONFIG_PPC_BOOK3S_64 */

        /* Enter guest */

        PPC_LL  r8, SVCPU_CTR(r3)
        PPC_LL  r9, SVCPU_LR(r3)
        lwz     r10, SVCPU_CR(r3)
        PPC_LL  r11, SVCPU_XER(r3)

        mtctr   r8
        mtlr    r9
        mtcr    r10
        mtxer   r11

        /* Move SRR0 and SRR1 into the respective regs */
        PPC_LL  r9, SVCPU_PC(r3)
        /* First clear RI in our current MSR value */
        li      r0, MSR_RI
        andc    r6, r6, r0
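        /* RI is cleared because SRR0/SRR1 are about to be loaded with guest
         * values; an interrupt taken before the rfi below would not be
         * recoverable. */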

        PPC_LL  r0, SVCPU_R0(r3)
        PPC_LL  r1, SVCPU_R1(r3)
        PPC_LL  r2, SVCPU_R2(r3)
        PPC_LL  r5, SVCPU_R5(r3)
        PPC_LL  r7, SVCPU_R7(r3)
        PPC_LL  r8, SVCPU_R8(r3)
        PPC_LL  r10, SVCPU_R10(r3)
        PPC_LL  r11, SVCPU_R11(r3)
        PPC_LL  r12, SVCPU_R12(r3)
        PPC_LL  r13, SVCPU_R13(r3)

        MTMSR_EERI(r6)
        mtsrr0  r9
        mtsrr1  r4

        PPC_LL  r4, SVCPU_R4(r3)
        PPC_LL  r6, SVCPU_R6(r3)
        PPC_LL  r9, SVCPU_R9(r3)
        PPC_LL  r3, (SVCPU_R3)(r3)

        RFI_TO_GUEST
kvmppc_handler_trampoline_enter_end:



/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_interrupt_pr
kvmppc_interrupt_pr:
        /* 64-bit entry. Register usage at this point:
         *
         * SPRG_SCRATCH0   = guest R13
         * R9              = HSTATE_IN_GUEST
         * R12             = (guest CR << 32) | exit handler id
         * R13             = PACA
         * HSTATE.SCRATCH0 = guest R12
         * HSTATE.SCRATCH2 = guest R9
         */
#ifdef CONFIG_PPC64
        /* Match 32-bit entry */
        ld      r9,HSTATE_SCRATCH2(r13)
        rotldi  r12, r12, 32              /* Flip R12 halves for stw */
        stw     r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */
        srdi    r12, r12, 32              /* shift trap into the low half */
#endif

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
        /* Register usage at this point:
         *
         * SPRG_SCRATCH0   = guest R13
         * R12             = exit handler id
         * R13             = shadow vcpu (32-bit) or PACA (64-bit)
         * HSTATE.SCRATCH0 = guest R12
         * HSTATE.SCRATCH1 = guest CR
         */

        /* Save registers */

        PPC_STL r0, SVCPU_R0(r13)
        PPC_STL r1, SVCPU_R1(r13)
        PPC_STL r2, SVCPU_R2(r13)
        PPC_STL r3, SVCPU_R3(r13)
        PPC_STL r4, SVCPU_R4(r13)
        PPC_STL r5, SVCPU_R5(r13)
        PPC_STL r6, SVCPU_R6(r13)
        PPC_STL r7, SVCPU_R7(r13)
        PPC_STL r8, SVCPU_R8(r13)
        PPC_STL r9, SVCPU_R9(r13)
        PPC_STL r10, SVCPU_R10(r13)
        PPC_STL r11, SVCPU_R11(r13)

        /* Restore R1/R2 so we can handle faults */
        PPC_LL  r1, HSTATE_HOST_R1(r13)
        PPC_LL  r2, HSTATE_HOST_R2(r13)

        /* Save guest PC and MSR */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
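        /* Exit ids with bit 0x2 set were delivered via HSRR0/HSRR1, so read
         * those instead of SRR0/SRR1 */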
        andi.   r0, r12, 0x2
        cmpwi   cr1, r0, 0
        beq     1f
        mfspr   r3,SPRN_HSRR0
        mfspr   r4,SPRN_HSRR1
        andi.   r12,r12,0x3ffd
        b       2f
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:      mfsrr0  r3
        mfsrr1  r4
2:
        PPC_STL r3, SVCPU_PC(r13)
        PPC_STL r4, SVCPU_SHADOW_SRR1(r13)

        /* Get scratch'ed off registers */
        GET_SCRATCH0(r9)
        PPC_LL  r8, HSTATE_SCRATCH0(r13)
        lwz     r7, HSTATE_SCRATCH1(r13)

        PPC_STL r9, SVCPU_R13(r13)
        PPC_STL r8, SVCPU_R12(r13)
        stw     r7, SVCPU_CR(r13)

        /* Save more register state */

        mfxer   r5
        mfdar   r6
        mfdsisr r7
        mfctr   r8
        mflr    r9

        PPC_STL r5, SVCPU_XER(r13)
        PPC_STL r6, SVCPU_FAULT_DAR(r13)
        stw     r7, SVCPU_FAULT_DSISR(r13)
        PPC_STL r8, SVCPU_CTR(r13)
        PPC_STL r9, SVCPU_LR(r13)

        /*
         * In order for us to easily get the last instruction
         * we got the #vmexit at, we exploit the fact that the
         * virtual layout is still the same here, so we can just
         * ld from the guest's PC address
         */

        /* We only load the last instruction when it's safe */
        cmpwi   r12, BOOK3S_INTERRUPT_DATA_STORAGE
        beq     ld_last_inst
        cmpwi   r12, BOOK3S_INTERRUPT_PROGRAM
        beq     ld_last_inst
        cmpwi   r12, BOOK3S_INTERRUPT_SYSCALL
        beq     ld_last_prev_inst
        cmpwi   r12, BOOK3S_INTERRUPT_ALIGNMENT
        beq-    ld_last_inst
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
        cmpwi   r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
        beq-    ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
BEGIN_FTR_SECTION
        cmpwi   r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
        beq-    ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif

        b       no_ld_last_inst

ld_last_prev_inst:
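        /* For a syscall, SRR0 points past the sc instruction, so step back to it */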
        addi    r3, r3, -4

ld_last_inst:
        /* Save off the guest instruction we're at */

        /* In case lwz faults */
        li      r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

        /* Set guest mode to 'jump over instruction' so that if the lwz faults
         * we'll just continue at the next IP. */
        li      r9, KVM_GUEST_MODE_SKIP
        stb     r9, HSTATE_IN_GUEST(r13)

        /*    1) enable paging for data */
        mfmsr   r9
        ori     r11, r9, MSR_DR
        mtmsr   r11
        sync
        /*    2) fetch the instruction */
        lwz     r0, 0(r3)
        /*    3) disable paging again */
        mtmsr   r9
        sync

#endif
        stw     r0, SVCPU_LAST_INST(r13)

no_ld_last_inst:

        /* Unset guest mode */
        li      r9, KVM_GUEST_MODE_NONE
        stb     r9, HSTATE_IN_GUEST(r13)

        /* Switch back to host MMU */
        LOAD_HOST_SEGMENTS

#ifdef CONFIG_PPC_BOOK3S_64

        lbz     r5, HSTATE_RESTORE_HID5(r13)
        cmpwi   r5, 0
        beq     no_dcbz32_off

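        /* Clear the dcbz-32 setting in HID5 that was enabled on guest entry */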
        li      r4, 0
        mfspr   r5,SPRN_HID5
        rldimi  r5,r4,6,56
        mtspr   SPRN_HID5,r5

no_dcbz32_off:

BEGIN_FTR_SECTION
        /* Save guest FSCR on a FAC_UNAVAIL interrupt */
        cmpwi   r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
        bne+    no_fscr_save
        mfspr   r7, SPRN_FSCR
        std     r7, SVCPU_SHADOW_FSCR(r13)
no_fscr_save:
        /* Restore host FSCR */
        ld      r8, HSTATE_HOST_FSCR(r13)
        mtspr   SPRN_FSCR, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

#endif /* CONFIG_PPC_BOOK3S_64 */

        /*
         * For some interrupts, we need to call the real Linux
         * handler, so it can do work for us. This has to happen
         * as if the interrupt arrived from the kernel though,
         * so let's fake it here where most state is restored.
         *
         * Having set up SRR0/1 with the address where we want
         * to continue with relocation on (potentially in virtual address
         * space), we either just go straight there with rfi[d],
         * or we jump to an interrupt handler if there is an
         * interrupt to be handled first.  In the latter case,
         * the rfi[d] at the end of the interrupt handler will
         * get us back to where we want to continue.
         */

        /* Register usage at this point:
         *
         * R1       = host R1
         * R2       = host R2
         * R10      = raw exit handler id
         * R12      = exit handler id
         * R13      = shadow vcpu (32-bit) or PACA (64-bit)
         * SVCPU.*  = guest *
         *
         */

        PPC_LL  r6, HSTATE_HOST_MSR(r13)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * We don't want to change MSR[TS] bits via rfi here.
         * The actual TM handling logic will be in the host with
         * recovered DR/IR bits after HSTATE_VMHANDLER.
         * And MSR_TM can be enabled in HOST_MSR so rfid may
         * not suppress this change and can lead to an exception.
         * Manually set MSR to prevent a TS state change here.
         */
        mfmsr   r7
        rldicl  r7, r7, 64 - MSR_TS_S_LG, 62
        rldimi  r6, r7, MSR_TS_S_LG, 63 - MSR_TS_T_LG
#endif
        PPC_LL  r8, HSTATE_VMHANDLER(r13)

#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
        beq     cr1, 1f
        mtspr   SPRN_HSRR1, r6
        mtspr   SPRN_HSRR0, r8
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:      /* Restore host msr -> SRR1 */
        mtsrr1  r6
        /* Load highmem handler address */
        mtsrr0  r8

        /* RFI into the highmem handler, or jump to interrupt handler in real mode */
        cmpwi   r12, BOOK3S_INTERRUPT_EXTERNAL
        beqa    BOOK3S_INTERRUPT_EXTERNAL
        cmpwi   r12, BOOK3S_INTERRUPT_DECREMENTER
        beqa    BOOK3S_INTERRUPT_DECREMENTER
        cmpwi   r12, BOOK3S_INTERRUPT_PERFMON
        beqa    BOOK3S_INTERRUPT_PERFMON
        cmpwi   r12, BOOK3S_INTERRUPT_DOORBELL
        beqa    BOOK3S_INTERRUPT_DOORBELL
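        /* beqa branches to an absolute address: the BOOK3S_INTERRUPT_* value
         * is the host's interrupt vector, so the pending interrupt is handled
         * there and its rfi[d] returns to the highmem handler in SRR0 */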

        RFI_TO_KERNEL
kvmppc_handler_trampoline_exit_end:
                                                      
