
TOMOYO Linux Cross Reference
Linux/arch/powerpc/kvm/bookehv_interrupts.S



/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Varun Sethi <varun.sethi@freescale.com>
 * Author: Scott Wood <scotwood@freescale.com>
 * Author: Mihai Caraman <mihai.caraman@freescale.com>
 *
 * This file is derived from arch/powerpc/kvm/booke_interrupts.S
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>

#ifdef CONFIG_64BIT
#include <asm/exception-64e.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>
#else
#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
#endif

#define LONGBYTES               (BITS_PER_LONG / 8)

#define VCPU_GUEST_SPRG(n)      (VCPU_GUEST_SPRGS + (n * LONGBYTES))

/* The host stack layout: */
#define HOST_R1         0 /* Implied by stwu. */
#define HOST_CALLEE_LR  PPC_LR_STKOFF
#define HOST_RUN        (HOST_CALLEE_LR + LONGBYTES)
/*
 * r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option.
 */
#define HOST_R2         (HOST_RUN + LONGBYTES)
#define HOST_CR         (HOST_R2 + LONGBYTES)
#define HOST_NV_GPRS    (HOST_CR + LONGBYTES)
#define __HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
#define HOST_NV_GPR(n)  __HOST_NV_GPR(__REG_##n)
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + LONGBYTES)
#define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
/* LR in caller stack frame. */
#define HOST_STACK_LR   (HOST_STACK_SIZE + PPC_LR_STKOFF)
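/*
 * The resulting frame, from low to high addresses: the back-chain word
 * written by stwu, the LR save slot, the HOST_RUN slot, saved r2, saved
 * CR, then r14..r31, with the total size rounded up to a 16-byte multiple.
 */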

#define NEED_EMU                0x00000001 /* emulation -- save nv regs */
#define NEED_DEAR               0x00000002 /* save faulting DEAR */
#define NEED_ESR                0x00000004 /* save faulting ESR */

/*
 * On entry:
 * r4 = vcpu, r5 = srr0, r6 = srr1
 * saved in vcpu: cr, ctr, r3-r13
 */
.macro kvm_handler_common intno, srr0, flags
        /* Restore host stack pointer */
        PPC_STL r1, VCPU_GPR(R1)(r4)
        PPC_STL r2, VCPU_GPR(R2)(r4)
        PPC_LL  r1, VCPU_HOST_STACK(r4)
        PPC_LL  r2, HOST_R2(r1)

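        /*
         * Flush the branch target buffer on the transition out of the
         * guest so guest-trained branch predictor state cannot steer host
         * execution; the section is patched out on cores that don't need it.
         */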
START_BTB_FLUSH_SECTION
        BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION

        mfspr   r10, SPRN_PID
        lwz     r8, VCPU_HOST_PID(r4)
        PPC_LL  r11, VCPU_SHARED(r4)
        PPC_STL r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
        li      r14, \intno

        stw     r10, VCPU_GUEST_PID(r4)
        mtspr   SPRN_PID, r8

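        /*
         * Read the 64-bit timebase as TBU, TBL, TBU: if the upper half
         * changed between the two TBU reads, TBL wrapped during the
         * sequence and the read is retried.
         */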
#ifdef CONFIG_KVM_EXIT_TIMING
        /* save exit time */
1:      mfspr   r7, SPRN_TBRU
        mfspr   r8, SPRN_TBRL
        mfspr   r9, SPRN_TBRU
        cmpw    r9, r7
        stw     r8, VCPU_TIMING_EXIT_TBL(r4)
        bne-    1b
        stw     r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

        oris    r8, r6, MSR_CE@h
        PPC_STD(r6, VCPU_SHARED_MSR, r11)
        ori     r8, r8, MSR_ME | MSR_RI
        PPC_STL r5, VCPU_PC(r4)

        /*
         * Make sure CE/ME/RI are set (if appropriate for the exception type)
         * whether or not the guest had them set.  Since mfmsr/mtmsr are
         * somewhat expensive, skip in the common case where the guest
         * had all these bits set (and thus they're still set if
         * appropriate for the exception type).
         */
        cmpw    r6, r8
        beq     1f
        mfmsr   r7
        .if     \srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0
        oris    r7, r7, MSR_CE@h
        .endif
        .if     \srr0 != SPRN_MCSRR0
        ori     r7, r7, MSR_ME | MSR_RI
        .endif
        mtmsr   r7
1:

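        /*
         * Exits that may go to the instruction emulator need the full
         * guest register image, so the non-volatile GPRs are saved only
         * when NEED_EMU is set; other exits leave them live in hardware.
         */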
        .if     \flags & NEED_EMU
        PPC_STL r15, VCPU_GPR(R15)(r4)
        PPC_STL r16, VCPU_GPR(R16)(r4)
        PPC_STL r17, VCPU_GPR(R17)(r4)
        PPC_STL r18, VCPU_GPR(R18)(r4)
        PPC_STL r19, VCPU_GPR(R19)(r4)
        PPC_STL r20, VCPU_GPR(R20)(r4)
        PPC_STL r21, VCPU_GPR(R21)(r4)
        PPC_STL r22, VCPU_GPR(R22)(r4)
        PPC_STL r23, VCPU_GPR(R23)(r4)
        PPC_STL r24, VCPU_GPR(R24)(r4)
        PPC_STL r25, VCPU_GPR(R25)(r4)
        PPC_STL r26, VCPU_GPR(R26)(r4)
        PPC_STL r27, VCPU_GPR(R27)(r4)
        PPC_STL r28, VCPU_GPR(R28)(r4)
        PPC_STL r29, VCPU_GPR(R29)(r4)
        PPC_STL r30, VCPU_GPR(R30)(r4)
        PPC_STL r31, VCPU_GPR(R31)(r4)

        /*
         * We don't use external PID support. lwepx faults would need to be
         * handled by KVM and this implies additional code in DO_KVM (for
         * DTB_MISS, DSI and LRAT) to check ESR[EPID] and EPLC[EGS], which
         * is too intrusive for the host. Get the last instruction in
         * kvmppc_get_last_inst().
         */
        li      r9, KVM_INST_FETCH_FAILED
        PPC_STL r9, VCPU_LAST_INST(r4)
        .endif

        .if     \flags & NEED_ESR
        mfspr   r8, SPRN_ESR
        PPC_STL r8, VCPU_FAULT_ESR(r4)
        .endif

        .if     \flags & NEED_DEAR
        mfspr   r9, SPRN_DEAR
        PPC_STL r9, VCPU_FAULT_DEAR(r4)
        .endif

        b       kvmppc_resume_host
.endm

#ifdef CONFIG_64BIT
/* Exception types */
#define EX_GEN                  1
#define EX_GDBELL               2
#define EX_DBG                  3
#define EX_MC                   4
#define EX_CRIT                 5
#define EX_TLB                  6

/*
 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
 */
.macro kvm_handler intno type scratch, paca_ex, ex_r10, ex_r11, srr0, srr1, flags
 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
        mr      r11, r4
        /*
         * Get vcpu from Paca: paca->__current.thread->kvm_vcpu
         */
        PPC_LL  r4, PACACURRENT(r13)
        PPC_LL  r4, (THREAD + THREAD_KVM_VCPU)(r4)
        PPC_STL r10, VCPU_CR(r4)
        PPC_STL r11, VCPU_GPR(R4)(r4)
        PPC_STL r5, VCPU_GPR(R5)(r4)
        PPC_STL r6, VCPU_GPR(R6)(r4)
        PPC_STL r8, VCPU_GPR(R8)(r4)
        PPC_STL r9, VCPU_GPR(R9)(r4)
        .if \type == EX_TLB
        PPC_LL  r5, EX_TLB_R13(r12)
        PPC_LL  r6, EX_TLB_R10(r12)
        PPC_LL  r8, EX_TLB_R11(r12)
        mfspr   r12, \scratch
        .else
        mfspr   r5, \scratch
        PPC_LL  r6, (\paca_ex + \ex_r10)(r13)
        PPC_LL  r8, (\paca_ex + \ex_r11)(r13)
        .endif
        PPC_STL r5, VCPU_GPR(R13)(r4)
        PPC_STL r3, VCPU_GPR(R3)(r4)
        PPC_STL r7, VCPU_GPR(R7)(r4)
        PPC_STL r12, VCPU_GPR(R12)(r4)
        PPC_STL r6, VCPU_GPR(R10)(r4)
        PPC_STL r8, VCPU_GPR(R11)(r4)
        mfctr   r5
        PPC_STL r5, VCPU_CTR(r4)
        mfspr   r5, \srr0
        mfspr   r6, \srr1
        kvm_handler_common \intno, \srr0, \flags
.endm

#define EX_PARAMS(type) \
        EX_##type, \
        SPRN_SPRG_##type##_SCRATCH, \
        PACA_EX##type, \
        EX_R10, \
        EX_R11

#define EX_PARAMS_TLB \
        EX_TLB, \
        SPRN_SPRG_GEN_SCRATCH, \
        PACA_EXTLB, \
        EX_TLB_R10, \
        EX_TLB_R11

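/*
 * EX_PARAMS(GEN), for example, expands to
 * "EX_GEN, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN, EX_R10, EX_R11",
 * supplying the type/scratch/paca_ex/ex_r10/ex_r11 arguments of kvm_handler.
 */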
kvm_handler BOOKE_INTERRUPT_CRITICAL, EX_PARAMS(CRIT), \
        SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_MACHINE_CHECK, EX_PARAMS(MC), \
        SPRN_MCSRR0, SPRN_MCSRR1, 0
kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1,(NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_INST_STORAGE, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_EXTERNAL, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1,(NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_PROGRAM, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU)
kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DECREMENTER, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_FIT, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_WATCHDOG, EX_PARAMS(CRIT), \
        SPRN_CSRR0, SPRN_CSRR1, 0
/*
 * Only bolted TLB miss exception handlers are supported for now
 */
kvm_handler BOOKE_INTERRUPT_DTLB_MISS, EX_PARAMS_TLB, \
        SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \
        SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALTIVEC_UNAVAIL, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALTIVEC_ASSIST, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, EX_PARAMS(CRIT), \
        SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_HV_PRIV, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, NEED_EMU
kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, EX_PARAMS(GDBELL), \
        SPRN_GSRR0, SPRN_GSRR1, 0
kvm_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, EX_PARAMS(CRIT), \
        SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(DBG), \
        SPRN_DSRR0, SPRN_DSRR1, 0
kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \
        SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_LRAT_ERROR, EX_PARAMS(GEN), \
        SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
#else
/*
 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
 */
.macro kvm_handler intno srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
        PPC_LL  r11, THREAD_KVM_VCPU(r10)
        PPC_STL r3, VCPU_GPR(R3)(r11)
        mfspr   r3, SPRN_SPRG_RSCRATCH0
        PPC_STL r4, VCPU_GPR(R4)(r11)
        PPC_LL  r4, THREAD_NORMSAVE(0)(r10)
        PPC_STL r5, VCPU_GPR(R5)(r11)
        PPC_STL r13, VCPU_CR(r11)
        mfspr   r5, \srr0
        PPC_STL r3, VCPU_GPR(R10)(r11)
        PPC_LL  r3, THREAD_NORMSAVE(2)(r10)
        PPC_STL r6, VCPU_GPR(R6)(r11)
        PPC_STL r4, VCPU_GPR(R11)(r11)
        mfspr   r6, \srr1
        PPC_STL r7, VCPU_GPR(R7)(r11)
        PPC_STL r8, VCPU_GPR(R8)(r11)
        PPC_STL r9, VCPU_GPR(R9)(r11)
        PPC_STL r3, VCPU_GPR(R13)(r11)
        mfctr   r7
        PPC_STL r12, VCPU_GPR(R12)(r11)
        PPC_STL r7, VCPU_CTR(r11)
        mr      r4, r11
        kvm_handler_common \intno, \srr0, \flags
.endm

.macro kvm_lvl_handler intno scratch srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
        mfspr   r10, SPRN_SPRG_THREAD
        PPC_LL  r11, THREAD_KVM_VCPU(r10)
        PPC_STL r3, VCPU_GPR(R3)(r11)
        mfspr   r3, \scratch
        PPC_STL r4, VCPU_GPR(R4)(r11)
        PPC_LL  r4, GPR9(r8)
        PPC_STL r5, VCPU_GPR(R5)(r11)
        PPC_STL r9, VCPU_CR(r11)
        mfspr   r5, \srr0
        PPC_STL r3, VCPU_GPR(R8)(r11)
        PPC_LL  r3, GPR10(r8)
        PPC_STL r6, VCPU_GPR(R6)(r11)
        PPC_STL r4, VCPU_GPR(R9)(r11)
        mfspr   r6, \srr1
        PPC_LL  r4, GPR11(r8)
        PPC_STL r7, VCPU_GPR(R7)(r11)
        PPC_STL r3, VCPU_GPR(R10)(r11)
        mfctr   r7
        PPC_STL r12, VCPU_GPR(R12)(r11)
        PPC_STL r13, VCPU_GPR(R13)(r11)
        PPC_STL r4, VCPU_GPR(R11)(r11)
        PPC_STL r7, VCPU_CTR(r11)
        mr      r4, r11
        kvm_handler_common \intno, \srr0, \flags
.endm

kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
        SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
        SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
        SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
        SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU)
kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
        SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
        SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
        SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU
kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \
        SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
        SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
        SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0
#endif

/* Registers:
 *  SPRG_SCRATCH0: guest r10
 *  r4: vcpu pointer
 *  r11: vcpu->arch.shared
 *  r14: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
        /* Save remaining volatile guest register state to vcpu. */
        mfspr   r3, SPRN_VRSAVE
        PPC_STL r0, VCPU_GPR(R0)(r4)
        mflr    r5
        mfspr   r6, SPRN_SPRG4
        PPC_STL r5, VCPU_LR(r4)
        mfspr   r7, SPRN_SPRG5
        stw     r3, VCPU_VRSAVE(r4)
#ifdef CONFIG_64BIT
        PPC_LL  r3, PACA_SPRG_VDSO(r13)
#endif
        mfspr   r5, SPRN_SPRG9
        PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
        mfspr   r8, SPRN_SPRG6
        PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
        mfspr   r9, SPRN_SPRG7
#ifdef CONFIG_64BIT
        mtspr   SPRN_SPRG_VDSO_WRITE, r3
#endif
        PPC_STD(r5, VCPU_SPRG9, r4)
        PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
        mfxer   r3
        PPC_STD(r9, VCPU_SHARED_SPRG7, r11)

        /* save guest MAS registers and restore host mas4 & mas6 */
        mfspr   r5, SPRN_MAS0
        PPC_STL r3, VCPU_XER(r4)
        mfspr   r6, SPRN_MAS1
        stw     r5, VCPU_SHARED_MAS0(r11)
        mfspr   r7, SPRN_MAS2
        stw     r6, VCPU_SHARED_MAS1(r11)
        PPC_STD(r7, VCPU_SHARED_MAS2, r11)
        mfspr   r5, SPRN_MAS3
        mfspr   r6, SPRN_MAS4
        stw     r5, VCPU_SHARED_MAS7_3+4(r11)
        mfspr   r7, SPRN_MAS6
        stw     r6, VCPU_SHARED_MAS4(r11)
        mfspr   r5, SPRN_MAS7
        lwz     r6, VCPU_HOST_MAS4(r4)
        stw     r7, VCPU_SHARED_MAS6(r11)
        lwz     r8, VCPU_HOST_MAS6(r4)
        mtspr   SPRN_MAS4, r6
        stw     r5, VCPU_SHARED_MAS7_3+0(r11)
        mtspr   SPRN_MAS6, r8
        /* Enable MAS register updates via exception */
        mfspr   r3, SPRN_EPCR
        rlwinm  r3, r3, 0, ~SPRN_EPCR_DMIUH
        mtspr   SPRN_EPCR, r3
        isync

        /* Switch to kernel stack and jump to C. */
        mr      r3, r4
        mr      r5, r14 /* intno */
        mr      r14, r4 /* Save vcpu pointer. */
        mr      r4, r5
        bl      kvmppc_handle_exit

        /* Restore vcpu pointer and the nonvolatiles we used. */
        mr      r4, r14
        PPC_LL  r14, VCPU_GPR(R14)(r4)

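        /*
         * kvmppc_handle_exit() returns a RESUME_* code in r3; if
         * RESUME_FLAG_NV is set, the guest non-volatile registers must be
         * reloaded from the vcpu before the guest can be resumed.
         */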
        andi.   r5, r3, RESUME_FLAG_NV
        beq     skip_nv_load
        PPC_LL  r15, VCPU_GPR(R15)(r4)
        PPC_LL  r16, VCPU_GPR(R16)(r4)
        PPC_LL  r17, VCPU_GPR(R17)(r4)
        PPC_LL  r18, VCPU_GPR(R18)(r4)
        PPC_LL  r19, VCPU_GPR(R19)(r4)
        PPC_LL  r20, VCPU_GPR(R20)(r4)
        PPC_LL  r21, VCPU_GPR(R21)(r4)
        PPC_LL  r22, VCPU_GPR(R22)(r4)
        PPC_LL  r23, VCPU_GPR(R23)(r4)
        PPC_LL  r24, VCPU_GPR(R24)(r4)
        PPC_LL  r25, VCPU_GPR(R25)(r4)
        PPC_LL  r26, VCPU_GPR(R26)(r4)
        PPC_LL  r27, VCPU_GPR(R27)(r4)
        PPC_LL  r28, VCPU_GPR(R28)(r4)
        PPC_LL  r29, VCPU_GPR(R29)(r4)
        PPC_LL  r30, VCPU_GPR(R30)(r4)
        PPC_LL  r31, VCPU_GPR(R31)(r4)
skip_nv_load:
        /* Should we return to the guest? */
        andi.   r5, r3, RESUME_FLAG_HOST
        beq     lightweight_exit

        srawi   r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
        /* Not returning to guest. */
        PPC_LL  r5, HOST_STACK_LR(r1)
        lwz     r6, HOST_CR(r1)

        /*
         * We already saved guest volatile register state; now save the
         * non-volatiles.
         */

        PPC_STL r15, VCPU_GPR(R15)(r4)
        PPC_STL r16, VCPU_GPR(R16)(r4)
        PPC_STL r17, VCPU_GPR(R17)(r4)
        PPC_STL r18, VCPU_GPR(R18)(r4)
        PPC_STL r19, VCPU_GPR(R19)(r4)
        PPC_STL r20, VCPU_GPR(R20)(r4)
        PPC_STL r21, VCPU_GPR(R21)(r4)
        PPC_STL r22, VCPU_GPR(R22)(r4)
        PPC_STL r23, VCPU_GPR(R23)(r4)
        PPC_STL r24, VCPU_GPR(R24)(r4)
        PPC_STL r25, VCPU_GPR(R25)(r4)
        PPC_STL r26, VCPU_GPR(R26)(r4)
        PPC_STL r27, VCPU_GPR(R27)(r4)
        PPC_STL r28, VCPU_GPR(R28)(r4)
        PPC_STL r29, VCPU_GPR(R29)(r4)
        PPC_STL r30, VCPU_GPR(R30)(r4)
        PPC_STL r31, VCPU_GPR(R31)(r4)

        /* Load host non-volatile register state from host stack. */
        PPC_LL  r14, HOST_NV_GPR(R14)(r1)
        PPC_LL  r15, HOST_NV_GPR(R15)(r1)
        PPC_LL  r16, HOST_NV_GPR(R16)(r1)
        PPC_LL  r17, HOST_NV_GPR(R17)(r1)
        PPC_LL  r18, HOST_NV_GPR(R18)(r1)
        PPC_LL  r19, HOST_NV_GPR(R19)(r1)
        PPC_LL  r20, HOST_NV_GPR(R20)(r1)
        PPC_LL  r21, HOST_NV_GPR(R21)(r1)
        PPC_LL  r22, HOST_NV_GPR(R22)(r1)
        PPC_LL  r23, HOST_NV_GPR(R23)(r1)
        PPC_LL  r24, HOST_NV_GPR(R24)(r1)
        PPC_LL  r25, HOST_NV_GPR(R25)(r1)
        PPC_LL  r26, HOST_NV_GPR(R26)(r1)
        PPC_LL  r27, HOST_NV_GPR(R27)(r1)
        PPC_LL  r28, HOST_NV_GPR(R28)(r1)
        PPC_LL  r29, HOST_NV_GPR(R29)(r1)
        PPC_LL  r30, HOST_NV_GPR(R30)(r1)
        PPC_LL  r31, HOST_NV_GPR(R31)(r1)

        /* Return to kvm_vcpu_run(). */
        mtlr    r5
        mtcr    r6
        addi    r1, r1, HOST_STACK_SIZE
        /* r3 still contains the return code from kvmppc_handle_exit(). */
        blr

/* Registers:
 *  r3: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
        stwu    r1, -HOST_STACK_SIZE(r1)
        PPC_STL r1, VCPU_HOST_STACK(r3) /* Save stack pointer to vcpu. */

        /* Save host state to stack. */
        mr      r4, r3
        mflr    r3
        mfcr    r5
        PPC_STL r3, HOST_STACK_LR(r1)

        stw     r5, HOST_CR(r1)

        /* Save host non-volatile register state to stack. */
        PPC_STL r14, HOST_NV_GPR(R14)(r1)
        PPC_STL r15, HOST_NV_GPR(R15)(r1)
        PPC_STL r16, HOST_NV_GPR(R16)(r1)
        PPC_STL r17, HOST_NV_GPR(R17)(r1)
        PPC_STL r18, HOST_NV_GPR(R18)(r1)
        PPC_STL r19, HOST_NV_GPR(R19)(r1)
        PPC_STL r20, HOST_NV_GPR(R20)(r1)
        PPC_STL r21, HOST_NV_GPR(R21)(r1)
        PPC_STL r22, HOST_NV_GPR(R22)(r1)
        PPC_STL r23, HOST_NV_GPR(R23)(r1)
        PPC_STL r24, HOST_NV_GPR(R24)(r1)
        PPC_STL r25, HOST_NV_GPR(R25)(r1)
        PPC_STL r26, HOST_NV_GPR(R26)(r1)
        PPC_STL r27, HOST_NV_GPR(R27)(r1)
        PPC_STL r28, HOST_NV_GPR(R28)(r1)
        PPC_STL r29, HOST_NV_GPR(R29)(r1)
        PPC_STL r30, HOST_NV_GPR(R30)(r1)
        PPC_STL r31, HOST_NV_GPR(R31)(r1)

        /* Load guest non-volatiles. */
        PPC_LL  r14, VCPU_GPR(R14)(r4)
        PPC_LL  r15, VCPU_GPR(R15)(r4)
        PPC_LL  r16, VCPU_GPR(R16)(r4)
        PPC_LL  r17, VCPU_GPR(R17)(r4)
        PPC_LL  r18, VCPU_GPR(R18)(r4)
        PPC_LL  r19, VCPU_GPR(R19)(r4)
        PPC_LL  r20, VCPU_GPR(R20)(r4)
        PPC_LL  r21, VCPU_GPR(R21)(r4)
        PPC_LL  r22, VCPU_GPR(R22)(r4)
        PPC_LL  r23, VCPU_GPR(R23)(r4)
        PPC_LL  r24, VCPU_GPR(R24)(r4)
        PPC_LL  r25, VCPU_GPR(R25)(r4)
        PPC_LL  r26, VCPU_GPR(R26)(r4)
        PPC_LL  r27, VCPU_GPR(R27)(r4)
        PPC_LL  r28, VCPU_GPR(R28)(r4)
        PPC_LL  r29, VCPU_GPR(R29)(r4)
        PPC_LL  r30, VCPU_GPR(R30)(r4)
        PPC_LL  r31, VCPU_GPR(R31)(r4)

lightweight_exit:
        PPC_STL r2, HOST_R2(r1)

        mfspr   r3, SPRN_PID
        stw     r3, VCPU_HOST_PID(r4)
        lwz     r3, VCPU_GUEST_PID(r4)
        mtspr   SPRN_PID, r3

        PPC_LL  r11, VCPU_SHARED(r4)
        /* Disable MAS register updates via exception */
        mfspr   r3, SPRN_EPCR
        oris    r3, r3, SPRN_EPCR_DMIUH@h
        mtspr   SPRN_EPCR, r3
        isync
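        /*
         * With EPCR[DMIUH] set, TLB miss exceptions taken in hypervisor
         * state no longer update the MAS registers, so the guest MAS
         * values loaded below can't be clobbered by host TLB misses.
         */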
        /* Save host mas4 and mas6 and load guest MAS registers */
        mfspr   r3, SPRN_MAS4
        stw     r3, VCPU_HOST_MAS4(r4)
        mfspr   r3, SPRN_MAS6
        stw     r3, VCPU_HOST_MAS6(r4)
        lwz     r3, VCPU_SHARED_MAS0(r11)
        lwz     r5, VCPU_SHARED_MAS1(r11)
        PPC_LD(r6, VCPU_SHARED_MAS2, r11)
        lwz     r7, VCPU_SHARED_MAS7_3+4(r11)
        lwz     r8, VCPU_SHARED_MAS4(r11)
        mtspr   SPRN_MAS0, r3
        mtspr   SPRN_MAS1, r5
        mtspr   SPRN_MAS2, r6
        mtspr   SPRN_MAS3, r7
        mtspr   SPRN_MAS4, r8
        lwz     r3, VCPU_SHARED_MAS6(r11)
        lwz     r5, VCPU_SHARED_MAS7_3+0(r11)
        mtspr   SPRN_MAS6, r3
        mtspr   SPRN_MAS7, r5

        /*
         * Host interrupt handlers may have clobbered these guest-readable
         * SPRGs, so we need to reload them here with the guest's values.
         */
        lwz     r3, VCPU_VRSAVE(r4)
        PPC_LD(r5, VCPU_SHARED_SPRG4, r11)
        mtspr   SPRN_VRSAVE, r3
        PPC_LD(r6, VCPU_SHARED_SPRG5, r11)
        mtspr   SPRN_SPRG4W, r5
        PPC_LD(r7, VCPU_SHARED_SPRG6, r11)
        mtspr   SPRN_SPRG5W, r6
        PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
        mtspr   SPRN_SPRG6W, r7
        PPC_LD(r5, VCPU_SPRG9, r4)
        mtspr   SPRN_SPRG7W, r8
        mtspr   SPRN_SPRG9, r5

        /* Load some guest volatiles. */
        PPC_LL  r3, VCPU_LR(r4)
        PPC_LL  r5, VCPU_XER(r4)
        PPC_LL  r6, VCPU_CTR(r4)
        PPC_LL  r7, VCPU_CR(r4)
        PPC_LL  r8, VCPU_PC(r4)
        PPC_LD(r9, VCPU_SHARED_MSR, r11)
        PPC_LL  r0, VCPU_GPR(R0)(r4)
        PPC_LL  r1, VCPU_GPR(R1)(r4)
        PPC_LL  r2, VCPU_GPR(R2)(r4)
        PPC_LL  r10, VCPU_GPR(R10)(r4)
        PPC_LL  r11, VCPU_GPR(R11)(r4)
        PPC_LL  r12, VCPU_GPR(R12)(r4)
        PPC_LL  r13, VCPU_GPR(R13)(r4)
        mtlr    r3
        mtxer   r5
        mtctr   r6
        mtsrr0  r8
        mtsrr1  r9
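        /*
         * SRR0/SRR1 now hold the guest PC and MSR; the rfi at the end of
         * this path loads both atomically to enter the guest.
         */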

#ifdef CONFIG_KVM_EXIT_TIMING
        /* save enter time */
1:
        mfspr   r6, SPRN_TBRU
        mfspr   r9, SPRN_TBRL
        mfspr   r8, SPRN_TBRU
        cmpw    r8, r6
        stw     r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
        bne     1b
        stw     r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

        /*
         * Don't execute any instruction which can change CR after the
         * instruction below.
         */
        mtcr    r7

        /* Finish loading guest volatiles and jump to guest. */
        PPC_LL  r5, VCPU_GPR(R5)(r4)
        PPC_LL  r6, VCPU_GPR(R6)(r4)
        PPC_LL  r7, VCPU_GPR(R7)(r4)
        PPC_LL  r8, VCPU_GPR(R8)(r4)
        PPC_LL  r9, VCPU_GPR(R9)(r4)

        PPC_LL  r3, VCPU_GPR(R3)(r4)
        PPC_LL  r4, VCPU_GPR(R4)(r4)
        rfi
                                                      
