TOMOYO Linux Cross Reference
Linux/arch/arm64/kvm/hyp/nvhe/tlb.c

Source listing

/arch/arm64/kvm/hyp/nvhe/tlb.c (Version linux-6.11-rc3)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>

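/*
 * Added summary comment (not in the original source): context saved across
 * a VMID switch -- the stage-2 MMU to restore on exit (NULL if no switch
 * was needed), plus the EL1 TCR/SCTLR values stashed for the
 * ARM64_WORKAROUND_SPECULATIVE_AT sequence below.
 */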
struct tlb_inv_context {
	struct kvm_s2_mmu	*mmu;
	u64			tcr;
	u64			sctlr;
};

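/*
 * Added summary comment (not in the original source): switch this CPU onto
 * the stage-2 context of @mmu (unless it is already the live one),
 * recording what must be restored in @cxt so that exit_vmid_context()
 * can switch back.
 */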
static void enter_vmid_context(struct kvm_s2_mmu *mmu,
			       struct tlb_inv_context *cxt,
			       bool nsh)
{
	struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;
	cxt->mmu = NULL;

	/*
	 * We have two requirements:
	 *
	 * - ensure that the page table updates are visible to all
	 *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
	 *   being either ish or nsh, depending on the invalidation
	 *   type.
	 *
	 * - complete any speculative page table walk started before
	 *   we trapped to EL2 so that we can safely change the system
	 *   registers out of context, for which we need a dsb(nsh).
	 *
	 * The composition of these two barriers is a dsb(DOMAIN), and
	 * the 'nsh' parameter tracks the distinction between
	 * Inner-Shareable and Non-Shareable, as specified by the
	 * callers.
	 */
	if (nsh)
		dsb(nsh);
	else
		dsb(ish);

	/*
	 * If we're already in the desired context, then there's nothing to do.
	 */
	if (vcpu) {
		/*
		 * We're in guest context. However, for this to work, this needs
		 * to be called from within __kvm_vcpu_run(), which ensures that
		 * __hyp_running_vcpu is set to the current vcpu.
		 */
		if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
			return;

		cxt->mmu = vcpu->arch.hw_mmu;
	} else {
		/* We're in host context. */
		if (mmu == host_s2_mmu)
			return;

		cxt->mmu = host_s2_mmu;
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM 1319367, we need to
		 * avoid a Stage-1 walk with the old VMID while we have
		 * the new VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the host S1 MMU is enabled, so
		 * we can simply set the EPD bits to avoid any further
		 * TLB fill. For guests, we ensure that the S1 MMU is
		 * temporarily enabled in the next context.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();

		if (vcpu) {
			val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
			if (!(val & SCTLR_ELx_M)) {
				val |= SCTLR_ELx_M;
				write_sysreg_el1(val, SYS_SCTLR);
				isb();
			}
		} else {
			/* The host S1 MMU is always enabled. */
			cxt->sctlr = SCTLR_ELx_M;
		}
	}

	/*
	 * __load_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	if (vcpu)
		__load_host_stage2();
	else
		__load_stage2(mmu, kern_hyp_va(mmu->arch));

	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

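/*
 * Added summary comment (not in the original source): undo
 * enter_vmid_context() -- reload the stage-2 context saved in @cxt
 * (if any) and unwind the SPECULATIVE_AT register changes.
 */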
static void exit_vmid_context(struct tlb_inv_context *cxt)
{
	struct kvm_s2_mmu *mmu = cxt->mmu;
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (!mmu)
		return;

	if (vcpu)
		__load_stage2(mmu, kern_hyp_va(mmu->arch));
	else
		__load_host_stage2();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the old VMID */
		isb();

		if (!(cxt->sctlr & SCTLR_ELx_M)) {
			write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
			isb();
		}

		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt, false);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	exit_vmid_context(&cxt);
}

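/*
 * Added summary comment (not in the original source): non-shareable
 * variant of the above -- the same sequence, but the TLBIs and barriers
 * only need to take effect on the local CPU.
 */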
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
				  phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt, true);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(nsh);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	exit_vmid_context(&cxt);
}

void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t start, unsigned long pages)
{
	struct tlb_inv_context cxt;
	unsigned long stride;

	/*
	 * Since the range of addresses may not be mapped at
	 * the same level, assume the worst case as PAGE_SIZE
	 */
	stride = PAGE_SIZE;
	start = round_down(start, stride);

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt, false);

	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
				TLBI_TTL_UNKNOWN);

	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	exit_vmid_context(&cxt);
}

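/*
 * Added summary comment (not in the original source): invalidate all
 * stage-1 and stage-2 TLB entries for @mmu's VMID, across all CPUs in
 * the Inner Shareable domain.
 */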
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt, false);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	exit_vmid_context(&cxt);
}

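/*
 * Added summary comment (not in the original source): local-CPU flush --
 * invalidate the stage-1 TLB entries for @mmu's VMID and the entire
 * instruction cache, with only non-shareable scope.
 */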
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	enter_vmid_context(mmu, &cxt, false);

	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	exit_vmid_context(&cxt);
}

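/*
 * Added summary comment (not in the original source): invalidate TLB
 * entries for all VMIDs on all CPUs, e.g. when the VMID allocator's
 * generation rolls over and old VMIDs get recycled.
 */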
void __kvm_flush_vm_context(void)
{
	/* Same remark as in enter_vmid_context() */
	dsb(ish);
	__tlbi(alle1is);
	dsb(ish);
}
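
Note: the __kvm_* entry points above run at EL2 and are reached from the host kernel through the nVHE hyp-call interface. The following is a minimal sketch of what a dispatch handler looks like, loosely modelled on arch/arm64/kvm/hyp/nvhe/hyp-main.c; it is illustrative, not the verbatim kernel code.

static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
{
	/* Hyp-call arguments arrive in the host's GP registers x1..x3. */
	DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
	DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
	DECLARE_REG(int, level, host_ctxt, 3);

	/* The host passes a kernel VA; convert it to a hyp VA before use. */
	__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}

On the host side the call is issued as kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ipa, level), which traps to EL2 and is routed to the handler above.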
