
TOMOYO Linux Cross Reference
Linux/arch/riscv/kvm/tlb.c

Version: linux-6.12-rc7



// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/csr.h>
#include <asm/cpufeature.h>
#include <asm/insn-def.h>

#define has_svinval()	riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)

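/*
 * Local TLB maintenance: the kvm_riscv_local_*() helpers below act on
 * the current hart only. G-stage (GVMA) fences take guest-physical
 * addresses shifted right by 2, as required by HFENCE.GVMA, and each
 * range-based flush falls back to a full flush once the range spans
 * more than PTRS_PER_PTE entries of the given order, where a single
 * full fence is cheaper than looping. When the Svinval extension is
 * available, the loop is bracketed by SFENCE.W.INVAL/SFENCE.INVAL.IR
 * and uses the HINVAL variants instead.
 */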
void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
                                          gpa_t gpa, gpa_t gpsz,
                                          unsigned long order)
{
        gpa_t pos;

        if (PTRS_PER_PTE < (gpsz >> order)) {
                kvm_riscv_local_hfence_gvma_vmid_all(vmid);
                return;
        }

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile (HINVAL_GVMA(%0, %1)
                        : : "r" (pos >> 2), "r" (vmid) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile (HFENCE_GVMA(%0, %1)
                        : : "r" (pos >> 2), "r" (vmid) : "memory");
        }
}

void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid)
{
        asm volatile(HFENCE_GVMA(zero, %0) : : "r" (vmid) : "memory");
}

void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
                                     unsigned long order)
{
        gpa_t pos;

        if (PTRS_PER_PTE < (gpsz >> order)) {
                kvm_riscv_local_hfence_gvma_all();
                return;
        }

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile(HINVAL_GVMA(%0, zero)
                        : : "r" (pos >> 2) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gpa; pos < (gpa + gpsz); pos += BIT(order))
                        asm volatile(HFENCE_GVMA(%0, zero)
                        : : "r" (pos >> 2) : "memory");
        }
}

void kvm_riscv_local_hfence_gvma_all(void)
{
        asm volatile(HFENCE_GVMA(zero, zero) ::: "memory");
}

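/*
 * VS-stage (VVMA) fences operate on the VMID currently programmed in
 * the hgatp CSR, so the helpers below temporarily swap hgatp to select
 * the target VMID and restore the old value once the fence is done.
 */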
void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
                                          unsigned long asid,
                                          unsigned long gva,
                                          unsigned long gvsz,
                                          unsigned long order)
{
        unsigned long pos, hgatp;

        if (PTRS_PER_PTE < (gvsz >> order)) {
                kvm_riscv_local_hfence_vvma_asid_all(vmid, asid);
                return;
        }

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HINVAL_VVMA(%0, %1)
                        : : "r" (pos), "r" (asid) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HFENCE_VVMA(%0, %1)
                        : : "r" (pos), "r" (asid) : "memory");
        }

        csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
                                          unsigned long asid)
{
        unsigned long hgatp;

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        asm volatile(HFENCE_VVMA(zero, %0) : : "r" (asid) : "memory");

        csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
                                     unsigned long gva, unsigned long gvsz,
                                     unsigned long order)
{
        unsigned long pos, hgatp;

        if (PTRS_PER_PTE < (gvsz >> order)) {
                kvm_riscv_local_hfence_vvma_all(vmid);
                return;
        }

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        if (has_svinval()) {
                asm volatile (SFENCE_W_INVAL() ::: "memory");
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HINVAL_VVMA(%0, zero)
                        : : "r" (pos) : "memory");
                asm volatile (SFENCE_INVAL_IR() ::: "memory");
        } else {
                for (pos = gva; pos < (gva + gvsz); pos += BIT(order))
                        asm volatile(HFENCE_VVMA(%0, zero)
                        : : "r" (pos) : "memory");
        }

        csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_hfence_vvma_all(unsigned long vmid)
{
        unsigned long hgatp;

        hgatp = csr_swap(CSR_HGATP, vmid << HGATP_VMID_SHIFT);

        asm volatile(HFENCE_VVMA(zero, zero) ::: "memory");

        csr_write(CSR_HGATP, hgatp);
}

void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu)
{
        unsigned long vmid;

        if (!kvm_riscv_gstage_vmid_bits() ||
            vcpu->arch.last_exit_cpu == vcpu->cpu)
                return;

        /*
         * On RISC-V platforms with hardware VMID support, we share the
         * same VMID for all VCPUs of a particular Guest/VM. This means
         * we might have stale G-stage TLB entries on the current Host
         * CPU due to some other VCPU of the same Guest which ran
         * previously on the current Host CPU.
         *
         * To cleanup stale TLB entries, we simply flush all G-stage TLB
         * entries by VMID whenever the underlying Host CPU changes for
         * a VCPU.
         */

        vmid = READ_ONCE(vcpu->kvm->arch.vmid.vmid);
        kvm_riscv_local_hfence_gvma_vmid_all(vmid);
}

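/*
 * The *_process() handlers below run on the target VCPU in response
 * to a KVM request raised by make_xfence_request() and perform the
 * actual flush on the hart currently running that VCPU.
 */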
void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
{
        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_FENCE_I_RCVD);
        local_flush_icache_all();
}

void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
{
        struct kvm_vmid *vmid;

        vmid = &vcpu->kvm->arch.vmid;
        kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
}

void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
{
        struct kvm_vmid *vmid;

        vmid = &vcpu->kvm->arch.vmid;
        kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
}

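/*
 * Pending hfences are exchanged through a small per-VCPU ring buffer
 * (hfence_queue with head/tail indices, serialized by hfence_lock).
 * A slot with a zero type is free: dequeue clears the type to release
 * a slot, and enqueue fails when the tail slot is still occupied.
 */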
static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
                                struct kvm_riscv_hfence *out_data)
{
        bool ret = false;
        struct kvm_vcpu_arch *varch = &vcpu->arch;

        spin_lock(&varch->hfence_lock);

        if (varch->hfence_queue[varch->hfence_head].type) {
                memcpy(out_data, &varch->hfence_queue[varch->hfence_head],
                       sizeof(*out_data));
                varch->hfence_queue[varch->hfence_head].type = 0;

                varch->hfence_head++;
                if (varch->hfence_head == KVM_RISCV_VCPU_MAX_HFENCE)
                        varch->hfence_head = 0;

                ret = true;
        }

        spin_unlock(&varch->hfence_lock);

        return ret;
}

static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
                                const struct kvm_riscv_hfence *data)
{
        bool ret = false;
        struct kvm_vcpu_arch *varch = &vcpu->arch;

        spin_lock(&varch->hfence_lock);

        if (!varch->hfence_queue[varch->hfence_tail].type) {
                memcpy(&varch->hfence_queue[varch->hfence_tail],
                       data, sizeof(*data));

                varch->hfence_tail++;
                if (varch->hfence_tail == KVM_RISCV_VCPU_MAX_HFENCE)
                        varch->hfence_tail = 0;

                ret = true;
        }

        spin_unlock(&varch->hfence_lock);

        return ret;
}

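/*
 * Drain this VCPU's hfence queue and dispatch every entry to the
 * matching local flush helper, resolving the current VMID at dequeue
 * time.
 */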
void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
{
        struct kvm_riscv_hfence d = { 0 };
        struct kvm_vmid *v = &vcpu->kvm->arch.vmid;

        while (vcpu_hfence_dequeue(vcpu, &d)) {
                switch (d.type) {
                case KVM_RISCV_HFENCE_UNKNOWN:
                        break;
                case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
                        kvm_riscv_local_hfence_gvma_vmid_gpa(
                                                READ_ONCE(v->vmid),
                                                d.addr, d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
                        kvm_riscv_local_hfence_vvma_asid_gva(
                                                READ_ONCE(v->vmid), d.asid,
                                                d.addr, d.size, d.order);
                        break;
                case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
                        kvm_riscv_local_hfence_vvma_asid_all(
                                                READ_ONCE(v->vmid), d.asid);
                        break;
                case KVM_RISCV_HFENCE_VVMA_GVA:
                        kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
                        kvm_riscv_local_hfence_vvma_gva(
                                                READ_ONCE(v->vmid),
                                                d.addr, d.size, d.order);
                        break;
                default:
                        break;
                }
        }
}

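/*
 * Issue a fence on a set of VCPUs selected by the SBI hart mask
 * (hbase/hmask, with hbase == -1UL meaning all VCPUs). Where fence
 * details are needed they are enqueued per VCPU; if a queue is full,
 * the request is widened to the coarser fallback_req, which only
 * over-flushes and is therefore always safe.
 */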
static void make_xfence_request(struct kvm *kvm,
                                unsigned long hbase, unsigned long hmask,
                                unsigned int req, unsigned int fallback_req,
                                const struct kvm_riscv_hfence *data)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;
        unsigned int actual_req = req;
        DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);

        bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (hbase != -1UL) {
                        if (vcpu->vcpu_id < hbase)
                                continue;
                        if (!(hmask & (1UL << (vcpu->vcpu_id - hbase))))
                                continue;
                }

                bitmap_set(vcpu_mask, i, 1);

                if (!data || !data->type)
                        continue;

                /*
                 * Enqueue hfence data to VCPU hfence queue. If we don't
                 * have space in the VCPU hfence queue then fallback to
                 * a more conservative hfence request.
                 */
                if (!vcpu_hfence_enqueue(vcpu, data))
                        actual_req = fallback_req;
        }

        kvm_make_vcpus_request_mask(kvm, actual_req, vcpu_mask);
}

void kvm_riscv_fence_i(struct kvm *kvm,
                       unsigned long hbase, unsigned long hmask)
{
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_FENCE_I,
                            KVM_REQ_FENCE_I, NULL);
}

void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    gpa_t gpa, gpa_t gpsz,
                                    unsigned long order)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_GVMA_VMID_GPA;
        data.asid = 0;
        data.addr = gpa;
        data.size = gpsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_GVMA_VMID_ALL, &data);
}

void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask)
{
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_GVMA_VMID_ALL,
                            KVM_REQ_HFENCE_GVMA_VMID_ALL, NULL);
}

void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long gva, unsigned long gvsz,
                                    unsigned long order, unsigned long asid)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_VVMA_ASID_GVA;
        data.asid = asid;
        data.addr = gva;
        data.size = gvsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
                                    unsigned long hbase, unsigned long hmask,
                                    unsigned long asid)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_VVMA_ASID_ALL;
        data.asid = asid;
        data.addr = data.size = data.order = 0;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask,
                               unsigned long gva, unsigned long gvsz,
                               unsigned long order)
{
        struct kvm_riscv_hfence data;

        data.type = KVM_RISCV_HFENCE_VVMA_GVA;
        data.asid = 0;
        data.addr = gva;
        data.size = gvsz;
        data.order = order;
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE,
                            KVM_REQ_HFENCE_VVMA_ALL, &data);
}

void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
                               unsigned long hbase, unsigned long hmask)
{
        make_xfence_request(kvm, hbase, hmask, KVM_REQ_HFENCE_VVMA_ALL,
                            KVM_REQ_HFENCE_VVMA_ALL, NULL);
}
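
Usage sketch (illustrative only, not part of tlb.c): a caller such as the
g-stage MMU code can flush a guest-physical range on every VCPU roughly as
follows; the helper name below is hypothetical.

/* Hypothetical caller: flush a GPA range for the whole VM, one page at a
 * time (order == PAGE_SHIFT); hbase == -1UL selects all VCPUs, so hmask
 * is ignored. */
static void example_flush_gpa_range(struct kvm *kvm, gpa_t gpa, gpa_t size)
{
        kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, gpa, size, PAGE_SHIFT);
}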
