
TOMOYO Linux Cross Reference
Linux/arch/riscv/kvm/mmu.c


Source listing

/arch/riscv/kvm/mmu.c (linux-6.11-rc3)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/sched/signal.h>
#include <asm/csr.h>
#include <asm/page.h>
#include <asm/pgtable.h>

#ifdef CONFIG_64BIT
static unsigned long gstage_mode __ro_after_init = (HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
static unsigned long gstage_pgd_levels __ro_after_init = 3;
#define gstage_index_bits       9
#else
static unsigned long gstage_mode __ro_after_init = (HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
static unsigned long gstage_pgd_levels __ro_after_init = 2;
#define gstage_index_bits       10
#endif

#define gstage_pgd_xbits        2
#define gstage_pgd_size (1UL << (HGATP_PAGE_SHIFT + gstage_pgd_xbits))
#define gstage_gpa_bits (HGATP_PAGE_SHIFT + \
                         (gstage_pgd_levels * gstage_index_bits) + \
                         gstage_pgd_xbits)
#define gstage_gpa_size ((gpa_t)(1ULL << gstage_gpa_bits))
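
These macros pin down the G-stage geometry: the root page table is four times the size of an ordinary table (gstage_pgd_xbits adds two extra index bits), and each level below it resolves gstage_index_bits of the guest physical address. A minimal userspace sketch of the resulting address-space sizes, assuming the 64-bit parameters above; the X-prefixed names are illustrative, not kernel API:

#include <stdio.h>

/* Assumed constants mirroring the macros above (64-bit configuration). */
#define XHGATP_PAGE_SHIFT 12
#define XINDEX_BITS       9
#define XPGD_XBITS        2

int main(void)
{
        /* Sv39x4 uses 3 levels, Sv48x4 uses 4, Sv57x4 uses 5. */
        for (unsigned int levels = 3; levels <= 5; levels++) {
                unsigned int bits = XHGATP_PAGE_SHIFT +
                                    levels * XINDEX_BITS + XPGD_XBITS;
                printf("%u levels: %2u GPA bits, %llu GiB guest physical space\n",
                       levels, bits, 1ULL << (bits - 30));
        }
        return 0;
}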

#define gstage_pte_leaf(__ptep) \
        (pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC))

static inline unsigned long gstage_pte_index(gpa_t addr, u32 level)
{
        unsigned long mask;
        unsigned long shift = HGATP_PAGE_SHIFT + (gstage_index_bits * level);

        if (level == (gstage_pgd_levels - 1))
                mask = (PTRS_PER_PTE * (1UL << gstage_pgd_xbits)) - 1;
        else
                mask = PTRS_PER_PTE - 1;

        return (addr >> shift) & mask;
}
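
To make the index math concrete: with Sv39x4 (three levels) the level-2 shift is 12 + 9*2 = 30 and the root mask is PTRS_PER_PTE * 4 - 1 = 2047 (11 bits, because the root table is widened), while the lower levels use the plain 9-bit mask of 511. A standalone illustration under those assumptions; xpte_index() is a hypothetical mirror of gstage_pte_index(), not the kernel function:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical userspace mirror of gstage_pte_index() for Sv39x4:
 * 3 levels, 512-entry tables, 4x-wide root table. */
static unsigned long xpte_index(uint64_t gpa, unsigned int level)
{
        unsigned int shift = 12 + 9 * level;
        unsigned long mask = (level == 2) ? (512 * 4) - 1 : 512 - 1;

        return (unsigned long)(gpa >> shift) & mask;
}

int main(void)
{
        uint64_t gpa = 0x12345678000ULL; /* an arbitrary guest physical address */

        for (int level = 2; level >= 0; level--)
                printf("level %d index = %lu\n", level, xpte_index(gpa, level));
        return 0;
}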

static inline unsigned long gstage_pte_page_vaddr(pte_t pte)
{
        return (unsigned long)pfn_to_virt(__page_val_to_pfn(pte_val(pte)));
}

static int gstage_page_size_to_level(unsigned long page_size, u32 *out_level)
{
        u32 i;
        unsigned long psz = 1UL << 12;

        for (i = 0; i < gstage_pgd_levels; i++) {
                if (page_size == (psz << (i * gstage_index_bits))) {
                        *out_level = i;
                        return 0;
                }
        }

        return -EINVAL;
}

static int gstage_level_to_page_order(u32 level, unsigned long *out_pgorder)
{
        if (gstage_pgd_levels < level)
                return -EINVAL;

        *out_pgorder = 12 + (level * gstage_index_bits);
        return 0;
}

static int gstage_level_to_page_size(u32 level, unsigned long *out_pgsize)
{
        int rc;
        unsigned long page_order = PAGE_SHIFT;

        rc = gstage_level_to_page_order(level, &page_order);
        if (rc)
                return rc;

        *out_pgsize = BIT(page_order);
        return 0;
}
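
Since each level contributes nine index bits, every step up the hierarchy multiplies the mapped size by 512: level 0 maps 4 KiB, level 1 maps 2 MiB, level 2 maps 1 GiB, and levels 3 and 4 (where the mode supports them) map 512 GiB and 256 TiB. A quick standalone check of that relation, assuming the 64-bit nine-index-bit configuration:

#include <stdio.h>

int main(void)
{
        /* order = 12 + 9 * level, as in gstage_level_to_page_order() */
        for (unsigned int level = 0; level < 5; level++) {
                unsigned int order = 12 + 9 * level;
                printf("level %u: page order %2u -> %llu KiB\n",
                       level, order, 1ULL << (order - 10));
        }
        return 0;
}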

static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
                                  pte_t **ptepp, u32 *ptep_level)
{
        pte_t *ptep;
        u32 current_level = gstage_pgd_levels - 1;

        *ptep_level = current_level;
        ptep = (pte_t *)kvm->arch.pgd;
        ptep = &ptep[gstage_pte_index(addr, current_level)];
        while (ptep && pte_val(ptep_get(ptep))) {
                if (gstage_pte_leaf(ptep)) {
                        *ptep_level = current_level;
                        *ptepp = ptep;
                        return true;
                }

                if (current_level) {
                        current_level--;
                        *ptep_level = current_level;
                        ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
                        ptep = &ptep[gstage_pte_index(addr, current_level)];
                } else {
                        ptep = NULL;
                }
        }

        return false;
}

static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
{
        unsigned long order = PAGE_SHIFT;

        if (gstage_level_to_page_order(level, &order))
                return;
        addr &= ~(BIT(order) - 1);

        kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order);
}

static int gstage_set_pte(struct kvm *kvm, u32 level,
                          struct kvm_mmu_memory_cache *pcache,
                          gpa_t addr, const pte_t *new_pte)
{
        u32 current_level = gstage_pgd_levels - 1;
        pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
        pte_t *ptep = &next_ptep[gstage_pte_index(addr, current_level)];

        if (current_level < level)
                return -EINVAL;

        while (current_level != level) {
                if (gstage_pte_leaf(ptep))
                        return -EEXIST;

                if (!pte_val(ptep_get(ptep))) {
                        if (!pcache)
                                return -ENOMEM;
                        next_ptep = kvm_mmu_memory_cache_alloc(pcache);
                        if (!next_ptep)
                                return -ENOMEM;
                        set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
                                              __pgprot(_PAGE_TABLE)));
                } else {
                        if (gstage_pte_leaf(ptep))
                                return -EEXIST;
                        next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
                }

                current_level--;
                ptep = &next_ptep[gstage_pte_index(addr, current_level)];
        }

        set_pte(ptep, *new_pte);
        if (gstage_pte_leaf(ptep))
                gstage_remote_tlb_flush(kvm, current_level, addr);

        return 0;
}

static int gstage_map_page(struct kvm *kvm,
                           struct kvm_mmu_memory_cache *pcache,
                           gpa_t gpa, phys_addr_t hpa,
                           unsigned long page_size,
                           bool page_rdonly, bool page_exec)
{
        int ret;
        u32 level = 0;
        pte_t new_pte;
        pgprot_t prot;

        ret = gstage_page_size_to_level(page_size, &level);
        if (ret)
                return ret;

        /*
         * A RISC-V implementation can choose to either:
         * 1) Update 'A' and 'D' PTE bits in hardware
         * 2) Generate page fault when 'A' and/or 'D' bits are not set
         *    in the PTE so that software can update these bits.
         *
         * We support both options mentioned above. To achieve this, we
         * always set 'A' and 'D' PTE bits at time of creating G-stage
         * mapping. To support KVM dirty page logging with both options
         * mentioned above, we will write-protect G-stage PTEs to track
         * dirty pages.
         */

        if (page_exec) {
                if (page_rdonly)
                        prot = PAGE_READ_EXEC;
                else
                        prot = PAGE_WRITE_EXEC;
        } else {
                if (page_rdonly)
                        prot = PAGE_READ;
                else
                        prot = PAGE_WRITE;
        }
        new_pte = pfn_pte(PFN_DOWN(hpa), prot);
        new_pte = pte_mkdirty(new_pte);

        return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);
}
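
The write-protection scheme described in the comment above is driven from userspace through KVM's generic dirty-log interface. A hedged sketch of that flow, assuming vm_fd and region come from ordinary VM setup (KVM_CREATE_VM, then KVM_SET_USER_MEMORY_REGION) and with error handling trimmed for brevity:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Sketch: enable dirty logging on an existing slot, then fetch the
 * dirty bitmap. With logging on, KVM write-protects the slot's G-stage
 * PTEs; each first guest write faults into kvm_riscv_gstage_map(),
 * which remaps the page writable and marks its GFN dirty. */
static int fetch_dirty_log(int vm_fd, struct kvm_userspace_memory_region *region,
                           void *bitmap, size_t bitmap_bytes)
{
        struct kvm_dirty_log log = { .slot = region->slot };

        region->flags |= KVM_MEM_LOG_DIRTY_PAGES;
        if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, region) < 0)
                return -1;

        memset(bitmap, 0, bitmap_bytes);
        log.dirty_bitmap = bitmap;
        /* Returns a snapshot and clears the per-slot dirty state. */
        return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}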

enum gstage_op {
        GSTAGE_OP_NOP = 0,      /* Nothing */
        GSTAGE_OP_CLEAR,        /* Clear/Unmap */
        GSTAGE_OP_WP,           /* Write-protect */
};

static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
                          pte_t *ptep, u32 ptep_level, enum gstage_op op)
{
        int i, ret;
        pte_t *next_ptep;
        u32 next_ptep_level;
        unsigned long next_page_size, page_size;

        ret = gstage_level_to_page_size(ptep_level, &page_size);
        if (ret)
                return;

        BUG_ON(addr & (page_size - 1));

        if (!pte_val(ptep_get(ptep)))
                return;

        if (ptep_level && !gstage_pte_leaf(ptep)) {
                next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
                next_ptep_level = ptep_level - 1;
                ret = gstage_level_to_page_size(next_ptep_level,
                                                &next_page_size);
                if (ret)
                        return;

                if (op == GSTAGE_OP_CLEAR)
                        set_pte(ptep, __pte(0));
                for (i = 0; i < PTRS_PER_PTE; i++)
                        gstage_op_pte(kvm, addr + i * next_page_size,
                                        &next_ptep[i], next_ptep_level, op);
                if (op == GSTAGE_OP_CLEAR)
                        put_page(virt_to_page(next_ptep));
        } else {
                if (op == GSTAGE_OP_CLEAR)
                        set_pte(ptep, __pte(0));
                else if (op == GSTAGE_OP_WP)
                        set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
                gstage_remote_tlb_flush(kvm, ptep_level, addr);
        }
}

static void gstage_unmap_range(struct kvm *kvm, gpa_t start,
                               gpa_t size, bool may_block)
{
        int ret;
        pte_t *ptep;
        u32 ptep_level;
        bool found_leaf;
        unsigned long page_size;
        gpa_t addr = start, end = start + size;

        while (addr < end) {
                found_leaf = gstage_get_leaf_entry(kvm, addr,
                                                   &ptep, &ptep_level);
                ret = gstage_level_to_page_size(ptep_level, &page_size);
                if (ret)
                        break;

                if (!found_leaf)
                        goto next;

                if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
                        gstage_op_pte(kvm, addr, ptep,
                                      ptep_level, GSTAGE_OP_CLEAR);

next:
                addr += page_size;

                /*
                 * If the range is too large, release the kvm->mmu_lock
                 * to prevent starvation and lockup detector warnings.
                 */
                if (may_block && addr < end)
                        cond_resched_lock(&kvm->mmu_lock);
        }
}

static void gstage_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
{
        int ret;
        pte_t *ptep;
        u32 ptep_level;
        bool found_leaf;
        gpa_t addr = start;
        unsigned long page_size;

        while (addr < end) {
                found_leaf = gstage_get_leaf_entry(kvm, addr,
                                                   &ptep, &ptep_level);
                ret = gstage_level_to_page_size(ptep_level, &page_size);
                if (ret)
                        break;

                if (!found_leaf)
                        goto next;

                if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
                        gstage_op_pte(kvm, addr, ptep,
                                      ptep_level, GSTAGE_OP_WP);

next:
                addr += page_size;
        }
}

static void gstage_wp_memory_region(struct kvm *kvm, int slot)
{
        struct kvm_memslots *slots = kvm_memslots(kvm);
        struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
        phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
        phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        gstage_wp_range(kvm, start, end);
        spin_unlock(&kvm->mmu_lock);
        kvm_flush_remote_tlbs(kvm);
}

int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
                             phys_addr_t hpa, unsigned long size,
                             bool writable, bool in_atomic)
{
        pte_t pte;
        int ret = 0;
        unsigned long pfn;
        phys_addr_t addr, end;
        struct kvm_mmu_memory_cache pcache = {
                .gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
                .gfp_zero = __GFP_ZERO,
        };

        end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(hpa);

        for (addr = gpa; addr < end; addr += PAGE_SIZE) {
                pte = pfn_pte(pfn, PAGE_KERNEL_IO);

                if (!writable)
                        pte = pte_wrprotect(pte);

                ret = kvm_mmu_topup_memory_cache(&pcache, gstage_pgd_levels);
                if (ret)
                        goto out;

                spin_lock(&kvm->mmu_lock);
                ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;

                pfn++;
        }

out:
        kvm_mmu_free_memory_cache(&pcache);
        return ret;
}

void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, unsigned long size)
{
        spin_lock(&kvm->mmu_lock);
        gstage_unmap_range(kvm, gpa, size, false);
        spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                                             struct kvm_memory_slot *slot,
                                             gfn_t gfn_offset,
                                             unsigned long mask)
{
        phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
        phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
        phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

        gstage_wp_range(kvm, start, end);
}
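
kvm_arch_mmu_enable_log_dirty_pt_masked() turns the 64-GFN dirty mask supplied by the generic dirty-log code into a single contiguous range covering every set bit; for example, mask 0x00f0 write-protects base_gfn + 4 through base_gfn + 7. A standalone illustration using GCC builtins in place of the kernel's __ffs()/__fls() helpers:

#include <stdio.h>

int main(void)
{
        unsigned long base_gfn = 0x1000, mask = 0x00f0;
        /* __builtin_ctzl/__builtin_clzl stand in for the kernel's
         * __ffs()/__fls() bit-index helpers. */
        unsigned long first = __builtin_ctzl(mask);      /* lowest set bit: 4 */
        unsigned long last = 63 - __builtin_clzl(mask);  /* highest set bit: 7 */

        printf("write-protect GFNs 0x%lx..0x%lx inclusive\n",
               base_gfn + first, base_gfn + last);
        return 0;
}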

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
        kvm_riscv_gstage_free_pgd(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
        phys_addr_t size = slot->npages << PAGE_SHIFT;

        spin_lock(&kvm->mmu_lock);
        gstage_unmap_range(kvm, gpa, size, false);
        spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new,
                                enum kvm_mr_change change)
{
        /*
         * At this point memslot has been committed and there is an
         * allocated dirty_bitmap[], dirty pages will be tracked while
         * the memory slot is write protected.
         */
        if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES)
                gstage_wp_memory_region(kvm, new->id);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                const struct kvm_memory_slot *old,
                                struct kvm_memory_slot *new,
                                enum kvm_mr_change change)
{
        hva_t hva, reg_end, size;
        gpa_t base_gpa;
        bool writable;
        int ret = 0;

        if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
                        change != KVM_MR_FLAGS_ONLY)
                return 0;

        /*
         * Prevent userspace from creating a memory region outside of the GPA
         * space addressable by the KVM guest GPA space.
         */
        if ((new->base_gfn + new->npages) >=
            (gstage_gpa_size >> PAGE_SHIFT))
                return -EFAULT;

        hva = new->userspace_addr;
        size = new->npages << PAGE_SHIFT;
        reg_end = hva + size;
        base_gpa = new->base_gfn << PAGE_SHIFT;
        writable = !(new->flags & KVM_MEM_READONLY);

        mmap_read_lock(current->mm);

        /*
         * A memory region could potentially cover multiple VMAs, and any
         * holes between them, so iterate over all of them to find out if we
         * can map any of them right now.
         *
         *     +--------------------------------------------+
         * +---------------+----------------+   +----------------+
         * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
         * +---------------+----------------+   +----------------+
         *     |               memory region                |
         *     +--------------------------------------------+
         */
        do {
                struct vm_area_struct *vma = find_vma(current->mm, hva);
                hva_t vm_start, vm_end;

                if (!vma || vma->vm_start >= reg_end)
                        break;

                /*
                 * Mapping a read-only VMA is only allowed if the
                 * memory region is configured as read-only.
                 */
                if (writable && !(vma->vm_flags & VM_WRITE)) {
                        ret = -EPERM;
                        break;
                }

                /* Take the intersection of this VMA with the memory region */
                vm_start = max(hva, vma->vm_start);
                vm_end = min(reg_end, vma->vm_end);

                if (vma->vm_flags & VM_PFNMAP) {
                        gpa_t gpa = base_gpa + (vm_start - hva);
                        phys_addr_t pa;

                        pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
                        pa += vm_start - vma->vm_start;

                        /* IO region dirty page logging not allowed */
                        if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                                ret = -EINVAL;
                                goto out;
                        }

                        ret = kvm_riscv_gstage_ioremap(kvm, gpa, pa,
                                                       vm_end - vm_start,
                                                       writable, false);
                        if (ret)
                                break;
                }
                hva = vm_end;
        } while (hva < reg_end);

        if (change == KVM_MR_FLAGS_ONLY)
                goto out;

        if (ret)
                kvm_riscv_gstage_iounmap(kvm, base_gpa, size);

out:
        mmap_read_unlock(current->mm);
        return ret;
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
        if (!kvm->arch.pgd)
                return false;

        gstage_unmap_range(kvm, range->start << PAGE_SHIFT,
                           (range->end - range->start) << PAGE_SHIFT,
                           range->may_block);
        return false;
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        pte_t *ptep;
        u32 ptep_level = 0;
        u64 size = (range->end - range->start) << PAGE_SHIFT;

        if (!kvm->arch.pgd)
                return false;

        WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);

        if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
                                   &ptep, &ptep_level))
                return false;

        return ptep_test_and_clear_young(NULL, 0, ptep);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
        pte_t *ptep;
        u32 ptep_level = 0;
        u64 size = (range->end - range->start) << PAGE_SHIFT;

        if (!kvm->arch.pgd)
                return false;

        WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);

        if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
                                   &ptep, &ptep_level))
                return false;

        return pte_young(ptep_get(ptep));
}

int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
                         struct kvm_memory_slot *memslot,
                         gpa_t gpa, unsigned long hva, bool is_write)
{
        int ret;
        kvm_pfn_t hfn;
        bool writable;
        short vma_pageshift;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct vm_area_struct *vma;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
        bool logging = (memslot->dirty_bitmap &&
                        !(memslot->flags & KVM_MEM_READONLY)) ? true : false;
        unsigned long vma_pagesize, mmu_seq;

        /* We need minimum second+third level pages */
        ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
        if (ret) {
                kvm_err("Failed to topup G-stage cache\n");
                return ret;
        }

        mmap_read_lock(current->mm);

        vma = vma_lookup(current->mm, hva);
        if (unlikely(!vma)) {
                kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
                mmap_read_unlock(current->mm);
                return -EFAULT;
        }

        if (is_vm_hugetlb_page(vma))
                vma_pageshift = huge_page_shift(hstate_vma(vma));
        else
                vma_pageshift = PAGE_SHIFT;
        vma_pagesize = 1ULL << vma_pageshift;
        if (logging || (vma->vm_flags & VM_PFNMAP))
                vma_pagesize = PAGE_SIZE;

        if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
                gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;

        /*
         * Read mmu_invalidate_seq so that KVM can detect if the results of
         * vma_lookup() or gfn_to_pfn_prot() become stale prior to acquiring
         * kvm->mmu_lock.
         *
         * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
         * with the smp_wmb() in kvm_mmu_invalidate_end().
         */
        mmu_seq = kvm->mmu_invalidate_seq;
        mmap_read_unlock(current->mm);

        if (vma_pagesize != PUD_SIZE &&
            vma_pagesize != PMD_SIZE &&
            vma_pagesize != PAGE_SIZE) {
                kvm_err("Invalid VMA page size 0x%lx\n", vma_pagesize);
                return -EFAULT;
        }

        hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
        if (hfn == KVM_PFN_ERR_HWPOISON) {
                send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
                                vma_pageshift, current);
                return 0;
        }
        if (is_error_noslot_pfn(hfn))
                return -EFAULT;

        /*
         * If logging is active then we allow writable pages only
         * for write faults.
         */
        if (logging && !is_write)
                writable = false;

        spin_lock(&kvm->mmu_lock);

        if (mmu_invalidate_retry(kvm, mmu_seq))
                goto out_unlock;

        if (writable) {
                kvm_set_pfn_dirty(hfn);
                mark_page_dirty(kvm, gfn);
                ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
                                      vma_pagesize, false, true);
        } else {
                ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
                                      vma_pagesize, true, true);
        }

        if (ret)
                kvm_err("Failed to map in G-stage\n");

out_unlock:
        spin_unlock(&kvm->mmu_lock);
        kvm_set_pfn_accessed(hfn);
        kvm_release_pfn_clean(hfn);
        return ret;
}

int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm)
{
        struct page *pgd_page;

        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
                                get_order(gstage_pgd_size));
        if (!pgd_page)
                return -ENOMEM;
        kvm->arch.pgd = page_to_virt(pgd_page);
        kvm->arch.pgd_phys = page_to_phys(pgd_page);

        return 0;
}

void kvm_riscv_gstage_free_pgd(struct kvm *kvm)
{
        void *pgd = NULL;

        spin_lock(&kvm->mmu_lock);
        if (kvm->arch.pgd) {
                gstage_unmap_range(kvm, 0UL, gstage_gpa_size, false);
                pgd = READ_ONCE(kvm->arch.pgd);
                kvm->arch.pgd = NULL;
                kvm->arch.pgd_phys = 0;
        }
        spin_unlock(&kvm->mmu_lock);

        if (pgd)
                free_pages((unsigned long)pgd, get_order(gstage_pgd_size));
}

void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu)
{
        unsigned long hgatp = gstage_mode;
        struct kvm_arch *k = &vcpu->kvm->arch;

        hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) & HGATP_VMID;
        hgatp |= (k->pgd_phys >> PAGE_SHIFT) & HGATP_PPN;

        csr_write(CSR_HGATP, hgatp);

        if (!kvm_riscv_gstage_vmid_bits())
                kvm_riscv_local_hfence_gvma_all();
}
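
For reference, the RV64 hgatp register assembled here packs MODE into bits 63:60, VMID into bits 57:44, and the physical page number of the root table into bits 43:0 (per the RISC-V privileged specification). A standalone sketch of the same composition with assumed example values; the X-prefixed macros are illustrative stand-ins for the asm/csr.h definitions:

#include <stdio.h>
#include <stdint.h>

#define XHGATP_MODE_SHIFT  60
#define XHGATP_VMID_SHIFT  44
#define XHGATP_VMID_MASK   (0x3fffULL << XHGATP_VMID_SHIFT)
#define XHGATP_PPN_MASK    ((1ULL << 44) - 1)
#define XHGATP_MODE_SV39X4 8ULL /* mode encoding from the privileged spec */

int main(void)
{
        uint64_t pgd_phys = 0x80400000ULL; /* example 16 KiB-aligned root table */
        uint64_t vmid = 5;
        uint64_t hgatp = XHGATP_MODE_SV39X4 << XHGATP_MODE_SHIFT;

        hgatp |= (vmid << XHGATP_VMID_SHIFT) & XHGATP_VMID_MASK;
        hgatp |= (pgd_phys >> 12) & XHGATP_PPN_MASK;

        printf("hgatp = 0x%016llx\n", (unsigned long long)hgatp);
        return 0;
}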

void __init kvm_riscv_gstage_mode_detect(void)
{
#ifdef CONFIG_64BIT
        /* Try Sv57x4 G-stage mode */
        csr_write(CSR_HGATP, HGATP_MODE_SV57X4 << HGATP_MODE_SHIFT);
        if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV57X4) {
                gstage_mode = (HGATP_MODE_SV57X4 << HGATP_MODE_SHIFT);
                gstage_pgd_levels = 5;
                goto skip_sv48x4_test;
        }

        /* Try Sv48x4 G-stage mode */
        csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
        if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
                gstage_mode = (HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
                gstage_pgd_levels = 4;
        }
skip_sv48x4_test:

        csr_write(CSR_HGATP, 0);
        kvm_riscv_local_hfence_gvma_all();
#endif
}

unsigned long __init kvm_riscv_gstage_mode(void)
{
        return gstage_mode >> HGATP_MODE_SHIFT;
}

int kvm_riscv_gstage_gpa_bits(void)
{
        return gstage_gpa_bits;
}
