
TOMOYO Linux Cross Reference
Linux/mm/hmm.c


Diff markup

Differences between /mm/hmm.c (Version linux-6.12-rc7) and /mm/hmm.c (Version linux-2.4.37.11). The file does not exist in linux-2.4.37.11, so the whole linux-6.12-rc7 file appears as added; the listing below is that file.


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#include "internal.h"

struct hmm_vma_walk {
        struct hmm_range        *range;
        unsigned long           last;
};

enum {
        HMM_NEED_FAULT = 1 << 0,
        HMM_NEED_WRITE_FAULT = 1 << 1,
        HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
                         struct hmm_range *range, unsigned long cpu_flags)
{
        unsigned long i = (addr - range->start) >> PAGE_SHIFT;

        for (; addr < end; addr += PAGE_SIZE, i++)
                range->hmm_pfns[i] = cpu_flags;
        return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
                         unsigned int required_fault, struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct vm_area_struct *vma = walk->vma;
        unsigned int fault_flags = FAULT_FLAG_REMOTE;

        WARN_ON_ONCE(!required_fault);
        hmm_vma_walk->last = addr;

        if (required_fault & HMM_NEED_WRITE_FAULT) {
                if (!(vma->vm_flags & VM_WRITE))
                        return -EPERM;
                fault_flags |= FAULT_FLAG_WRITE;
        }

        for (; addr < end; addr += PAGE_SIZE)
                if (handle_mm_fault(vma, addr, fault_flags, NULL) &
                    VM_FAULT_ERROR)
                        return -EFAULT;
        return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                       unsigned long pfn_req_flags,
                                       unsigned long cpu_flags)
{
        struct hmm_range *range = hmm_vma_walk->range;

        /*
         * So we not only consider the individual per page request, we also
         * consider the default flags requested for the range. The API can
         * be used 2 ways. The first one where the HMM user coalesces
         * multiple page faults into one request and sets flags per pfn for
         * those faults. The second one where the HMM user wants to pre-
         * fault a range with specific flags. For the latter one it is a
         * waste to have the user pre-fill the pfn arrays with a default
         * flags value.
         */
        pfn_req_flags &= range->pfn_flags_mask;
        pfn_req_flags |= range->default_flags;

        /* We aren't asked to do anything ... */
        if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
                return 0;

        /* Need to write fault? */
        if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
            !(cpu_flags & HMM_PFN_WRITE))
                return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

        /* If CPU page table is not valid then we need to fault */
        if (!(cpu_flags & HMM_PFN_VALID))
                return HMM_NEED_FAULT;
        return 0;
}

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                     const unsigned long hmm_pfns[], unsigned long npages,
                     unsigned long cpu_flags)
{
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault = 0;
        unsigned long i;

        /*
         * If the default flags do not request to fault pages, and the mask does
         * not allow for individual pages to be faulted, then
         * hmm_pte_need_fault() will always return 0.
         */
        if (!((range->default_flags | range->pfn_flags_mask) &
              HMM_PFN_REQ_FAULT))
                return 0;

        for (i = 0; i < npages; ++i) {
                required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
                                                     cpu_flags);
                if (required_fault == HMM_NEED_ALL_BITS)
                        return required_fault;
        }
        return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
                             __always_unused int depth, struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault;
        unsigned long i, npages;
        unsigned long *hmm_pfns;

        i = (addr - range->start) >> PAGE_SHIFT;
        npages = (end - addr) >> PAGE_SHIFT;
        hmm_pfns = &range->hmm_pfns[i];
        required_fault =
                hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
        if (!walk->vma) {
                if (required_fault)
                        return -EFAULT;
                return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
        }
        if (required_fault)
                return hmm_vma_fault(addr, end, required_fault, walk);
        return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
        return order << HMM_PFN_ORDER_SHIFT;
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pmd_t pmd)
{
        if (pmd_protnone(pmd))
                return 0;
        return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
                                 HMM_PFN_VALID) |
               hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, unsigned long hmm_pfns[],
                              pmd_t pmd)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long pfn, npages, i;
        unsigned int required_fault;
        unsigned long cpu_flags;

        npages = (end - addr) >> PAGE_SHIFT;
        cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
        required_fault =
                hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
        if (required_fault)
                return hmm_vma_fault(addr, end, required_fault, walk);

        pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
                hmm_pfns[i] = pfn | cpu_flags;
        return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pte_t pte)
{
        if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
                return 0;
        return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
                              unsigned long *hmm_pfn)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault;
        unsigned long cpu_flags;
        pte_t pte = ptep_get(ptep);
        uint64_t pfn_req_flags = *hmm_pfn;

        if (pte_none_mostly(pte)) {
                required_fault =
                        hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
                if (required_fault)
                        goto fault;
                *hmm_pfn = 0;
                return 0;
        }

        if (!pte_present(pte)) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                /*
                 * Don't fault in device private pages owned by the caller,
                 * just report the PFN.
                 */
                if (is_device_private_entry(entry) &&
                    pfn_swap_entry_to_page(entry)->pgmap->owner ==
                    range->dev_private_owner) {
                        cpu_flags = HMM_PFN_VALID;
                        if (is_writable_device_private_entry(entry))
                                cpu_flags |= HMM_PFN_WRITE;
                        *hmm_pfn = swp_offset_pfn(entry) | cpu_flags;
                        return 0;
                }

                required_fault =
                        hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
                if (!required_fault) {
                        *hmm_pfn = 0;
                        return 0;
                }

                if (!non_swap_entry(entry))
                        goto fault;

                if (is_device_private_entry(entry))
                        goto fault;

                if (is_device_exclusive_entry(entry))
                        goto fault;

                if (is_migration_entry(entry)) {
                        pte_unmap(ptep);
                        hmm_vma_walk->last = addr;
                        migration_entry_wait(walk->mm, pmdp, addr);
                        return -EBUSY;
                }

                /* Report error for everything else */
                pte_unmap(ptep);
                return -EFAULT;
        }

        cpu_flags = pte_to_hmm_pfn_flags(range, pte);
        required_fault =
                hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
        if (required_fault)
                goto fault;

        /*
         * Bypass devmap pte such as DAX page when all pfn requested
         * flags(pfn_req_flags) are fulfilled.
         * Since each architecture defines a struct page for the zero page, just
         * fall through and treat it like a normal page.
         */
        if (!vm_normal_page(walk->vma, addr, pte) &&
            !pte_devmap(pte) &&
            !is_zero_pfn(pte_pfn(pte))) {
                if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
                        pte_unmap(ptep);
                        return -EFAULT;
                }
                *hmm_pfn = HMM_PFN_ERROR;
                return 0;
        }

        *hmm_pfn = pte_pfn(pte) | cpu_flags;
        return 0;

fault:
        pte_unmap(ptep);
        /* Fault any virtual address we were asked to fault */
        return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long *hmm_pfns =
                &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long addr = start;
        pte_t *ptep;
        pmd_t pmd;

again:
        pmd = pmdp_get_lockless(pmdp);
        if (pmd_none(pmd))
                return hmm_vma_walk_hole(start, end, -1, walk);

        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
                        hmm_vma_walk->last = addr;
                        pmd_migration_entry_wait(walk->mm, pmdp);
                        return -EBUSY;
                }
                return hmm_pfns_fill(start, end, range, 0);
        }

        if (!pmd_present(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
                        return -EFAULT;
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }

        if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
                /*
                 * No need to take pmd_lock here, even if some other thread
                 * is splitting the huge pmd we will get that event through
                 * mmu_notifier callback.
                 *
                 * So just read pmd value and check again it's a transparent
                 * huge or device mapping one and compute corresponding pfn
                 * values.
                 */
                pmd = pmdp_get_lockless(pmdp);
                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                        goto again;

                return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
        }

        /*
         * We have handled all the valid cases above ie either none, migration,
         * huge or transparent huge. At this point either it is a valid pmd
         * entry pointing to pte directory or it is a bad pmd that will not
         * recover.
         */
        if (pmd_bad(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
                        return -EFAULT;
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }

        ptep = pte_offset_map(pmdp, addr);
        if (!ptep)
                goto again;
        for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
                int r;

                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
                if (r) {
                        /* hmm_vma_handle_pte() did pte_unmap() */
                        return r;
                }
        }
        pte_unmap(ptep - 1);
        return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pud_t pud)
{
        if (!pud_present(pud))
                return 0;
        return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
                                 HMM_PFN_VALID) |
               hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
                struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long addr = start;
        pud_t pud;
        spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

        if (!ptl)
                return 0;

        /* Normally we don't want to split the huge page */
        walk->action = ACTION_CONTINUE;

        pud = READ_ONCE(*pudp);
        if (!pud_present(pud)) {
                spin_unlock(ptl);
                return hmm_vma_walk_hole(start, end, -1, walk);
        }

        if (pud_leaf(pud) && pud_devmap(pud)) {
                unsigned long i, npages, pfn;
                unsigned int required_fault;
                unsigned long *hmm_pfns;
                unsigned long cpu_flags;

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                hmm_pfns = &range->hmm_pfns[i];

                cpu_flags = pud_to_hmm_pfn_flags(range, pud);
                required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
                                                      npages, cpu_flags);
                if (required_fault) {
                        spin_unlock(ptl);
                        return hmm_vma_fault(addr, end, required_fault, walk);
                }

                pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
                for (i = 0; i < npages; ++i, ++pfn)
                        hmm_pfns[i] = pfn | cpu_flags;
                goto out_unlock;
        }

        /* Ask for the PUD to be split */
        walk->action = ACTION_SUBTREE;

out_unlock:
        spin_unlock(ptl);
        return 0;
}
#else
#define hmm_vma_walk_pud        NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                      unsigned long start, unsigned long end,
                                      struct mm_walk *walk)
{
        unsigned long addr = start, i, pfn;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        unsigned int required_fault;
        unsigned long pfn_req_flags;
        unsigned long cpu_flags;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
        entry = huge_ptep_get(walk->mm, addr, pte);

        i = (start - range->start) >> PAGE_SHIFT;
        pfn_req_flags = range->hmm_pfns[i];
        cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
                    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
        required_fault =
                hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
        if (required_fault) {
                int ret;

                spin_unlock(ptl);
                hugetlb_vma_unlock_read(vma);
                /*
                 * Avoid deadlock: drop the vma lock before calling
                 * hmm_vma_fault(), which will itself potentially take and
                 * drop the vma lock. This is also correct from a
                 * protection point of view, because there is no further
                 * use here of either pte or ptl after dropping the vma
                 * lock.
                 */
                ret = hmm_vma_fault(addr, end, required_fault, walk);
                hugetlb_vma_lock_read(vma);
                return ret;
        }

        pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
        for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
                range->hmm_pfns[i] = pfn | cpu_flags;

        spin_unlock(ptl);
        return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;

        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
            vma->vm_flags & VM_READ)
                return 0;

        /*
         * vma ranges that don't have struct page backing them or map I/O
         * devices directly cannot be handled by hmm_range_fault().
         *
         * If the vma does not allow read access, then assume that it does not
         * allow write access either. HMM does not support architectures that
         * allow write without read.
         *
         * If a fault is requested for an unsupported range then it is a hard
         * failure.
         */
        if (hmm_range_need_fault(hmm_vma_walk,
                                 range->hmm_pfns +
                                         ((start - range->start) >> PAGE_SHIFT),
                                 (end - start) >> PAGE_SHIFT, 0))
                return -EFAULT;

        hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

        /* Skip this vma and continue processing the rest of vmas. */
        return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
        .pud_entry      = hmm_vma_walk_pud,
        .pmd_entry      = hmm_vma_walk_pmd,
        .pte_hole       = hmm_vma_walk_hole,
        .hugetlb_entry  = hmm_vma_walk_hugetlb_entry,
        .test_walk      = hmm_vma_walk_test,
        .walk_lock      = PGWALK_RDLOCK,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:      argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:     Invalid arguments or mm or virtual address is in an invalid vma
 *              (e.g., device file vma).
 * -ENOMEM:     Out of memory.
 * -EPERM:      Invalid permission (e.g., asking for write and range is read
 *              only).
 * -EBUSY:      The range has been invalidated and the caller needs to wait for
 *              the invalidation to finish.
 * -EFAULT:     A page was requested to be valid and could not be made valid
 *              ie it has no backing VMA or it is illegal to access
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (ie causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
        struct hmm_vma_walk hmm_vma_walk = {
                .range = range,
                .last = range->start,
        };
        struct mm_struct *mm = range->notifier->mm;
        int ret;

        mmap_assert_locked(mm);

        do {
                /* If range is no longer valid force retry. */
                if (mmu_interval_check_retry(range->notifier,
                                             range->notifier_seq))
                        return -EBUSY;
                ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
                                      &hmm_walk_ops, &hmm_vma_walk);
                /*
                 * When -EBUSY is returned the loop restarts with
                 * hmm_vma_walk.last set to an address that has not been stored
                 * in pfns. All entries < last in the pfn array are set to their
                 * output, and all >= are still at their input values.
                 */
        } while (ret == -EBUSY);
        return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
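
For context on how hmm_range_fault() is meant to be driven, here is a minimal, hypothetical caller sketch modelled on the sequence-count pattern described in Documentation/mm/hmm.rst. It is not part of mm/hmm.c; my_fault_range() and my_driver_lock are illustrative names, and a real driver would consume the returned PFNs (for example via hmm_pfn_to_page()) while holding the same lock its mmu_interval_notifier invalidation callback takes.

/*
 * Hypothetical usage sketch (not part of mm/hmm.c): snapshot a user VA
 * range with hmm_range_fault() and validate the snapshot against the
 * driver's mmu_interval_notifier before using it.
 */
#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Assumed to also be taken by the driver's invalidate() callback. */
static DEFINE_MUTEX(my_driver_lock);

static int my_fault_range(struct mmu_interval_notifier *notifier,
                          unsigned long start, unsigned long end)
{
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        struct mm_struct *mm = notifier->mm;
        struct hmm_range range = {
                .notifier = notifier,
                .start = start,
                .end = end,
                /* Fault everything writable; ignore per-pfn input flags. */
                .default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
                .pfn_flags_mask = 0,
        };
        int ret;

        range.hmm_pfns = kvcalloc(npages, sizeof(*range.hmm_pfns), GFP_KERNEL);
        if (!range.hmm_pfns)
                return -ENOMEM;

again:
        range.notifier_seq = mmu_interval_read_begin(notifier);
        mmap_read_lock(mm);
        ret = hmm_range_fault(&range);
        mmap_read_unlock(mm);
        if (ret) {
                if (ret == -EBUSY)
                        goto again;     /* range was invalidated mid-walk */
                goto out;
        }

        mutex_lock(&my_driver_lock);
        if (mmu_interval_read_retry(notifier, range.notifier_seq)) {
                /* An invalidation raced with the snapshot; start over. */
                mutex_unlock(&my_driver_lock);
                goto again;
        }
        /*
         * range.hmm_pfns[] is now a consistent snapshot; entries carry
         * HMM_PFN_VALID/HMM_PFN_WRITE and can be turned into pages with
         * hmm_pfn_to_page() while my_driver_lock is held.
         */
        mutex_unlock(&my_driver_lock);
out:
        kvfree(range.hmm_pfns);
        return ret;
}

The two-phase check mirrors what hmm_range_fault() itself documents: the function only snapshots the page tables, so the caller must re-validate the notifier sequence under its own lock before committing the PFNs to device page tables, and must retry on -EBUSY.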
