
TOMOYO Linux Cross Reference
Linux/mm/hmm.c


Diff markup

Differences between /mm/hmm.c (Version linux-6.12-rc7) and /mm/hmm.c (Version linux-5.18.19), rendered below as a unified diff: lines prefixed with "-" are linux-5.18.19, lines prefixed with "+" are linux-6.12-rc7.


--- mm/hmm.c	(linux-5.18.19)
+++ mm/hmm.c	(linux-6.12-rc7)
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright 2013 Red Hat Inc.
  *
  * Authors: Jérôme Glisse <jglisse@redhat.com>
  */
 /*
  * Refer to include/linux/hmm.h for information about heterogeneous memory
  * management or HMM for short.
  */
 #include <linux/pagewalk.h>
 #include <linux/hmm.h>
 #include <linux/init.h>
 #include <linux/rmap.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/mmzone.h>
 #include <linux/pagemap.h>
 #include <linux/swapops.h>
 #include <linux/hugetlb.h>
 #include <linux/memremap.h>
 #include <linux/sched/mm.h>
 #include <linux/jump_label.h>
 #include <linux/dma-mapping.h>
 #include <linux/mmu_notifier.h>
 #include <linux/memory_hotplug.h>

 #include "internal.h"

 struct hmm_vma_walk {
         struct hmm_range        *range;
         unsigned long           last;
 };

 enum {
         HMM_NEED_FAULT = 1 << 0,
         HMM_NEED_WRITE_FAULT = 1 << 1,
         HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
 };

 static int hmm_pfns_fill(unsigned long addr, unsigned long end,
                          struct hmm_range *range, unsigned long cpu_flags)
 {
         unsigned long i = (addr - range->start) >> PAGE_SHIFT;

         for (; addr < end; addr += PAGE_SIZE, i++)
                 range->hmm_pfns[i] = cpu_flags;
         return 0;
 }

 /*
  * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
  * @addr: range virtual start address (inclusive)
  * @end: range virtual end address (exclusive)
  * @required_fault: HMM_NEED_* flags
  * @walk: mm_walk structure
  * Return: -EBUSY after page fault, or page fault error
  *
  * This function will be called whenever pmd_none() or pte_none() returns true,
  * or whenever there is no page directory covering the virtual address range.
  */
 static int hmm_vma_fault(unsigned long addr, unsigned long end,
                          unsigned int required_fault, struct mm_walk *walk)
 {
         struct hmm_vma_walk *hmm_vma_walk = walk->private;
         struct vm_area_struct *vma = walk->vma;
         unsigned int fault_flags = FAULT_FLAG_REMOTE;

         WARN_ON_ONCE(!required_fault);
         hmm_vma_walk->last = addr;

         if (required_fault & HMM_NEED_WRITE_FAULT) {
                 if (!(vma->vm_flags & VM_WRITE))
                         return -EPERM;
                 fault_flags |= FAULT_FLAG_WRITE;
         }

         for (; addr < end; addr += PAGE_SIZE)
                 if (handle_mm_fault(vma, addr, fault_flags, NULL) &
                     VM_FAULT_ERROR)
                         return -EFAULT;
         return -EBUSY;
 }

 static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                        unsigned long pfn_req_flags,
                                        unsigned long cpu_flags)
 {
         struct hmm_range *range = hmm_vma_walk->range;

         /*
          * So we not only consider the individual per page request we also
          * consider the default flags requested for the range. The API can
          * be used 2 ways. The first one where the HMM user coalesces
          * multiple page faults into one request and sets flags per pfn for
          * those faults. The second one where the HMM user wants to pre-
          * fault a range with specific flags. For the latter one it is a
          * waste to have the user pre-fill the pfn arrays with a default
          * flags value.
          */
         pfn_req_flags &= range->pfn_flags_mask;
         pfn_req_flags |= range->default_flags;

         /* We aren't ask to do anything ... */
         if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
                 return 0;

         /* Need to write fault ? */
         if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
             !(cpu_flags & HMM_PFN_WRITE))
                 return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

         /* If CPU page table is not valid then we need to fault */
         if (!(cpu_flags & HMM_PFN_VALID))
                 return HMM_NEED_FAULT;
         return 0;
 }

 static unsigned int
 hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                      const unsigned long hmm_pfns[], unsigned long npages,
                      unsigned long cpu_flags)
 {
         struct hmm_range *range = hmm_vma_walk->range;
         unsigned int required_fault = 0;
         unsigned long i;

         /*
          * If the default flags do not request to fault pages, and the mask does
          * not allow for individual pages to be faulted, then
          * hmm_pte_need_fault() will always return 0.
          */
         if (!((range->default_flags | range->pfn_flags_mask) &
               HMM_PFN_REQ_FAULT))
                 return 0;

         for (i = 0; i < npages; ++i) {
                 required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
                                                      cpu_flags);
                 if (required_fault == HMM_NEED_ALL_BITS)
                         return required_fault;
         }
         return required_fault;
 }

 static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
                              __always_unused int depth, struct mm_walk *walk)
 {
         struct hmm_vma_walk *hmm_vma_walk = walk->private;
         struct hmm_range *range = hmm_vma_walk->range;
         unsigned int required_fault;
         unsigned long i, npages;
         unsigned long *hmm_pfns;

         i = (addr - range->start) >> PAGE_SHIFT;
         npages = (end - addr) >> PAGE_SHIFT;
         hmm_pfns = &range->hmm_pfns[i];
         required_fault =
                 hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
         if (!walk->vma) {
                 if (required_fault)
                         return -EFAULT;
                 return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
         }
         if (required_fault)
                 return hmm_vma_fault(addr, end, required_fault, walk);
         return hmm_pfns_fill(addr, end, range, 0);
 }

 static inline unsigned long hmm_pfn_flags_order(unsigned long order)
 {
         return order << HMM_PFN_ORDER_SHIFT;
 }

 static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
                                                  pmd_t pmd)
 {
         if (pmd_protnone(pmd))
                 return 0;
         return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
                                  HMM_PFN_VALID) |
                hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
 }

 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                               unsigned long end, unsigned long hmm_pfns[],
                               pmd_t pmd)
 {
         struct hmm_vma_walk *hmm_vma_walk = walk->private;
         struct hmm_range *range = hmm_vma_walk->range;
         unsigned long pfn, npages, i;
         unsigned int required_fault;
         unsigned long cpu_flags;

         npages = (end - addr) >> PAGE_SHIFT;
         cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
         required_fault =
                 hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
         if (required_fault)
                 return hmm_vma_fault(addr, end, required_fault, walk);

         pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
         for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
                 hmm_pfns[i] = pfn | cpu_flags;
         return 0;
 }
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 /* stub to allow the code below to compile */
 int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                 unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

 static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
                                                  pte_t pte)
 {
         if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
                 return 0;
         return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
 }

 static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                               unsigned long end, pmd_t *pmdp, pte_t *ptep,
                               unsigned long *hmm_pfn)
 {
         struct hmm_vma_walk *hmm_vma_walk = walk->private;
         struct hmm_range *range = hmm_vma_walk->range;
         unsigned int required_fault;
         unsigned long cpu_flags;
-        pte_t pte = *ptep;
+        pte_t pte = ptep_get(ptep);
         uint64_t pfn_req_flags = *hmm_pfn;

-        if (pte_none(pte)) {
+        if (pte_none_mostly(pte)) {
                 required_fault =
                         hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
                 if (required_fault)
                         goto fault;
                 *hmm_pfn = 0;
                 return 0;
         }

         if (!pte_present(pte)) {
                 swp_entry_t entry = pte_to_swp_entry(pte);

                 /*
                  * Don't fault in device private pages owned by the caller,
                  * just report the PFN.
                  */
                 if (is_device_private_entry(entry) &&
                     pfn_swap_entry_to_page(entry)->pgmap->owner ==
                     range->dev_private_owner) {
                         cpu_flags = HMM_PFN_VALID;
                         if (is_writable_device_private_entry(entry))
                                 cpu_flags |= HMM_PFN_WRITE;
-                        *hmm_pfn = swp_offset(entry) | cpu_flags;
+                        *hmm_pfn = swp_offset_pfn(entry) | cpu_flags;
                         return 0;
                 }

                 required_fault =
                         hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
                 if (!required_fault) {
                         *hmm_pfn = 0;
                         return 0;
                 }

                 if (!non_swap_entry(entry))
                         goto fault;

                 if (is_device_private_entry(entry))
                         goto fault;

                 if (is_device_exclusive_entry(entry))
                         goto fault;

                 if (is_migration_entry(entry)) {
                         pte_unmap(ptep);
                         hmm_vma_walk->last = addr;
                         migration_entry_wait(walk->mm, pmdp, addr);
                         return -EBUSY;
                 }

                 /* Report error for everything else */
                 pte_unmap(ptep);
                 return -EFAULT;
         }

         cpu_flags = pte_to_hmm_pfn_flags(range, pte);
         required_fault =
                 hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
         if (required_fault)
                 goto fault;

         /*
          * Bypass devmap pte such as DAX page when all pfn requested
          * flags(pfn_req_flags) are fulfilled.
          * Since each architecture defines a struct page for the zero page, just
          * fall through and treat it like a normal page.
          */
         if (!vm_normal_page(walk->vma, addr, pte) &&
             !pte_devmap(pte) &&
             !is_zero_pfn(pte_pfn(pte))) {
                 if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
                         pte_unmap(ptep);
                         return -EFAULT;
                 }
                 *hmm_pfn = HMM_PFN_ERROR;
                 return 0;
         }

         *hmm_pfn = pte_pfn(pte) | cpu_flags;
         return 0;

 fault:
         pte_unmap(ptep);
         /* Fault any virtual address we were asked to fault */
         return hmm_vma_fault(addr, end, required_fault, walk);
 }

 static int hmm_vma_walk_pmd(pmd_t *pmdp,
                             unsigned long start,
                             unsigned long end,
                             struct mm_walk *walk)
 {
         struct hmm_vma_walk *hmm_vma_walk = walk->private;
         struct hmm_range *range = hmm_vma_walk->range;
         unsigned long *hmm_pfns =
                 &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
         unsigned long npages = (end - start) >> PAGE_SHIFT;
         unsigned long addr = start;
         pte_t *ptep;
         pmd_t pmd;

 again:
-        pmd = READ_ONCE(*pmdp);
+        pmd = pmdp_get_lockless(pmdp);
         if (pmd_none(pmd))
                 return hmm_vma_walk_hole(start, end, -1, walk);

         if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                 if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
                         hmm_vma_walk->last = addr;
                         pmd_migration_entry_wait(walk->mm, pmdp);
                         return -EBUSY;
                 }
                 return hmm_pfns_fill(start, end, range, 0);
         }

         if (!pmd_present(pmd)) {
                 if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
                         return -EFAULT;
                 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
         }

         if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
                 /*
                  * No need to take pmd_lock here, even if some other thread
                  * is splitting the huge pmd we will get that event through
                  * mmu_notifier callback.
                  *
                  * So just read pmd value and check again it's a transparent
                  * huge or device mapping one and compute corresponding pfn
                  * values.
                  */
-                pmd = pmd_read_atomic(pmdp);
-                barrier();
+                pmd = pmdp_get_lockless(pmdp);
                 if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                         goto again;

                 return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
         }

         /*
          * We have handled all the valid cases above ie either none, migration,
          * huge or transparent huge. At this point either it is a valid pmd
          * entry pointing to pte directory or it is a bad pmd that will not
          * recover.
          */
         if (pmd_bad(pmd)) {
                 if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
                         return -EFAULT;
                 return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
         }

         ptep = pte_offset_map(pmdp, addr);
+        if (!ptep)
+                goto again;
         for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
                 int r;

                 r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
                 if (r) {
                         /* hmm_vma_handle_pte() did pte_unmap() */
                         return r;
                 }
         }
         pte_unmap(ptep - 1);
         return 0;
 }

 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
     defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
 static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
                                                  pud_t pud)
 {
         if (!pud_present(pud))
                 return 0;
         return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
                                  HMM_PFN_VALID) |
                hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
 }

 static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
                 struct mm_walk *walk)
 {
         struct hmm_vma_walk *hmm_vma_walk = walk->private;
         struct hmm_range *range = hmm_vma_walk->range;
         unsigned long addr = start;
         pud_t pud;
         spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

         if (!ptl)
                 return 0;

         /* Normally we don't want to split the huge page */
         walk->action = ACTION_CONTINUE;

         pud = READ_ONCE(*pudp);
-        if (pud_none(pud)) {
+        if (!pud_present(pud)) {
                 spin_unlock(ptl);
                 return hmm_vma_walk_hole(start, end, -1, walk);
         }

-        if (pud_huge(pud) && pud_devmap(pud)) {
+        if (pud_leaf(pud) && pud_devmap(pud)) {
                 unsigned long i, npages, pfn;
                 unsigned int required_fault;
                 unsigned long *hmm_pfns;
                 unsigned long cpu_flags;

-                if (!pud_present(pud)) {
-                        spin_unlock(ptl);
-                        return hmm_vma_walk_hole(start, end, -1, walk);
-                }
-
                 i = (addr - range->start) >> PAGE_SHIFT;
                 npages = (end - addr) >> PAGE_SHIFT;
                 hmm_pfns = &range->hmm_pfns[i];

                 cpu_flags = pud_to_hmm_pfn_flags(range, pud);
                 required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
                                                       npages, cpu_flags);
                 if (required_fault) {
                         spin_unlock(ptl);
                         return hmm_vma_fault(addr, end, required_fault, walk);
                 }

                 pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
                 for (i = 0; i < npages; ++i, ++pfn)
                         hmm_pfns[i] = pfn | cpu_flags;
                 goto out_unlock;
         }

         /* Ask for the PUD to be split */
         walk->action = ACTION_SUBTREE;

 out_unlock:
         spin_unlock(ptl);
         return 0;
 }
 #else
 #define hmm_vma_walk_pud        NULL
 #endif

 #ifdef CONFIG_HUGETLB_PAGE
 static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                       unsigned long start, unsigned long end,
                                       struct mm_walk *walk)
 {
         unsigned long addr = start, i, pfn;
         struct hmm_vma_walk *hmm_vma_walk = walk->private;
         struct hmm_range *range = hmm_vma_walk->range;
         struct vm_area_struct *vma = walk->vma;
         unsigned int required_fault;
         unsigned long pfn_req_flags;
         unsigned long cpu_flags;
         spinlock_t *ptl;
         pte_t entry;

         ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
-        entry = huge_ptep_get(pte);
+        entry = huge_ptep_get(walk->mm, addr, pte);

         i = (start - range->start) >> PAGE_SHIFT;
         pfn_req_flags = range->hmm_pfns[i];
         cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
                     hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
         required_fault =
                 hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
         if (required_fault) {
+                int ret;
+
                 spin_unlock(ptl);
-                return hmm_vma_fault(addr, end, required_fault, walk);
+                hugetlb_vma_unlock_read(vma);
+                /*
+                 * Avoid deadlock: drop the vma lock before calling
+                 * hmm_vma_fault(), which will itself potentially take and
+                 * drop the vma lock. This is also correct from a
+                 * protection point of view, because there is no further
+                 * use here of either pte or ptl after dropping the vma
+                 * lock.
+                 */
+                ret = hmm_vma_fault(addr, end, required_fault, walk);
+                hugetlb_vma_lock_read(vma);
+                return ret;
         }

         pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
         for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
                 range->hmm_pfns[i] = pfn | cpu_flags;

         spin_unlock(ptl);
         return 0;
 }
 #else
 #define hmm_vma_walk_hugetlb_entry NULL
 #endif /* CONFIG_HUGETLB_PAGE */

 static int hmm_vma_walk_test(unsigned long start, unsigned long end,
                              struct mm_walk *walk)
 {
         struct hmm_vma_walk *hmm_vma_walk = walk->private;
         struct hmm_range *range = hmm_vma_walk->range;
         struct vm_area_struct *vma = walk->vma;

         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
             vma->vm_flags & VM_READ)
                 return 0;

         /*
          * vma ranges that don't have struct page backing them or map I/O
          * devices directly cannot be handled by hmm_range_fault().
          *
          * If the vma does not allow read access, then assume that it does not
          * allow write access either. HMM does not support architectures that
          * allow write without read.
          *
          * If a fault is requested for an unsupported range then it is a hard
          * failure.
          */
         if (hmm_range_need_fault(hmm_vma_walk,
                                  range->hmm_pfns +
                                          ((start - range->start) >> PAGE_SHIFT),
                                  (end - start) >> PAGE_SHIFT, 0))
                 return -EFAULT;

         hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

         /* Skip this vma and continue processing the next vma. */
         return 1;
 }

 static const struct mm_walk_ops hmm_walk_ops = {
         .pud_entry      = hmm_vma_walk_pud,
         .pmd_entry      = hmm_vma_walk_pmd,
         .pte_hole       = hmm_vma_walk_hole,
         .hugetlb_entry  = hmm_vma_walk_hugetlb_entry,
         .test_walk      = hmm_vma_walk_test,
+        .walk_lock      = PGWALK_RDLOCK,
 };

 /**
  * hmm_range_fault - try to fault some address in a virtual address range
  * @range:      argument structure
  *
  * Returns 0 on success or one of the following error codes:
  *
  * -EINVAL:     Invalid arguments or mm or virtual address is in an invalid vma
  *              (e.g., device file vma).
  * -ENOMEM:     Out of memory.
  * -EPERM:      Invalid permission (e.g., asking for write and range is read
  *              only).
  * -EBUSY:      The range has been invalidated and the caller needs to wait for
  *              the invalidation to finish.
  * -EFAULT:     A page was requested to be valid and could not be made valid
  *              ie it has no backing VMA or it is illegal to access
  *
  * This is similar to get_user_pages(), except that it can read the page tables
  * without mutating them (ie causing faults).
  */
 int hmm_range_fault(struct hmm_range *range)
 {
         struct hmm_vma_walk hmm_vma_walk = {
                 .range = range,
                 .last = range->start,
         };
         struct mm_struct *mm = range->notifier->mm;
         int ret;

         mmap_assert_locked(mm);

         do {
                 /* If range is no longer valid force retry. */
                 if (mmu_interval_check_retry(range->notifier,
                                              range->notifier_seq))
                         return -EBUSY;
                 ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
                                       &hmm_walk_ops, &hmm_vma_walk);
                 /*
                  * When -EBUSY is returned the loop restarts with
                  * hmm_vma_walk.last set to an address that has not been stored
                  * in pfns. All entries < last in the pfn array are set to their
                  * output, and all >= are still at their input values.
                  */
         } while (ret == -EBUSY);
         return ret;
 }
 EXPORT_SYMBOL(hmm_range_fault);
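
Usage note

The kernel-doc above gives the error contract of hmm_range_fault() but not the retry protocol around it: a caller snapshots a sequence number with mmu_interval_read_begin(), walks the range under the mmap read lock, and must re-validate that sequence before consuming the pfns. Below is a minimal sketch of that pattern, adapted from the one described in Documentation/mm/hmm.rst. The driver-side pieces (drv_populate_range(), struct drv_ctx and its mutex) are hypothetical placeholders invented for illustration; the hmm_* and mmu_interval_* calls are the real API in both versions shown above.

#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

/* Hypothetical driver context; the mutex protects the device page table. */
struct drv_ctx {
        struct mutex mutex;
};

static int drv_populate_range(struct drv_ctx *drv,
                              struct mmu_interval_notifier *notifier,
                              unsigned long start, unsigned long npages,
                              unsigned long *pfns)
{
        struct mm_struct *mm = notifier->mm;
        struct hmm_range range = {
                .notifier               = notifier,
                .start                  = start,
                .end                    = start + (npages << PAGE_SHIFT),
                .hmm_pfns               = pfns,
                /*
                 * Pre-fault the whole range writable; with a zero
                 * pfn_flags_mask the per-pfn input flags are ignored,
                 * so pfns[] need not be pre-filled (the "second way"
                 * described in hmm_pte_need_fault() above).
                 */
                .default_flags          = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
                .pfn_flags_mask         = 0,
                .dev_private_owner      = drv,
        };
        int ret;

        do {
                range.notifier_seq = mmu_interval_read_begin(notifier);
                mmap_read_lock(mm);
                ret = hmm_range_fault(&range);
                mmap_read_unlock(mm);
                if (ret) {
                        if (ret == -EBUSY)
                                continue;       /* invalidated mid-walk, start over */
                        return ret;
                }
                mutex_lock(&drv->mutex);
                if (!mmu_interval_read_retry(notifier, range.notifier_seq))
                        break;                  /* pfns[] is stable, keep the lock */
                mutex_unlock(&drv->mutex);
        } while (true);

        /*
         * Program the device page table from pfns[] here, e.g. via
         * hmm_pfn_to_page(pfns[i]); each entry now has HMM_PFN_VALID
         * (and HMM_PFN_WRITE, since we requested write) set. Holding
         * drv->mutex serializes this against the driver's invalidate
         * callback.
         */
        mutex_unlock(&drv->mutex);
        return 0;
}

The matching mmu_interval_notifier invalidate callback would take the same drv->mutex before tearing down device mappings; that, together with the mmu_interval_read_retry() check, is what makes it safe to publish pfns[] after hmm_range_fault() returns.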
