TOMOYO Linux Cross Reference
Linux/mm/gup.c

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 #include <linux/kernel.h>
  3 #include <linux/errno.h>
  4 #include <linux/err.h>
  5 #include <linux/spinlock.h>
  6 
  7 #include <linux/mm.h>
  8 #include <linux/memfd.h>
  9 #include <linux/memremap.h>
 10 #include <linux/pagemap.h>
 11 #include <linux/rmap.h>
 12 #include <linux/swap.h>
 13 #include <linux/swapops.h>
 14 #include <linux/secretmem.h>
 15 
 16 #include <linux/sched/signal.h>
 17 #include <linux/rwsem.h>
 18 #include <linux/hugetlb.h>
 19 #include <linux/migrate.h>
 20 #include <linux/mm_inline.h>
 21 #include <linux/pagevec.h>
 22 #include <linux/sched/mm.h>
 23 #include <linux/shmem_fs.h>
 24 
 25 #include <asm/mmu_context.h>
 26 #include <asm/tlbflush.h>
 27 
 28 #include "internal.h"
 29 
 30 struct follow_page_context {
 31         struct dev_pagemap *pgmap;
 32         unsigned int page_mask;
 33 };
 34 
 35 static inline void sanity_check_pinned_pages(struct page **pages,
 36                                              unsigned long npages)
 37 {
 38         if (!IS_ENABLED(CONFIG_DEBUG_VM))
 39                 return;
 40 
 41         /*
 42          * We only pin anonymous pages if they are exclusive. Once pinned, they
 43          * can no longer become shared, and PageAnonExclusive() will
 44          * stick around until the page is freed.
 45          *
 46          * We'd like to verify that our pinned anonymous pages are still mapped
 47          * exclusively. The issue with anon THP is that we don't know how
 48          * they are/were mapped when pinning them. However, for anon
 49          * THP we can assume that either the given page (PTE-mapped THP) or
 50          * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
 51          * neither is the case, there is certainly something wrong.
 52          */
 53         for (; npages; npages--, pages++) {
 54                 struct page *page = *pages;
 55                 struct folio *folio = page_folio(page);
 56 
 57                 if (is_zero_page(page) ||
 58                     !folio_test_anon(folio))
 59                         continue;
 60                 if (!folio_test_large(folio) || folio_test_hugetlb(folio))
 61                         VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
 62                 else
 63                         /* Either a PTE-mapped or a PMD-mapped THP. */
 64                         VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
 65                                        !PageAnonExclusive(page), page);
 66         }
 67 }
 68 
 69 /*
 70  * Return the folio with ref appropriately incremented,
 71  * or NULL if that failed.
 72  */
 73 static inline struct folio *try_get_folio(struct page *page, int refs)
 74 {
 75         struct folio *folio;
 76 
 77 retry:
 78         folio = page_folio(page);
 79         if (WARN_ON_ONCE(folio_ref_count(folio) < 0))
 80                 return NULL;
 81         if (unlikely(!folio_ref_try_add(folio, refs)))
 82                 return NULL;
 83 
 84         /*
 85          * At this point we have a stable reference to the folio; but it
 86          * could be that between calling page_folio() and the refcount
 87          * increment, the folio was split, in which case we'd end up
 88          * holding a reference on a folio that has nothing to do with the page
 89          * we were given anymore.
 90          * So now that the folio is stable, recheck that the page still
 91          * belongs to this folio.
 92          */
 93         if (unlikely(page_folio(page) != folio)) {
 94                 if (!put_devmap_managed_folio_refs(folio, refs))
 95                         folio_put_refs(folio, refs);
 96                 goto retry;
 97         }
 98 
 99         return folio;
100 }
101 
102 static void gup_put_folio(struct folio *folio, int refs, unsigned int flags)
103 {
104         if (flags & FOLL_PIN) {
105                 if (is_zero_folio(folio))
106                         return;
107                 node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs);
108                 if (folio_test_large(folio))
109                         atomic_sub(refs, &folio->_pincount);
110                 else
111                         refs *= GUP_PIN_COUNTING_BIAS;
112         }
113 
114         if (!put_devmap_managed_folio_refs(folio, refs))
115                 folio_put_refs(folio, refs);
116 }
117 
118 /**
119  * try_grab_folio() - add a folio's refcount by a flag-dependent amount
120  * @folio:    pointer to folio to be grabbed
121  * @refs:     the value to (effectively) add to the folio's refcount
122  * @flags:    gup flags: these are the FOLL_* flag values
123  *
124  * This might not do anything at all, depending on the flags argument.
125  *
126  * "grab" names in this file mean "look at flags to decide whether to use
127  * FOLL_PIN or FOLL_GET behavior when incrementing the folio's refcount".
128  *
129  * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the same
130  * time.
131  *
132  * Return: 0 for success, or if no action was required (if neither FOLL_PIN
133  * nor FOLL_GET was set, nothing is done). A negative error code for failure:
134  *
135  *   -ENOMEM            FOLL_GET or FOLL_PIN was set, but the folio could not
136  *                      be grabbed.
137  *
138  * This is called when we already hold a stable reference on the folio,
139  * typically in the GUP slow path.
140  */
141 int __must_check try_grab_folio(struct folio *folio, int refs,
142                                 unsigned int flags)
143 {
144         if (WARN_ON_ONCE(folio_ref_count(folio) <= 0))
145                 return -ENOMEM;
146 
147         if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(&folio->page)))
148                 return -EREMOTEIO;
149 
150         if (flags & FOLL_GET)
151                 folio_ref_add(folio, refs);
152         else if (flags & FOLL_PIN) {
153                 /*
154                  * Don't take a pin on the zero page - it's not going anywhere
155                  * and it is used in a *lot* of places.
156                  */
157                 if (is_zero_folio(folio))
158                         return 0;
159 
160                 /*
161                  * Increment the normal page refcount field at least once,
162                  * so that the page really is pinned.
163                  */
164                 if (folio_test_large(folio)) {
165                         folio_ref_add(folio, refs);
166                         atomic_add(refs, &folio->_pincount);
167                 } else {
168                         folio_ref_add(folio, refs * GUP_PIN_COUNTING_BIAS);
169                 }
170 
171                 node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
172         }
173 
174         return 0;
175 }
176 
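/*
 * Illustrative sketch (not part of this file's API): how the refcount of a
 * small (single-page) folio moves when grabbed via try_grab_folio().  The
 * numbers assume GUP_PIN_COUNTING_BIAS == 1024, its current definition in
 * include/linux/mm.h.
 *
 *	struct folio *folio = page_folio(page);		// refcount == N
 *
 *	try_grab_folio(folio, 1, FOLL_GET);		// refcount == N + 1
 *	try_grab_folio(folio, 1, FOLL_PIN);		// refcount == N + 1 + 1024
 *
 * A large folio pinned with FOLL_PIN instead gets refcount += 1 and
 * folio->_pincount += 1, so pins are tracked exactly.
 */
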
177 /**
178  * unpin_user_page() - release a dma-pinned page
179  * @page:            pointer to page to be released
180  *
181  * Pages that were pinned via pin_user_pages*() must be released via either
182  * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
183  * that such pages can be separately tracked and uniquely handled. In
184  * particular, interactions with RDMA and filesystems need special handling.
185  */
186 void unpin_user_page(struct page *page)
187 {
188         sanity_check_pinned_pages(&page, 1);
189         gup_put_folio(page_folio(page), 1, FOLL_PIN);
190 }
191 EXPORT_SYMBOL(unpin_user_page);
192 
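/*
 * Hedged usage sketch (hypothetical caller, not a symbol in this file):
 * pin a user buffer for short-term DMA with pin_user_pages_fast() and
 * release every page through unpin_user_page().
 *
 *	static int example_dma_rw(unsigned long uaddr, int nr)
 *	{
 *		struct page **pages;
 *		int i, pinned;
 *
 *		pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
 *		if (!pages)
 *			return -ENOMEM;
 *
 *		pinned = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
 *		if (pinned < 0) {
 *			kfree(pages);
 *			return pinned;
 *		}
 *
 *		// ... program the device and wait for the DMA to finish ...
 *
 *		for (i = 0; i < pinned; i++)
 *			unpin_user_page(pages[i]);
 *		kfree(pages);
 *		return 0;
 *	}
 */
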
193 /**
194  * unpin_folio() - release a dma-pinned folio
195  * @folio:         pointer to folio to be released
196  *
197  * Folios that were pinned via memfd_pin_folios() or other similar routines
198  * must be released either using unpin_folio() or unpin_folios().
199  */
200 void unpin_folio(struct folio *folio)
201 {
202         gup_put_folio(folio, 1, FOLL_PIN);
203 }
204 EXPORT_SYMBOL_GPL(unpin_folio);
205 
206 /**
207  * folio_add_pin - Try to get an additional pin on a pinned folio
208  * @folio: The folio to be pinned
209  *
210  * Get an additional pin on a folio we already have a pin on.  Makes no change
211  * if the folio is a zero_page.
212  */
213 void folio_add_pin(struct folio *folio)
214 {
215         if (is_zero_folio(folio))
216                 return;
217 
218         /*
219          * Similar to try_grab_folio(): be sure to *also* increment the normal
220          * page refcount field at least once, so that the page really is
221          * pinned.
222          */
223         if (folio_test_large(folio)) {
224                 WARN_ON_ONCE(atomic_read(&folio->_pincount) < 1);
225                 folio_ref_inc(folio);
226                 atomic_inc(&folio->_pincount);
227         } else {
228                 WARN_ON_ONCE(folio_ref_count(folio) < GUP_PIN_COUNTING_BIAS);
229                 folio_ref_add(folio, GUP_PIN_COUNTING_BIAS);
230         }
231 }
232 
233 static inline struct folio *gup_folio_range_next(struct page *start,
234                 unsigned long npages, unsigned long i, unsigned int *ntails)
235 {
236         struct page *next = nth_page(start, i);
237         struct folio *folio = page_folio(next);
238         unsigned int nr = 1;
239 
240         if (folio_test_large(folio))
241                 nr = min_t(unsigned int, npages - i,
242                            folio_nr_pages(folio) - folio_page_idx(folio, next));
243 
244         *ntails = nr;
245         return folio;
246 }
247 
248 static inline struct folio *gup_folio_next(struct page **list,
249                 unsigned long npages, unsigned long i, unsigned int *ntails)
250 {
251         struct folio *folio = page_folio(list[i]);
252         unsigned int nr;
253 
254         for (nr = i + 1; nr < npages; nr++) {
255                 if (page_folio(list[nr]) != folio)
256                         break;
257         }
258 
259         *ntails = nr - i;
260         return folio;
261 }
262 
263 /**
264  * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
265  * @pages:  array of pages to be maybe marked dirty, and definitely released.
266  * @npages: number of pages in the @pages array.
267  * @make_dirty: whether to mark the pages dirty
268  *
269  * "gup-pinned page" refers to a page that has had one of the get_user_pages()
270  * variants called on that page.
271  *
272  * For each page in the @pages array, make that page (or its head page, if a
273  * compound page) dirty, if @make_dirty is true, and if the page was previously
274  * listed as clean. In any case, releases all pages using unpin_user_page(),
275  * possibly via unpin_user_pages(), for the non-dirty case.
276  *
277  * Please see the unpin_user_page() documentation for details.
278  *
279  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
280  * required, then the caller should a) verify that this is really correct,
281  * because _lock() is usually required, and b) hand code it:
282  * set_page_dirty(), unpin_user_page().
283  *
284  */
285 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
286                                  bool make_dirty)
287 {
288         unsigned long i;
289         struct folio *folio;
290         unsigned int nr;
291 
292         if (!make_dirty) {
293                 unpin_user_pages(pages, npages);
294                 return;
295         }
296 
297         sanity_check_pinned_pages(pages, npages);
298         for (i = 0; i < npages; i += nr) {
299                 folio = gup_folio_next(pages, npages, i, &nr);
300                 /*
301                  * Checking PageDirty at this point may race with
302                  * clear_page_dirty_for_io(), but that's OK. Two key
303                  * cases:
304                  *
305                  * 1) This code sees the page as already dirty, so it
306                  * skips the call to set_page_dirty(). That could happen
307                  * because clear_page_dirty_for_io() called
308                  * folio_mkclean(), followed by set_page_dirty().
309                  * However, now the page is going to get written back,
310                  * which meets the original intention of setting it
311                  * dirty, so all is well: clear_page_dirty_for_io() goes
312                  * on to call TestClearPageDirty(), and write the page
313                  * back.
314                  *
315                  * 2) This code sees the page as clean, so it calls
316                  * set_page_dirty(). The page stays dirty, despite being
317                  * written back, so it gets written back again in the
318                  * next writeback cycle. This is harmless.
319                  */
320                 if (!folio_test_dirty(folio)) {
321                         folio_lock(folio);
322                         folio_mark_dirty(folio);
323                         folio_unlock(folio);
324                 }
325                 gup_put_folio(folio, nr, FOLL_PIN);
326         }
327 }
328 EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
329 
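/*
 * Hedged sketch: when the device may have written to the pinned buffer, the
 * per-page unpin loop from the earlier example collapses into a single call
 * that also dirties the folios.  "pages"/"pinned" are the hypothetical
 * caller's variables, not names used in this file.
 *
 *	// device wrote into the buffer: mark dirty and release in one go
 *	unpin_user_pages_dirty_lock(pages, pinned, true);
 *
 *	// read-only DMA: nothing to dirty, equivalent to unpin_user_pages()
 *	unpin_user_pages_dirty_lock(pages, pinned, false);
 */
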
330 /**
331  * unpin_user_page_range_dirty_lock() - release and optionally dirty
332  * gup-pinned page range
333  *
334  * @page:  the starting page of a range maybe marked dirty, and definitely released.
335  * @npages: number of consecutive pages to release.
336  * @make_dirty: whether to mark the pages dirty
337  *
338  * "gup-pinned page range" refers to a range of pages that has had one of the
339  * pin_user_pages() variants called on that page.
340  *
341  * For the page ranges defined by [page .. page+npages], make that range (or
342  * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
343  * page range was previously listed as clean.
344  *
345  * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
346  * required, then the caller should a) verify that this is really correct,
347  * because _lock() is usually required, and b) hand code it:
348  * set_page_dirty(), unpin_user_page().
349  *
350  */
351 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
352                                       bool make_dirty)
353 {
354         unsigned long i;
355         struct folio *folio;
356         unsigned int nr;
357 
358         for (i = 0; i < npages; i += nr) {
359                 folio = gup_folio_range_next(page, npages, i, &nr);
360                 if (make_dirty && !folio_test_dirty(folio)) {
361                         folio_lock(folio);
362                         folio_mark_dirty(folio);
363                         folio_unlock(folio);
364                 }
365                 gup_put_folio(folio, nr, FOLL_PIN);
366         }
367 }
368 EXPORT_SYMBOL(unpin_user_page_range_dirty_lock);
369 
370 static void gup_fast_unpin_user_pages(struct page **pages, unsigned long npages)
371 {
372         unsigned long i;
373         struct folio *folio;
374         unsigned int nr;
375 
376         /*
377          * Don't perform any sanity checks because we might have raced with
378          * fork() and some anonymous pages might now actually be shared --
379          * which is why we're unpinning after all.
380          */
381         for (i = 0; i < npages; i += nr) {
382                 folio = gup_folio_next(pages, npages, i, &nr);
383                 gup_put_folio(folio, nr, FOLL_PIN);
384         }
385 }
386 
387 /**
388  * unpin_user_pages() - release an array of gup-pinned pages.
389  * @pages:  array of pages to be released.
390  * @npages: number of pages in the @pages array.
391  *
392  * For each page in the @pages array, release the page using unpin_user_page().
393  *
394  * Please see the unpin_user_page() documentation for details.
395  */
396 void unpin_user_pages(struct page **pages, unsigned long npages)
397 {
398         unsigned long i;
399         struct folio *folio;
400         unsigned int nr;
401 
402         /*
403          * If this WARN_ON() fires, then the system *might* be leaking pages (by
404          * leaving them pinned), but probably not. More likely, gup/pup returned
405          * a hard -ERRNO error to the caller, who erroneously passed it here.
406          */
407         if (WARN_ON(IS_ERR_VALUE(npages)))
408                 return;
409 
410         sanity_check_pinned_pages(pages, npages);
411         for (i = 0; i < npages; i += nr) {
412                 folio = gup_folio_next(pages, npages, i, &nr);
413                 gup_put_folio(folio, nr, FOLL_PIN);
414         }
415 }
416 EXPORT_SYMBOL(unpin_user_pages);
417 
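/*
 * Hedged illustration of the WARN_ON(IS_ERR_VALUE(npages)) check above:
 * only unpin what gup/pup actually reported as pinned, never the requested
 * count and never an error value.  Variable names are hypothetical.
 *
 *	pinned = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
 *	if (pinned > 0)
 *		unpin_user_pages(pages, pinned);	// not "nr", not an -errno
 */
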
418 /**
419  * unpin_folios() - release an array of gup-pinned folios.
420  * @folios:  array of folios to be released.
421  * @nfolios: number of folios in the @folios array.
422  *
423  * For each folio in the @folios array, release the folio using gup_put_folio.
424  *
425  * Please see the unpin_folio() documentation for details.
426  */
427 void unpin_folios(struct folio **folios, unsigned long nfolios)
428 {
429         unsigned long i = 0, j;
430 
431         /*
432          * If this WARN_ON() fires, then the system *might* be leaking folios
433          * (by leaving them pinned), but probably not. More likely, gup/pup
434          * returned a hard -ERRNO error to the caller, who erroneously passed
435          * it here.
436          */
437         if (WARN_ON(IS_ERR_VALUE(nfolios)))
438                 return;
439 
440         while (i < nfolios) {
441                 for (j = i + 1; j < nfolios; j++)
442                         if (folios[i] != folios[j])
443                                 break;
444 
445                 if (folios[i])
446                         gup_put_folio(folios[i], j - i, FOLL_PIN);
447                 i = j;
448         }
449 }
450 EXPORT_SYMBOL_GPL(unpin_folios);
451 
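/*
 * Hedged sketch pairing memfd_pin_folios() with unpin_folios().  The memfd
 * file, range and bookkeeping variables are the hypothetical caller's; the
 * signature of memfd_pin_folios() is assumed from include/linux/memfd.h.
 *
 *	folios = kvcalloc(max_folios, sizeof(*folios), GFP_KERNEL);
 *	...
 *	nr = memfd_pin_folios(memfd, start, end, folios, max_folios, &offset);
 *	if (nr > 0) {
 *		// ... map or DMA into the pinned folios ...
 *		unpin_folios(folios, nr);
 *	}
 *	kvfree(folios);
 */
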
452 /*
453  * Set MMF_HAS_PINNED if not set yet; once set, it stays for the mm's
454  * lifetime.  Avoid setting the bit unless necessary, or it might cause write
455  * cache bouncing on large SMP machines for concurrent pinned gups.
456  */
457 static inline void mm_set_has_pinned_flag(unsigned long *mm_flags)
458 {
459         if (!test_bit(MMF_HAS_PINNED, mm_flags))
460                 set_bit(MMF_HAS_PINNED, mm_flags);
461 }
462 
463 #ifdef CONFIG_MMU
464 
465 #ifdef CONFIG_HAVE_GUP_FAST
466 static int record_subpages(struct page *page, unsigned long sz,
467                            unsigned long addr, unsigned long end,
468                            struct page **pages)
469 {
470         struct page *start_page;
471         int nr;
472 
473         start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT);
474         for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
475                 pages[nr] = nth_page(start_page, nr);
476 
477         return nr;
478 }
479 
480 /**
481  * try_grab_folio_fast() - Attempt to get or pin a folio in fast path.
482  * @page:  pointer to page to be grabbed
483  * @refs:  the value to (effectively) add to the folio's refcount
484  * @flags: gup flags: these are the FOLL_* flag values.
485  *
486  * "grab" names in this file mean "look at flags to decide whether to use
487  * FOLL_PIN or FOLL_GET behavior when incrementing the folio's refcount".
488  *
489  * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
490  * same time. (That's true throughout the get_user_pages*() and
491  * pin_user_pages*() APIs.) Cases:
492  *
493  *    FOLL_GET: folio's refcount will be incremented by @refs.
494  *
495  *    FOLL_PIN on large folios: folio's refcount will be incremented by
496  *    @refs, and its pincount will be incremented by @refs.
497  *
498  *    FOLL_PIN on single-page folios: folio's refcount will be incremented by
499  *    @refs * GUP_PIN_COUNTING_BIAS.
500  *
501  * Return: The folio containing @page (with refcount appropriately
502  * incremented) for success, or NULL upon failure. If neither FOLL_GET
503  * nor FOLL_PIN was set, that's considered failure, and furthermore,
504  * a likely bug in the caller, so a warning is also emitted.
505  *
506  * It elevates the folio refcount with an add-unless-zero operation and must
507  * only be called from the fast path.
508  */
509 static struct folio *try_grab_folio_fast(struct page *page, int refs,
510                                          unsigned int flags)
511 {
512         struct folio *folio;
513 
514         /* Warn if this is not called from fast GUP (IRQs must be disabled) */
515         VM_WARN_ON_ONCE(!irqs_disabled());
516 
517         if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
518                 return NULL;
519 
520         if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
521                 return NULL;
522 
523         if (flags & FOLL_GET)
524                 return try_get_folio(page, refs);
525 
526         /* FOLL_PIN is set */
527 
528         /*
529          * Don't take a pin on the zero page - it's not going anywhere
530          * and it is used in a *lot* of places.
531          */
532         if (is_zero_page(page))
533                 return page_folio(page);
534 
535         folio = try_get_folio(page, refs);
536         if (!folio)
537                 return NULL;
538 
539         /*
540          * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a
541          * right zone, so fail and let the caller fall back to the slow
542          * path.
543          */
544         if (unlikely((flags & FOLL_LONGTERM) &&
545                      !folio_is_longterm_pinnable(folio))) {
546                 if (!put_devmap_managed_folio_refs(folio, refs))
547                         folio_put_refs(folio, refs);
548                 return NULL;
549         }
550 
551         /*
552          * When pinning a large folio, use an exact count to track it.
553          *
554          * However, be sure to *also* increment the normal folio
555          * refcount field at least once, so that the folio really
556          * is pinned.  That's why the refcount from the earlier
557          * try_get_folio() is left intact.
558          */
559         if (folio_test_large(folio))
560                 atomic_add(refs, &folio->_pincount);
561         else
562                 folio_ref_add(folio,
563                                 refs * (GUP_PIN_COUNTING_BIAS - 1));
564         /*
565          * Adjust the pincount before re-checking the PTE for changes.
566          * This is essentially a smp_mb() and is paired with a memory
567          * barrier in folio_try_share_anon_rmap_*().
568          */
569         smp_mb__after_atomic();
570 
571         node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
572 
573         return folio;
574 }
575 #endif  /* CONFIG_HAVE_GUP_FAST */
576 
577 static struct page *no_page_table(struct vm_area_struct *vma,
578                                   unsigned int flags, unsigned long address)
579 {
580         if (!(flags & FOLL_DUMP))
581                 return NULL;
582 
583         /*
584          * When core dumping, we don't want to allocate unnecessary pages or
585          * page tables.  Return error instead of NULL to skip handle_mm_fault,
586          * then get_dump_page() will return NULL to leave a hole in the dump.
587          * But we can only make this optimization where a hole would surely
588          * be zero-filled if handle_mm_fault() actually did handle it.
589          */
590         if (is_vm_hugetlb_page(vma)) {
591                 struct hstate *h = hstate_vma(vma);
592 
593                 if (!hugetlbfs_pagecache_present(h, vma, address))
594                         return ERR_PTR(-EFAULT);
595         } else if ((vma_is_anonymous(vma) || !vma->vm_ops->fault)) {
596                 return ERR_PTR(-EFAULT);
597         }
598 
599         return NULL;
600 }
601 
602 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
603 static struct page *follow_huge_pud(struct vm_area_struct *vma,
604                                     unsigned long addr, pud_t *pudp,
605                                     int flags, struct follow_page_context *ctx)
606 {
607         struct mm_struct *mm = vma->vm_mm;
608         struct page *page;
609         pud_t pud = *pudp;
610         unsigned long pfn = pud_pfn(pud);
611         int ret;
612 
613         assert_spin_locked(pud_lockptr(mm, pudp));
614 
615         if ((flags & FOLL_WRITE) && !pud_write(pud))
616                 return NULL;
617 
618         if (!pud_present(pud))
619                 return NULL;
620 
621         pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT;
622 
623         if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) &&
624             pud_devmap(pud)) {
625                 /*
626                  * device mapped pages can only be returned if the caller
627                  * will manage the page reference count.
628                  *
629                  * At least one of FOLL_GET | FOLL_PIN must be set, so
630                  * assert that here:
631                  */
632                 if (!(flags & (FOLL_GET | FOLL_PIN)))
633                         return ERR_PTR(-EEXIST);
634 
635                 if (flags & FOLL_TOUCH)
636                         touch_pud(vma, addr, pudp, flags & FOLL_WRITE);
637 
638                 ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap);
639                 if (!ctx->pgmap)
640                         return ERR_PTR(-EFAULT);
641         }
642 
643         page = pfn_to_page(pfn);
644 
645         if (!pud_devmap(pud) && !pud_write(pud) &&
646             gup_must_unshare(vma, flags, page))
647                 return ERR_PTR(-EMLINK);
648 
649         ret = try_grab_folio(page_folio(page), 1, flags);
650         if (ret)
651                 page = ERR_PTR(ret);
652         else
653                 ctx->page_mask = HPAGE_PUD_NR - 1;
654 
655         return page;
656 }
657 
658 /* FOLL_FORCE can write to even unwritable PMDs in COW mappings. */
659 static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
660                                         struct vm_area_struct *vma,
661                                         unsigned int flags)
662 {
663         /* If the pmd is writable, we can write to the page. */
664         if (pmd_write(pmd))
665                 return true;
666 
667         /* Maybe FOLL_FORCE is set to override it? */
668         if (!(flags & FOLL_FORCE))
669                 return false;
670 
671         /* But FOLL_FORCE has no effect on shared mappings */
672         if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
673                 return false;
674 
675         /* ... or read-only private ones */
676         if (!(vma->vm_flags & VM_MAYWRITE))
677                 return false;
678 
679         /* ... or already writable ones that just need to take a write fault */
680         if (vma->vm_flags & VM_WRITE)
681                 return false;
682 
683         /*
684          * See can_change_pte_writable(): we broke COW and could map the page
685          * writable if we have an exclusive anonymous page ...
686          */
687         if (!page || !PageAnon(page) || !PageAnonExclusive(page))
688                 return false;
689 
690         /* ... and a write-fault isn't required for other reasons. */
691         if (pmd_needs_soft_dirty_wp(vma, pmd))
692                 return false;
693         return !userfaultfd_huge_pmd_wp(vma, pmd);
694 }
695 
696 static struct page *follow_huge_pmd(struct vm_area_struct *vma,
697                                     unsigned long addr, pmd_t *pmd,
698                                     unsigned int flags,
699                                     struct follow_page_context *ctx)
700 {
701         struct mm_struct *mm = vma->vm_mm;
702         pmd_t pmdval = *pmd;
703         struct page *page;
704         int ret;
705 
706         assert_spin_locked(pmd_lockptr(mm, pmd));
707 
708         page = pmd_page(pmdval);
709         if ((flags & FOLL_WRITE) &&
710             !can_follow_write_pmd(pmdval, page, vma, flags))
711                 return NULL;
712 
713         /* Avoid dumping huge zero page */
714         if ((flags & FOLL_DUMP) && is_huge_zero_pmd(pmdval))
715                 return ERR_PTR(-EFAULT);
716 
717         if (pmd_protnone(*pmd) && !gup_can_follow_protnone(vma, flags))
718                 return NULL;
719 
720         if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
721                 return ERR_PTR(-EMLINK);
722 
723         VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
724                         !PageAnonExclusive(page), page);
725 
726         ret = try_grab_folio(page_folio(page), 1, flags);
727         if (ret)
728                 return ERR_PTR(ret);
729 
730 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
731         if (pmd_trans_huge(pmdval) && (flags & FOLL_TOUCH))
732                 touch_pmd(vma, addr, pmd, flags & FOLL_WRITE);
733 #endif  /* CONFIG_TRANSPARENT_HUGEPAGE */
734 
735         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
736         ctx->page_mask = HPAGE_PMD_NR - 1;
737 
738         return page;
739 }
740 
741 #else  /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
742 static struct page *follow_huge_pud(struct vm_area_struct *vma,
743                                     unsigned long addr, pud_t *pudp,
744                                     int flags, struct follow_page_context *ctx)
745 {
746         return NULL;
747 }
748 
749 static struct page *follow_huge_pmd(struct vm_area_struct *vma,
750                                     unsigned long addr, pmd_t *pmd,
751                                     unsigned int flags,
752                                     struct follow_page_context *ctx)
753 {
754         return NULL;
755 }
756 #endif  /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */
757 
758 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
759                 pte_t *pte, unsigned int flags)
760 {
761         if (flags & FOLL_TOUCH) {
762                 pte_t orig_entry = ptep_get(pte);
763                 pte_t entry = orig_entry;
764 
765                 if (flags & FOLL_WRITE)
766                         entry = pte_mkdirty(entry);
767                 entry = pte_mkyoung(entry);
768 
769                 if (!pte_same(orig_entry, entry)) {
770                         set_pte_at(vma->vm_mm, address, pte, entry);
771                         update_mmu_cache(vma, address, pte);
772                 }
773         }
774 
775         /* Proper page table entry exists, but no corresponding struct page */
776         return -EEXIST;
777 }
778 
779 /* FOLL_FORCE can write to even unwritable PTEs in COW mappings. */
780 static inline bool can_follow_write_pte(pte_t pte, struct page *page,
781                                         struct vm_area_struct *vma,
782                                         unsigned int flags)
783 {
784         /* If the pte is writable, we can write to the page. */
785         if (pte_write(pte))
786                 return true;
787 
788         /* Maybe FOLL_FORCE is set to override it? */
789         if (!(flags & FOLL_FORCE))
790                 return false;
791 
792         /* But FOLL_FORCE has no effect on shared mappings */
793         if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED))
794                 return false;
795 
796         /* ... or read-only private ones */
797         if (!(vma->vm_flags & VM_MAYWRITE))
798                 return false;
799 
800         /* ... or already writable ones that just need to take a write fault */
801         if (vma->vm_flags & VM_WRITE)
802                 return false;
803 
804         /*
805          * See can_change_pte_writable(): we broke COW and could map the page
806          * writable if we have an exclusive anonymous page ...
807          */
808         if (!page || !PageAnon(page) || !PageAnonExclusive(page))
809                 return false;
810 
811         /* ... and a write-fault isn't required for other reasons. */
812         if (pte_needs_soft_dirty_wp(vma, pte))
813                 return false;
814         return !userfaultfd_pte_wp(vma, pte);
815 }
816 
817 static struct page *follow_page_pte(struct vm_area_struct *vma,
818                 unsigned long address, pmd_t *pmd, unsigned int flags,
819                 struct dev_pagemap **pgmap)
820 {
821         struct mm_struct *mm = vma->vm_mm;
822         struct page *page;
823         spinlock_t *ptl;
824         pte_t *ptep, pte;
825         int ret;
826 
827         /* FOLL_GET and FOLL_PIN are mutually exclusive. */
828         if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
829                          (FOLL_PIN | FOLL_GET)))
830                 return ERR_PTR(-EINVAL);
831 
832         ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
833         if (!ptep)
834                 return no_page_table(vma, flags, address);
835         pte = ptep_get(ptep);
836         if (!pte_present(pte))
837                 goto no_page;
838         if (pte_protnone(pte) && !gup_can_follow_protnone(vma, flags))
839                 goto no_page;
840 
841         page = vm_normal_page(vma, address, pte);
842 
843         /*
844          * We only care about anon pages in can_follow_write_pte() and don't
845          * have to worry about pte_devmap() because they are never anon.
846          */
847         if ((flags & FOLL_WRITE) &&
848             !can_follow_write_pte(pte, page, vma, flags)) {
849                 page = NULL;
850                 goto out;
851         }
852 
853         if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
854                 /*
855                  * Only return device mapping pages in the FOLL_GET or FOLL_PIN
856                  * case since they are only valid while holding the pgmap
857                  * reference.
858                  */
859                 *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
860                 if (*pgmap)
861                         page = pte_page(pte);
862                 else
863                         goto no_page;
864         } else if (unlikely(!page)) {
865                 if (flags & FOLL_DUMP) {
866                         /* Avoid special (like zero) pages in core dumps */
867                         page = ERR_PTR(-EFAULT);
868                         goto out;
869                 }
870 
871                 if (is_zero_pfn(pte_pfn(pte))) {
872                         page = pte_page(pte);
873                 } else {
874                         ret = follow_pfn_pte(vma, address, ptep, flags);
875                         page = ERR_PTR(ret);
876                         goto out;
877                 }
878         }
879 
880         if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
881                 page = ERR_PTR(-EMLINK);
882                 goto out;
883         }
884 
885         VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
886                        !PageAnonExclusive(page), page);
887 
888         /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
889         ret = try_grab_folio(page_folio(page), 1, flags);
890         if (unlikely(ret)) {
891                 page = ERR_PTR(ret);
892                 goto out;
893         }
894 
895         /*
896          * We need to make the page accessible if and only if we are going
897          * to access its content (the FOLL_PIN case).  Please see
898          * Documentation/core-api/pin_user_pages.rst for details.
899          */
900         if (flags & FOLL_PIN) {
901                 ret = arch_make_page_accessible(page);
902                 if (ret) {
903                         unpin_user_page(page);
904                         page = ERR_PTR(ret);
905                         goto out;
906                 }
907         }
908         if (flags & FOLL_TOUCH) {
909                 if ((flags & FOLL_WRITE) &&
910                     !pte_dirty(pte) && !PageDirty(page))
911                         set_page_dirty(page);
912                 /*
913                  * pte_mkyoung() would be more correct here, but atomic care
914                  * is needed to avoid losing the dirty bit: it is easier to use
915                  * mark_page_accessed().
916                  */
917                 mark_page_accessed(page);
918         }
919 out:
920         pte_unmap_unlock(ptep, ptl);
921         return page;
922 no_page:
923         pte_unmap_unlock(ptep, ptl);
924         if (!pte_none(pte))
925                 return NULL;
926         return no_page_table(vma, flags, address);
927 }
928 
929 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
930                                     unsigned long address, pud_t *pudp,
931                                     unsigned int flags,
932                                     struct follow_page_context *ctx)
933 {
934         pmd_t *pmd, pmdval;
935         spinlock_t *ptl;
936         struct page *page;
937         struct mm_struct *mm = vma->vm_mm;
938 
939         pmd = pmd_offset(pudp, address);
940         pmdval = pmdp_get_lockless(pmd);
941         if (pmd_none(pmdval))
942                 return no_page_table(vma, flags, address);
943         if (!pmd_present(pmdval))
944                 return no_page_table(vma, flags, address);
945         if (pmd_devmap(pmdval)) {
946                 ptl = pmd_lock(mm, pmd);
947                 page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
948                 spin_unlock(ptl);
949                 if (page)
950                         return page;
951                 return no_page_table(vma, flags, address);
952         }
953         if (likely(!pmd_leaf(pmdval)))
954                 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
955 
956         if (pmd_protnone(pmdval) && !gup_can_follow_protnone(vma, flags))
957                 return no_page_table(vma, flags, address);
958 
959         ptl = pmd_lock(mm, pmd);
960         pmdval = *pmd;
961         if (unlikely(!pmd_present(pmdval))) {
962                 spin_unlock(ptl);
963                 return no_page_table(vma, flags, address);
964         }
965         if (unlikely(!pmd_leaf(pmdval))) {
966                 spin_unlock(ptl);
967                 return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
968         }
969         if (pmd_trans_huge(pmdval) && (flags & FOLL_SPLIT_PMD)) {
970                 spin_unlock(ptl);
971                 split_huge_pmd(vma, pmd, address);
972                 /* If pmd was left empty, stuff a page table in there quickly */
973                 return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) :
974                         follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
975         }
976         page = follow_huge_pmd(vma, address, pmd, flags, ctx);
977         spin_unlock(ptl);
978         return page;
979 }
980 
981 static struct page *follow_pud_mask(struct vm_area_struct *vma,
982                                     unsigned long address, p4d_t *p4dp,
983                                     unsigned int flags,
984                                     struct follow_page_context *ctx)
985 {
986         pud_t *pudp, pud;
987         spinlock_t *ptl;
988         struct page *page;
989         struct mm_struct *mm = vma->vm_mm;
990 
991         pudp = pud_offset(p4dp, address);
992         pud = READ_ONCE(*pudp);
993         if (!pud_present(pud))
994                 return no_page_table(vma, flags, address);
995         if (pud_leaf(pud)) {
996                 ptl = pud_lock(mm, pudp);
997                 page = follow_huge_pud(vma, address, pudp, flags, ctx);
998                 spin_unlock(ptl);
999                 if (page)
1000                         return page;
1001                 return no_page_table(vma, flags, address);
1002         }
1003         if (unlikely(pud_bad(pud)))
1004                 return no_page_table(vma, flags, address);
1005 
1006         return follow_pmd_mask(vma, address, pudp, flags, ctx);
1007 }
1008 
1009 static struct page *follow_p4d_mask(struct vm_area_struct *vma,
1010                                     unsigned long address, pgd_t *pgdp,
1011                                     unsigned int flags,
1012                                     struct follow_page_context *ctx)
1013 {
1014         p4d_t *p4dp, p4d;
1015 
1016         p4dp = p4d_offset(pgdp, address);
1017         p4d = READ_ONCE(*p4dp);
1018         BUILD_BUG_ON(p4d_leaf(p4d));
1019 
1020         if (!p4d_present(p4d) || p4d_bad(p4d))
1021                 return no_page_table(vma, flags, address);
1022 
1023         return follow_pud_mask(vma, address, p4dp, flags, ctx);
1024 }
1025 
1026 /**
1027  * follow_page_mask - look up a page descriptor from a user-virtual address
1028  * @vma: vm_area_struct mapping @address
1029  * @address: virtual address to look up
1030  * @flags: flags modifying lookup behaviour
1031  * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
1032  *       pointer to output page_mask
1033  *
1034  * @flags can have FOLL_ flags set, defined in <linux/mm.h>
1035  *
1036  * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
1037  * the device's dev_pagemap metadata to avoid repeating expensive lookups.
1038  *
1039  * When getting an anonymous page and the caller has to trigger unsharing
1040  * of a shared anonymous page first, -EMLINK is returned. The caller should
1041  * trigger a fault with FAULT_FLAG_UNSHARE set. Note that unsharing is only
1042  * relevant with FOLL_PIN and !FOLL_WRITE.
1043  *
1044  * On output, the @ctx->page_mask is set according to the size of the page.
1045  *
1046  * Return: the mapped (struct page *), %NULL if no mapping exists, or
1047  * an error pointer if there is a mapping to something not represented
1048  * by a page descriptor (see also vm_normal_page()).
1049  */
1050 static struct page *follow_page_mask(struct vm_area_struct *vma,
1051                               unsigned long address, unsigned int flags,
1052                               struct follow_page_context *ctx)
1053 {
1054         pgd_t *pgd;
1055         struct mm_struct *mm = vma->vm_mm;
1056         struct page *page;
1057 
1058         vma_pgtable_walk_begin(vma);
1059 
1060         ctx->page_mask = 0;
1061         pgd = pgd_offset(mm, address);
1062 
1063         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
1064                 page = no_page_table(vma, flags, address);
1065         else
1066                 page = follow_p4d_mask(vma, address, pgd, flags, ctx);
1067 
1068         vma_pgtable_walk_end(vma);
1069 
1070         return page;
1071 }
1072 
1073 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1074                          unsigned int foll_flags)
1075 {
1076         struct follow_page_context ctx = { NULL };
1077         struct page *page;
1078 
1079         if (vma_is_secretmem(vma))
1080                 return NULL;
1081 
1082         if (WARN_ON_ONCE(foll_flags & FOLL_PIN))
1083                 return NULL;
1084 
1085         /*
1086          * We never set FOLL_HONOR_NUMA_FAULT because callers don't expect
1087          * to fail on PROT_NONE-mapped pages.
1088          */
1089         page = follow_page_mask(vma, address, foll_flags, &ctx);
1090         if (ctx.pgmap)
1091                 put_dev_pagemap(ctx.pgmap);
1092         return page;
1093 }
1094 
1095 static int get_gate_page(struct mm_struct *mm, unsigned long address,
1096                 unsigned int gup_flags, struct vm_area_struct **vma,
1097                 struct page **page)
1098 {
1099         pgd_t *pgd;
1100         p4d_t *p4d;
1101         pud_t *pud;
1102         pmd_t *pmd;
1103         pte_t *pte;
1104         pte_t entry;
1105         int ret = -EFAULT;
1106 
1107         /* user gate pages are read-only */
1108         if (gup_flags & FOLL_WRITE)
1109                 return -EFAULT;
1110         if (address > TASK_SIZE)
1111                 pgd = pgd_offset_k(address);
1112         else
1113                 pgd = pgd_offset_gate(mm, address);
1114         if (pgd_none(*pgd))
1115                 return -EFAULT;
1116         p4d = p4d_offset(pgd, address);
1117         if (p4d_none(*p4d))
1118                 return -EFAULT;
1119         pud = pud_offset(p4d, address);
1120         if (pud_none(*pud))
1121                 return -EFAULT;
1122         pmd = pmd_offset(pud, address);
1123         if (!pmd_present(*pmd))
1124                 return -EFAULT;
1125         pte = pte_offset_map(pmd, address);
1126         if (!pte)
1127                 return -EFAULT;
1128         entry = ptep_get(pte);
1129         if (pte_none(entry))
1130                 goto unmap;
1131         *vma = get_gate_vma(mm);
1132         if (!page)
1133                 goto out;
1134         *page = vm_normal_page(*vma, address, entry);
1135         if (!*page) {
1136                 if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry)))
1137                         goto unmap;
1138                 *page = pte_page(entry);
1139         }
1140         ret = try_grab_folio(page_folio(*page), 1, gup_flags);
1141         if (unlikely(ret))
1142                 goto unmap;
1143 out:
1144         ret = 0;
1145 unmap:
1146         pte_unmap(pte);
1147         return ret;
1148 }
1149 
1150 /*
1151  * mmap_lock must be held on entry.  If @flags has FOLL_UNLOCKABLE but not
1152  * FOLL_NOWAIT, the mmap_lock may be released.  If it is, *@locked will be set
1153  * to 0 and -EBUSY returned.
1154  */
1155 static int faultin_page(struct vm_area_struct *vma,
1156                 unsigned long address, unsigned int *flags, bool unshare,
1157                 int *locked)
1158 {
1159         unsigned int fault_flags = 0;
1160         vm_fault_t ret;
1161 
1162         if (*flags & FOLL_NOFAULT)
1163                 return -EFAULT;
1164         if (*flags & FOLL_WRITE)
1165                 fault_flags |= FAULT_FLAG_WRITE;
1166         if (*flags & FOLL_REMOTE)
1167                 fault_flags |= FAULT_FLAG_REMOTE;
1168         if (*flags & FOLL_UNLOCKABLE) {
1169                 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1170                 /*
1171                  * FAULT_FLAG_INTERRUPTIBLE is opt-in. GUP callers must set
1172                  * FOLL_INTERRUPTIBLE to enable FAULT_FLAG_INTERRUPTIBLE.
1173                  * That's because some callers may not be prepared to
1174                  * handle early exits caused by non-fatal signals.
1175                  */
1176                 if (*flags & FOLL_INTERRUPTIBLE)
1177                         fault_flags |= FAULT_FLAG_INTERRUPTIBLE;
1178         }
1179         if (*flags & FOLL_NOWAIT)
1180                 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
1181         if (*flags & FOLL_TRIED) {
1182                 /*
1183                  * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
1184                  * can co-exist
1185                  */
1186                 fault_flags |= FAULT_FLAG_TRIED;
1187         }
1188         if (unshare) {
1189                 fault_flags |= FAULT_FLAG_UNSHARE;
1190                 /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
1191                 VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
1192         }
1193 
1194         ret = handle_mm_fault(vma, address, fault_flags, NULL);
1195 
1196         if (ret & VM_FAULT_COMPLETED) {
1197                 /*
1198                  * With FAULT_FLAG_RETRY_NOWAIT we'll never release the
1199                  * mmap lock in the page fault handler. Sanity check this.
1200                  */
1201                 WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
1202                 *locked = 0;
1203 
1204                 /*
1205                  * We should do the same as VM_FAULT_RETRY, but let's not
1206                  * return -EBUSY since that's not reflecting the reality of
1207                  * what has happened - we've just fully completed a page
1208                  * fault, with the mmap lock released.  Use -EAGAIN to show
1209                  * that we want to take the mmap lock _again_.
1210                  */
1211                 return -EAGAIN;
1212         }
1213 
1214         if (ret & VM_FAULT_ERROR) {
1215                 int err = vm_fault_to_errno(ret, *flags);
1216 
1217                 if (err)
1218                         return err;
1219                 BUG();
1220         }
1221 
1222         if (ret & VM_FAULT_RETRY) {
1223                 if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
1224                         *locked = 0;
1225                 return -EBUSY;
1226         }
1227 
1228         return 0;
1229 }
1230 
1231 /*
1232  * Writing to file-backed mappings which require folio dirty tracking using GUP
1233  * is a fundamentally broken operation, as kernel write access to GUP mappings
1234  * does not adhere to the semantics expected by a file system.
1235  *
1236  * Consider the following scenario:-
1237  *
1238  * 1. A folio is written to via GUP which write-faults the memory, notifying
1239  *    the file system and dirtying the folio.
1240  * 2. Later, writeback is triggered, resulting in the folio being cleaned and
1241  *    the PTE being marked read-only.
1242  * 3. The GUP caller writes to the folio, as it is mapped read/write via the
1243  *    direct mapping.
1244  * 4. The GUP caller, now done with the page, unpins it and sets it dirty
1245  *    (though it does not have to).
1246  *
1247  * This results in both data being written to a folio without writenotify, and
1248  * the folio being dirtied unexpectedly (if the caller decides to do so).
1249  */
1250 static bool writable_file_mapping_allowed(struct vm_area_struct *vma,
1251                                           unsigned long gup_flags)
1252 {
1253         /*
1254          * If we aren't pinning then no problematic write can occur. A long term
1255          * pin is the most egregious case so this is the case we disallow.
1256          */
1257         if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) !=
1258             (FOLL_PIN | FOLL_LONGTERM))
1259                 return true;
1260 
1261         /*
1262          * If the VMA does not require dirty tracking then no problematic write
1263          * can occur either.
1264          */
1265         return !vma_needs_dirty_tracking(vma);
1266 }
1267 
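/*
 * Hedged illustration of the rule above: a long-term writable pin of a
 * shared, dirty-tracked file mapping is refused before any page is pinned.
 * The mapping setup is the hypothetical caller's; the failure comes from
 * check_vma_flags() below rejecting the VMA.
 *
 *	// addr lies in a MAP_SHARED mapping of a regular (writenotify) file
 *	pinned = pin_user_pages(addr, 1, FOLL_WRITE | FOLL_LONGTERM, &page);
 *	// expected: pinned < 0, since the write would bypass dirty tracking
 */
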
1268 static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
1269 {
1270         vm_flags_t vm_flags = vma->vm_flags;
1271         int write = (gup_flags & FOLL_WRITE);
1272         int foreign = (gup_flags & FOLL_REMOTE);
1273         bool vma_anon = vma_is_anonymous(vma);
1274 
1275         if (vm_flags & (VM_IO | VM_PFNMAP))
1276                 return -EFAULT;
1277 
1278         if ((gup_flags & FOLL_ANON) && !vma_anon)
1279                 return -EFAULT;
1280 
1281         if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
1282                 return -EOPNOTSUPP;
1283 
1284         if (vma_is_secretmem(vma))
1285                 return -EFAULT;
1286 
1287         if (write) {
1288                 if (!vma_anon &&
1289                     !writable_file_mapping_allowed(vma, gup_flags))
1290                         return -EFAULT;
1291 
1292                 if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) {
1293                         if (!(gup_flags & FOLL_FORCE))
1294                                 return -EFAULT;
1295                         /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */
1296                         if (is_vm_hugetlb_page(vma))
1297                                 return -EFAULT;
1298                         /*
1299                          * We used to let the write,force case do COW in a
1300                          * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
1301                          * set a breakpoint in a read-only mapping of an
1302                          * executable, without corrupting the file (yet only
1303                          * when that file had been opened for writing!).
1304                          * Anon pages in shared mappings are surprising: now
1305                          * just reject it.
1306                          */
1307                         if (!is_cow_mapping(vm_flags))
1308                                 return -EFAULT;
1309                 }
1310         } else if (!(vm_flags & VM_READ)) {
1311                 if (!(gup_flags & FOLL_FORCE))
1312                         return -EFAULT;
1313                 /*
1314                  * Is there actually any vma we can reach here which does not
1315                  * have VM_MAYREAD set?
1316                  */
1317                 if (!(vm_flags & VM_MAYREAD))
1318                         return -EFAULT;
1319         }
1320         /*
1321          * gups are always data accesses, not instruction
1322          * fetches, so execute=false here
1323          */
1324         if (!arch_vma_access_permitted(vma, write, false, foreign))
1325                 return -EFAULT;
1326         return 0;
1327 }
1328 
1329 /*
1330  * This is "vma_lookup()", but with a warning if we would have
1331  * historically expanded the stack in the GUP code.
1332  */
1333 static struct vm_area_struct *gup_vma_lookup(struct mm_struct *mm,
1334          unsigned long addr)
1335 {
1336 #ifdef CONFIG_STACK_GROWSUP
1337         return vma_lookup(mm, addr);
1338 #else
1339         static volatile unsigned long next_warn;
1340         struct vm_area_struct *vma;
1341         unsigned long now, next;
1342 
1343         vma = find_vma(mm, addr);
1344         if (!vma || (addr >= vma->vm_start))
1345                 return vma;
1346 
1347         /* Only warn for half-way relevant accesses */
1348         if (!(vma->vm_flags & VM_GROWSDOWN))
1349                 return NULL;
1350         if (vma->vm_start - addr > 65536)
1351                 return NULL;
1352 
1353         /* Let's not warn more than once an hour.. */
1354         now = jiffies; next = next_warn;
1355         if (next && time_before(now, next))
1356                 return NULL;
1357         next_warn = now + 60*60*HZ;
1358 
1359         /* Let people know things may have changed. */
1360         pr_warn("GUP no longer grows the stack in %s (%d): %lx-%lx (%lx)\n",
1361                 current->comm, task_pid_nr(current),
1362                 vma->vm_start, vma->vm_end, addr);
1363         dump_stack();
1364         return NULL;
1365 #endif
1366 }
1367 
1368 /**
1369  * __get_user_pages() - pin user pages in memory
1370  * @mm:         mm_struct of target mm
1371  * @start:      starting user address
1372  * @nr_pages:   number of pages from start to pin
1373  * @gup_flags:  flags modifying pin behaviour
1374  * @pages:      array that receives pointers to the pages pinned.
1375  *              Should be at least nr_pages long. Or NULL, if caller
1376  *              only intends to ensure the pages are faulted in.
1377  * @locked:     whether we're still with the mmap_lock held
1378  *
1379  * Returns either number of pages pinned (which may be less than the
1380  * number requested), or an error. Details about the return value:
1381  *
1382  * -- If nr_pages is 0, returns 0.
1383  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1384  * -- If nr_pages is >0, and some pages were pinned, returns the number of
1385  *    pages pinned. Again, this may be less than nr_pages.
1386  * -- 0 return value is possible when the fault would need to be retried.
1387  *
1388  * The caller is responsible for releasing returned @pages, via put_page().
1389  *
1390  * Must be called with mmap_lock held.  It may be released.  See below.
1391  *
1392  * __get_user_pages walks a process's page tables and takes a reference to
1393  * each struct page that each user address corresponds to at a given
1394  * instant. That is, it takes the page that would be accessed if a user
1395  * thread accesses the given user virtual address at that instant.
1396  *
1397  * This does not guarantee that the page exists in the user mappings when
1398  * __get_user_pages returns, and there may even be a completely different
1399  * page there in some cases (eg. if mmapped pagecache has been invalidated
1400  * and subsequently re-faulted). However it does guarantee that the page
1401  * won't be freed completely. And mostly callers simply care that the page
1402  * contains data that was valid *at some point in time*. Typically, an IO
1403  * or similar operation cannot guarantee anything stronger anyway because
1404  * locks can't be held over the syscall boundary.
1405  *
1406  * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1407  * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1408  * appropriate) must be called after the page is finished with, and
1409  * before put_page is called.
1410  *
1411  * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
1412  * be released. If this happens *@locked will be set to 0 on return.
1413  *
1414  * A caller using such a combination of @gup_flags must therefore hold the
1415  * mmap_lock for reading only, and recognize when it's been released. Otherwise,
1416  * it must be held for either reading or writing and will not be released.
1417  *
1418  * In most cases, get_user_pages or get_user_pages_fast should be used
1419  * instead of __get_user_pages. __get_user_pages should be used only if
1420  * you need some special @gup_flags.
1421  */
1422 static long __get_user_pages(struct mm_struct *mm,
1423                 unsigned long start, unsigned long nr_pages,
1424                 unsigned int gup_flags, struct page **pages,
1425                 int *locked)
1426 {
1427         long ret = 0, i = 0;
1428         struct vm_area_struct *vma = NULL;
1429         struct follow_page_context ctx = { NULL };
1430 
1431         if (!nr_pages)
1432                 return 0;
1433 
1434         start = untagged_addr_remote(mm, start);
1435 
1436         VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
1437 
1438         do {
1439                 struct page *page;
1440                 unsigned int foll_flags = gup_flags;
1441                 unsigned int page_increm;
1442 
1443                 /* first iteration or crossing a vma boundary */
1444                 if (!vma || start >= vma->vm_end) {
1445                         /*
1446                          * MADV_POPULATE_(READ|WRITE) wants to handle VMA
1447                          * lookups+error reporting differently.
1448                          */
1449                         if (gup_flags & FOLL_MADV_POPULATE) {
1450                                 vma = vma_lookup(mm, start);
1451                                 if (!vma) {
1452                                         ret = -ENOMEM;
1453                                         goto out;
1454                                 }
1455                                 if (check_vma_flags(vma, gup_flags)) {
1456                                         ret = -EINVAL;
1457                                         goto out;
1458                                 }
1459                                 goto retry;
1460                         }
1461                         vma = gup_vma_lookup(mm, start);
1462                         if (!vma && in_gate_area(mm, start)) {
1463                                 ret = get_gate_page(mm, start & PAGE_MASK,
1464                                                 gup_flags, &vma,
1465                                                 pages ? &page : NULL);
1466                                 if (ret)
1467                                         goto out;
1468                                 ctx.page_mask = 0;
1469                                 goto next_page;
1470                         }
1471 
1472                         if (!vma) {
1473                                 ret = -EFAULT;
1474                                 goto out;
1475                         }
1476                         ret = check_vma_flags(vma, gup_flags);
1477                         if (ret)
1478                                 goto out;
1479                 }
1480 retry:
1481                 /*
1482                  * If we have a pending SIGKILL, don't keep faulting pages and
1483                  * potentially allocating memory.
1484                  */
1485                 if (fatal_signal_pending(current)) {
1486                         ret = -EINTR;
1487                         goto out;
1488                 }
1489                 cond_resched();
1490 
1491                 page = follow_page_mask(vma, start, foll_flags, &ctx);
1492                 if (!page || PTR_ERR(page) == -EMLINK) {
1493                         ret = faultin_page(vma, start, &foll_flags,
1494                                            PTR_ERR(page) == -EMLINK, locked);
1495                         switch (ret) {
1496                         case 0:
1497                                 goto retry;
1498                         case -EBUSY:
1499                         case -EAGAIN:
1500                                 ret = 0;
1501                                 fallthrough;
1502                         case -EFAULT:
1503                         case -ENOMEM:
1504                         case -EHWPOISON:
1505                                 goto out;
1506                         }
1507                         BUG();
1508                 } else if (PTR_ERR(page) == -EEXIST) {
1509                         /*
1510                          * Proper page table entry exists, but no corresponding
1511                          * struct page. If the caller expects **pages to be
1512                          * filled in, bail out now, because that can't be done
1513                          * for this page.
1514                          */
1515                         if (pages) {
1516                                 ret = PTR_ERR(page);
1517                                 goto out;
1518                         }
1519                 } else if (IS_ERR(page)) {
1520                         ret = PTR_ERR(page);
1521                         goto out;
1522                 }
1523 next_page:
1524                 page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
1525                 if (page_increm > nr_pages)
1526                         page_increm = nr_pages;
1527 
1528                 if (pages) {
1529                         struct page *subpage;
1530                         unsigned int j;
1531 
1532                         /*
1533                          * This must be a large folio (and doesn't need to
1534                          * be the whole folio; it can be part of it), so do
1535                          * the refcount work for all the subpages too.
1536                          *
1537                          * NOTE: here the page may not be the head page
1538                          * e.g. when start addr is not thp-size aligned.
1539                          * try_grab_folio() should have taken care of tail
1540                          * pages.
1541                          */
1542                         if (page_increm > 1) {
1543                                 struct folio *folio = page_folio(page);
1544 
1545                                 /*
1546                                  * Since we already hold a refcount on the
1547                                  * large folio, this should never fail.
1548                                  */
1549                                 if (try_grab_folio(folio, page_increm - 1,
1550                                                    foll_flags)) {
1551                                         /*
1552                                          * Release the 1st page ref if the
1553                                          * folio is problematic, fail hard.
1554                                          */
1555                                         gup_put_folio(folio, 1,
1556                                                       foll_flags);
1557                                         ret = -EFAULT;
1558                                         goto out;
1559                                 }
1560                         }
1561 
1562                         for (j = 0; j < page_increm; j++) {
1563                                 subpage = nth_page(page, j);
1564                                 pages[i + j] = subpage;
1565                                 flush_anon_page(vma, subpage, start + j * PAGE_SIZE);
1566                                 flush_dcache_page(subpage);
1567                         }
1568                 }
1569 
1570                 i += page_increm;
1571                 start += page_increm * PAGE_SIZE;
1572                 nr_pages -= page_increm;
1573         } while (nr_pages);
1574 out:
1575         if (ctx.pgmap)
1576                 put_dev_pagemap(ctx.pgmap);
1577         return i ? i : ret;
1578 }
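
/*
 * Example (editorial sketch, not part of gup.c): one way a caller might honour
 * the contract described in the kernel-doc above -- hold the mmap_lock, cope
 * with a short return, and release every page with put_page() after marking it
 * dirty if it was written to.  It uses the public get_user_pages() wrapper,
 * since __get_user_pages() is static; the function name is hypothetical.
 */
static long example_pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
				    struct page **pages)
{
	long pinned, i;

	mmap_read_lock(current->mm);
	pinned = get_user_pages(uaddr, nr_pages, FOLL_WRITE, pages);
	mmap_read_unlock(current->mm);
	if (pinned <= 0)
		return pinned;			/* -errno, or 0 only for nr_pages == 0 */

	/* ... write to the pages, e.g. via kmap_local_page() ... */

	for (i = 0; i < pinned; i++) {
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	return pinned;				/* may be less than nr_pages */
}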
1579 
1580 static bool vma_permits_fault(struct vm_area_struct *vma,
1581                               unsigned int fault_flags)
1582 {
1583         bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
1584         bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
1585         vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
1586 
1587         if (!(vm_flags & vma->vm_flags))
1588                 return false;
1589 
1590         /*
1591          * The architecture might have a hardware protection
1592          * mechanism other than read/write that can deny access.
1593          *
1594          * gup always represents data access, not instruction
1595          * fetches, so execute=false here:
1596          */
1597         if (!arch_vma_access_permitted(vma, write, false, foreign))
1598                 return false;
1599 
1600         return true;
1601 }
1602 
1603 /**
1604  * fixup_user_fault() - manually resolve a user page fault
1605  * @mm:         mm_struct of target mm
1606  * @address:    user address
1607  * @fault_flags:flags to pass down to handle_mm_fault()
1608  * @unlocked:   did we unlock the mmap_lock while retrying; may be NULL if caller
1609  *              does not allow retry. If NULL, the caller must guarantee
1610  *              that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
1611  *
1612  * This is meant to be called in the specific scenario where, for locking reasons,
1613  * we try to access user memory in atomic context (within a pagefault_disable()
1614  * section); that access returns -EFAULT, and we want to resolve the user fault
1615  * before trying again.
1616  *
1617  * Typically this is meant to be used by the futex code.
1618  *
1619  * The main difference with get_user_pages() is that this function will
1620  * unconditionally call handle_mm_fault() which will in turn perform all the
1621  * necessary SW fixup of the dirty and young bits in the PTE, while
1622  * get_user_pages() only guarantees to update these in the struct page.
1623  *
1624  * This is important for some architectures where those bits also gate the
1625  * access permission to the page because they are maintained in software.  On
1626  * such architectures, gup() will not be enough to make a subsequent access
1627  * succeed.
1628  *
1629  * This function will not return with an unlocked mmap_lock, so it does not have
1630  * the same semantics wrt the @mm->mmap_lock as filemap_fault() does.
1631  */
1632 int fixup_user_fault(struct mm_struct *mm,
1633                      unsigned long address, unsigned int fault_flags,
1634                      bool *unlocked)
1635 {
1636         struct vm_area_struct *vma;
1637         vm_fault_t ret;
1638 
1639         address = untagged_addr_remote(mm, address);
1640 
1641         if (unlocked)
1642                 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1643 
1644 retry:
1645         vma = gup_vma_lookup(mm, address);
1646         if (!vma)
1647                 return -EFAULT;
1648 
1649         if (!vma_permits_fault(vma, fault_flags))
1650                 return -EFAULT;
1651 
1652         if ((fault_flags & FAULT_FLAG_KILLABLE) &&
1653             fatal_signal_pending(current))
1654                 return -EINTR;
1655 
1656         ret = handle_mm_fault(vma, address, fault_flags, NULL);
1657 
1658         if (ret & VM_FAULT_COMPLETED) {
1659                 /*
1660                  * NOTE: it's a pity that we need to retake the lock here
1661                  * to pair with the unlock() in the callers. Ideally we
1662                  * could tell the callers so they do not need to unlock.
1663                  */
1664                 mmap_read_lock(mm);
1665                 *unlocked = true;
1666                 return 0;
1667         }
1668 
1669         if (ret & VM_FAULT_ERROR) {
1670                 int err = vm_fault_to_errno(ret, 0);
1671 
1672                 if (err)
1673                         return err;
1674                 BUG();
1675         }
1676 
1677         if (ret & VM_FAULT_RETRY) {
1678                 mmap_read_lock(mm);
1679                 *unlocked = true;
1680                 fault_flags |= FAULT_FLAG_TRIED;
1681                 goto retry;
1682         }
1683 
1684         return 0;
1685 }
1686 EXPORT_SYMBOL_GPL(fixup_user_fault);
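
/*
 * Example (editorial sketch, not part of gup.c): the futex-style pattern the
 * comment above describes -- an access attempted under pagefault_disable()
 * fails with -EFAULT, so the fault is resolved with fixup_user_fault() and the
 * access is retried.  The helper name is hypothetical.
 */
static int example_write_user_word(u32 __user *uaddr, u32 val)
{
	struct mm_struct *mm = current->mm;
	bool unlocked = false;
	int ret;

	for (;;) {
		pagefault_disable();
		ret = put_user(val, uaddr);
		pagefault_enable();
		if (!ret)
			return 0;

		mmap_read_lock(mm);
		ret = fixup_user_fault(mm, (unsigned long)uaddr,
				       FAULT_FLAG_WRITE, &unlocked);
		mmap_read_unlock(mm);	/* lock is still held on return */
		if (ret)
			return ret;
	}
}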
1687 
1688 /*
1689  * GUP always responds to fatal signals.  When FOLL_INTERRUPTIBLE is
1690  * specified, it'll also respond to generic signals.  The caller of GUP
1691  * that has FOLL_INTERRUPTIBLE should take care of the GUP interruption.
1692  */
1693 static bool gup_signal_pending(unsigned int flags)
1694 {
1695         if (fatal_signal_pending(current))
1696                 return true;
1697 
1698         if (!(flags & FOLL_INTERRUPTIBLE))
1699                 return false;
1700 
1701         return signal_pending(current);
1702 }
1703 
1704 /*
1705  * Locking: (*locked == 1) means that the mmap_lock has already been acquired by
1706  * the caller. This function may drop the mmap_lock. If it does so, then it will
1707  * set (*locked = 0).
1708  *
1709  * (*locked == 0) means that the caller expects this function to acquire and
1710  * drop the mmap_lock. Therefore, the value of *locked will still be zero when
1711  * the function returns, even though it may have changed temporarily during
1712  * function execution.
1713  *
1714  * Please note that this function, unlike __get_user_pages(), will not return 0
1715  * for nr_pages > 0, unless FOLL_NOWAIT is used.
1716  */
1717 static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
1718                                                 unsigned long start,
1719                                                 unsigned long nr_pages,
1720                                                 struct page **pages,
1721                                                 int *locked,
1722                                                 unsigned int flags)
1723 {
1724         long ret, pages_done;
1725         bool must_unlock = false;
1726 
1727         if (!nr_pages)
1728                 return 0;
1729 
1730         /*
1731          * The internal caller expects GUP to manage the lock internally and the
1732          * lock must be released when this returns.
1733          */
1734         if (!*locked) {
1735                 if (mmap_read_lock_killable(mm))
1736                         return -EAGAIN;
1737                 must_unlock = true;
1738                 *locked = 1;
1739         }
1740         else
1741                 mmap_assert_locked(mm);
1742 
1743         if (flags & FOLL_PIN)
1744                 mm_set_has_pinned_flag(&mm->flags);
1745 
1746         /*
1747          * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1748          * is to set FOLL_GET if the caller wants pages[] filled in (but has
1749          * carelessly failed to specify FOLL_GET), so keep doing that, but only
1750          * for FOLL_GET, not for the newer FOLL_PIN.
1751          *
1752          * FOLL_PIN always expects pages to be non-null, but no need to assert
1753          * that here, as any failures will be obvious enough.
1754          */
1755         if (pages && !(flags & FOLL_PIN))
1756                 flags |= FOLL_GET;
1757 
1758         pages_done = 0;
1759         for (;;) {
1760                 ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1761                                        locked);
1762                 if (!(flags & FOLL_UNLOCKABLE)) {
1763                         /* VM_FAULT_RETRY couldn't trigger, bypass */
1764                         pages_done = ret;
1765                         break;
1766                 }
1767 
1768                 /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
1769                 if (!*locked) {
1770                         BUG_ON(ret < 0);
1771                         BUG_ON(ret >= nr_pages);
1772                 }
1773 
1774                 if (ret > 0) {
1775                         nr_pages -= ret;
1776                         pages_done += ret;
1777                         if (!nr_pages)
1778                                 break;
1779                 }
1780                 if (*locked) {
1781                         /*
1782                          * VM_FAULT_RETRY didn't trigger or it was a
1783                          * FOLL_NOWAIT.
1784                          */
1785                         if (!pages_done)
1786                                 pages_done = ret;
1787                         break;
1788                 }
1789                 /*
1790                  * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1791                  * For the prefault case (!pages) we only update counts.
1792                  */
1793                 if (likely(pages))
1794                         pages += ret;
1795                 start += ret << PAGE_SHIFT;
1796 
1797                 /* The lock was temporarily dropped, so we must unlock later */
1798                 must_unlock = true;
1799 
1800 retry:
1801                 /*
1802                  * Repeat on the address that fired VM_FAULT_RETRY
1803                  * with both FAULT_FLAG_ALLOW_RETRY and
1804                  * FAULT_FLAG_TRIED.  Note that GUP can be interrupted
1805                  * by fatal signals or even common signals, depending on
1806                  * the caller's request. So we need to check for pending
1807                  * signals before retrying, otherwise this can loop forever.
1808                  */
1809                 if (gup_signal_pending(flags)) {
1810                         if (!pages_done)
1811                                 pages_done = -EINTR;
1812                         break;
1813                 }
1814 
1815                 ret = mmap_read_lock_killable(mm);
1816                 if (ret) {
1817                         BUG_ON(ret > 0);
1818                         if (!pages_done)
1819                                 pages_done = ret;
1820                         break;
1821                 }
1822 
1823                 *locked = 1;
1824                 ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
1825                                        pages, locked);
1826                 if (!*locked) {
1827                         /* Continue to retry until we succeed */
1828                         BUG_ON(ret != 0);
1829                         goto retry;
1830                 }
1831                 if (ret != 1) {
1832                         BUG_ON(ret > 1);
1833                         if (!pages_done)
1834                                 pages_done = ret;
1835                         break;
1836                 }
1837                 nr_pages--;
1838                 pages_done++;
1839                 if (!nr_pages)
1840                         break;
1841                 if (likely(pages))
1842                         pages++;
1843                 start += PAGE_SIZE;
1844         }
1845         if (must_unlock && *locked) {
1846                 /*
1847                  * We either temporarily dropped the lock, or the caller
1848                  * requested that we both acquire and drop the lock. Either way,
1849                  * we must now unlock, and notify the caller of that state.
1850                  */
1851                 mmap_read_unlock(mm);
1852                 *locked = 0;
1853         }
1854 
1855         /*
1856          * Failing to pin anything implies something has gone wrong (except when
1857          * FOLL_NOWAIT is specified).
1858          */
1859         if (WARN_ON_ONCE(pages_done == 0 && !(flags & FOLL_NOWAIT)))
1860                 return -EFAULT;
1861 
1862         return pages_done;
1863 }
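
/*
 * Example (editorial sketch, not part of gup.c): the "*locked" protocol
 * described above, as seen by an external caller of get_user_pages_remote().
 * GUP may drop the mmap_lock during the call, so the caller must re-check
 * *locked before unlocking.  The function name is hypothetical; pinned pages
 * must later be released with put_page().
 */
static long example_gup_remote_locked(struct mm_struct *mm, unsigned long start,
				      unsigned long nr_pages, struct page **pages)
{
	int locked = 1;			/* mmap_lock is held when we call in */
	long ret;

	mmap_read_lock(mm);
	ret = get_user_pages_remote(mm, start, nr_pages, FOLL_WRITE,
				    pages, &locked);
	if (locked)
		mmap_read_unlock(mm);	/* otherwise GUP already dropped it */
	return ret;
}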
1864 
1865 /**
1866  * populate_vma_page_range() -  populate a range of pages in the vma.
1867  * @vma:   target vma
1868  * @start: start address
1869  * @end:   end address
1870  * @locked: whether the mmap_lock is still held
1871  *
1872  * This takes care of mlocking the pages too if VM_LOCKED is set.
1873  *
1874  * Returns either the number of pages pinned in the vma, or a negative error
1875  * code on error.
1876  *
1877  * vma->vm_mm->mmap_lock must be held.
1878  *
1879  * If @locked is NULL, it may be held for read or write and will
1880  * be unperturbed.
1881  *
1882  * If @locked is non-NULL, it must be held for read only and may be
1883  * released.  If it's released, *@locked will be set to 0.
1884  */
1885 long populate_vma_page_range(struct vm_area_struct *vma,
1886                 unsigned long start, unsigned long end, int *locked)
1887 {
1888         struct mm_struct *mm = vma->vm_mm;
1889         unsigned long nr_pages = (end - start) / PAGE_SIZE;
1890         int local_locked = 1;
1891         int gup_flags;
1892         long ret;
1893 
1894         VM_BUG_ON(!PAGE_ALIGNED(start));
1895         VM_BUG_ON(!PAGE_ALIGNED(end));
1896         VM_BUG_ON_VMA(start < vma->vm_start, vma);
1897         VM_BUG_ON_VMA(end   > vma->vm_end, vma);
1898         mmap_assert_locked(mm);
1899 
1900         /*
1901          * Rightly or wrongly, the VM_LOCKONFAULT case has never used
1902          * faultin_page() to break COW, so it has no work to do here.
1903          */
1904         if (vma->vm_flags & VM_LOCKONFAULT)
1905                 return nr_pages;
1906 
1907         /* ... similarly, we've never faulted in PROT_NONE pages */
1908         if (!vma_is_accessible(vma))
1909                 return -EFAULT;
1910 
1911         gup_flags = FOLL_TOUCH;
1912         /*
1913          * We want to touch writable mappings with a write fault in order
1914          * to break COW, except for shared mappings because these don't COW
1915          * and we would not want to dirty them for nothing.
1916          *
1917          * Otherwise, do a read fault, and use FOLL_FORCE in case it's not
1918          * readable (ie write-only or executable).
1919          */
1920         if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1921                 gup_flags |= FOLL_WRITE;
1922         else
1923                 gup_flags |= FOLL_FORCE;
1924 
1925         if (locked)
1926                 gup_flags |= FOLL_UNLOCKABLE;
1927 
1928         /*
1929          * We made sure addr is within a VMA, so the following will
1930          * not result in a stack expansion that recurses back here.
1931          */
1932         ret = __get_user_pages(mm, start, nr_pages, gup_flags,
1933                                NULL, locked ? locked : &local_locked);
1934         lru_add_drain();
1935         return ret;
1936 }
1937 
1938 /*
1939  * faultin_page_range() - populate (prefault) page tables inside the
1940  *                        given range readable/writable
1941  *
1942  * This takes care of mlocking the pages, too, if VM_LOCKED is set.
1943  *
1944  * @mm: the mm to populate page tables in
1945  * @start: start address
1946  * @end: end address
1947  * @write: whether to prefault readable or writable
1948  * @locked: whether the mmap_lock is still held
1949  *
1950  * Returns either number of processed pages in the MM, or a negative error
1951  * code on error (see __get_user_pages()). Note that this function reports
1952  * errors related to VMAs, such as incompatible mappings, as expected by
1953  * MADV_POPULATE_(READ|WRITE).
1954  *
1955  * The range must be page-aligned.
1956  *
1957  * mm->mmap_lock must be held. If it's released, *@locked will be set to 0.
1958  */
1959 long faultin_page_range(struct mm_struct *mm, unsigned long start,
1960                         unsigned long end, bool write, int *locked)
1961 {
1962         unsigned long nr_pages = (end - start) / PAGE_SIZE;
1963         int gup_flags;
1964         long ret;
1965 
1966         VM_BUG_ON(!PAGE_ALIGNED(start));
1967         VM_BUG_ON(!PAGE_ALIGNED(end));
1968         mmap_assert_locked(mm);
1969 
1970         /*
1971          * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1972          *             the page dirty with FOLL_WRITE -- which doesn't make a
1973          *             difference with !FOLL_FORCE, because the page is writable
1974          *             in the page table.
1975          * FOLL_HWPOISON: Return -EHWPOISON instead of -EFAULT when we hit
1976          *                a poisoned page.
1977          * !FOLL_FORCE: Require proper access permissions.
1978          */
1979         gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_UNLOCKABLE |
1980                     FOLL_MADV_POPULATE;
1981         if (write)
1982                 gup_flags |= FOLL_WRITE;
1983 
1984         ret = __get_user_pages_locked(mm, start, nr_pages, NULL, locked,
1985                                       gup_flags);
1986         lru_add_drain();
1987         return ret;
1988 }
1989 
1990 /*
1991  * __mm_populate - populate and/or mlock pages within a range of address space.
1992  *
1993  * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1994  * flags. VMAs must be already marked with the desired vm_flags, and
1995  * mmap_lock must not be held.
1996  */
1997 int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1998 {
1999         struct mm_struct *mm = current->mm;
2000         unsigned long end, nstart, nend;
2001         struct vm_area_struct *vma = NULL;
2002         int locked = 0;
2003         long ret = 0;
2004 
2005         end = start + len;
2006 
2007         for (nstart = start; nstart < end; nstart = nend) {
2008                 /*
2009                  * We want to fault in pages for [nstart; end) address range.
2010                  * Find first corresponding VMA.
2011                  */
2012                 if (!locked) {
2013                         locked = 1;
2014                         mmap_read_lock(mm);
2015                         vma = find_vma_intersection(mm, nstart, end);
2016                 } else if (nstart >= vma->vm_end)
2017                         vma = find_vma_intersection(mm, vma->vm_end, end);
2018 
2019                 if (!vma)
2020                         break;
2021                 /*
2022                  * Set [nstart; nend) to intersection of desired address
2023                  * range with the first VMA. Also, skip undesirable VMA types.
2024                  */
2025                 nend = min(end, vma->vm_end);
2026                 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
2027                         continue;
2028                 if (nstart < vma->vm_start)
2029                         nstart = vma->vm_start;
2030                 /*
2031                  * Now fault in a range of pages. populate_vma_page_range()
2032                  * double checks the vma flags, so that it won't mlock pages
2033                  * if the vma was already munlocked.
2034                  */
2035                 ret = populate_vma_page_range(vma, nstart, nend, &locked);
2036                 if (ret < 0) {
2037                         if (ignore_errors) {
2038                                 ret = 0;
2039                                 continue;       /* continue at next VMA */
2040                         }
2041                         break;
2042                 }
2043                 nend = nstart + ret * PAGE_SIZE;
2044                 ret = 0;
2045         }
2046         if (locked)
2047                 mmap_read_unlock(mm);
2048         return ret;     /* 0 or negative error code */
2049 }
2050 #else /* CONFIG_MMU */
2051 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
2052                 unsigned long nr_pages, struct page **pages,
2053                 int *locked, unsigned int foll_flags)
2054 {
2055         struct vm_area_struct *vma;
2056         bool must_unlock = false;
2057         unsigned long vm_flags;
2058         long i;
2059 
2060         if (!nr_pages)
2061                 return 0;
2062 
2063         /*
2064          * The internal caller expects GUP to manage the lock internally and the
2065          * lock must be released when this returns.
2066          */
2067         if (!*locked) {
2068                 if (mmap_read_lock_killable(mm))
2069                         return -EAGAIN;
2070                 must_unlock = true;
2071                 *locked = 1;
2072         }
2073 
2074         /* calculate required read or write permissions.
2075          * If FOLL_FORCE is set, we only require the "MAY" flags.
2076          */
2077         vm_flags  = (foll_flags & FOLL_WRITE) ?
2078                         (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
2079         vm_flags &= (foll_flags & FOLL_FORCE) ?
2080                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
2081 
2082         for (i = 0; i < nr_pages; i++) {
2083                 vma = find_vma(mm, start);
2084                 if (!vma)
2085                         break;
2086 
2087                 /* protect what we can, including chardevs */
2088                 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
2089                     !(vm_flags & vma->vm_flags))
2090                         break;
2091 
2092                 if (pages) {
2093                         pages[i] = virt_to_page((void *)start);
2094                         if (pages[i])
2095                                 get_page(pages[i]);
2096                 }
2097 
2098                 start = (start + PAGE_SIZE) & PAGE_MASK;
2099         }
2100 
2101         if (must_unlock && *locked) {
2102                 mmap_read_unlock(mm);
2103                 *locked = 0;
2104         }
2105 
2106         return i ? : -EFAULT;
2107 }
2108 #endif /* !CONFIG_MMU */
2109 
2110 /**
2111  * fault_in_writeable - fault in userspace address range for writing
2112  * @uaddr: start of address range
2113  * @size: size of address range
2114  *
2115  * Returns the number of bytes not faulted in (like copy_to_user() and
2116  * copy_from_user()).
2117  */
2118 size_t fault_in_writeable(char __user *uaddr, size_t size)
2119 {
2120         char __user *start = uaddr, *end;
2121 
2122         if (unlikely(size == 0))
2123                 return 0;
2124         if (!user_write_access_begin(uaddr, size))
2125                 return size;
2126         if (!PAGE_ALIGNED(uaddr)) {
2127                 unsafe_put_user(0, uaddr, out);
2128                 uaddr = (char __user *)PAGE_ALIGN((unsigned long)uaddr);
2129         }
2130         end = (char __user *)PAGE_ALIGN((unsigned long)start + size);
2131         if (unlikely(end < start))
2132                 end = NULL;
2133         while (uaddr != end) {
2134                 unsafe_put_user(0, uaddr, out);
2135                 uaddr += PAGE_SIZE;
2136         }
2137 
2138 out:
2139         user_write_access_end();
2140         if (size > uaddr - start)
2141                 return size - (uaddr - start);
2142         return 0;
2143 }
2144 EXPORT_SYMBOL(fault_in_writeable);
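
/*
 * Example (editorial sketch, not part of gup.c): the usual retry pairing for
 * the return convention above -- when a copy attempted with page faults
 * disabled comes up short, fault the remainder in and try again, giving up
 * once nothing more can be faulted in.  (Sub-page failure modes such as arm64
 * MTE are what fault_in_subpage_writeable() below is for.)  The function name
 * is hypothetical.
 */
static int example_copy_out_atomic(char __user *dst, const char *src, size_t len)
{
	while (len) {
		size_t left;

		pagefault_disable();
		left = copy_to_user(dst, src, len);
		pagefault_enable();
		if (!left)
			return 0;

		/* Skip what did copy, then fault in the rest and retry. */
		dst += len - left;
		src += len - left;
		len = left;
		if (fault_in_writeable(dst, len) == len)
			return -EFAULT;		/* nothing could be faulted in */
	}
	return 0;
}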
2145 
2146 /**
2147  * fault_in_subpage_writeable - fault in an address range for writing
2148  * @uaddr: start of address range
2149  * @size: size of address range
2150  *
2151  * Fault in a user address range for writing while checking for permissions at
2152  * sub-page granularity (e.g. arm64 MTE). This function should be used when
2153  * the caller cannot guarantee forward progress of a copy_to_user() loop.
2154  *
2155  * Returns the number of bytes not faulted in (like copy_to_user() and
2156  * copy_from_user()).
2157  */
2158 size_t fault_in_subpage_writeable(char __user *uaddr, size_t size)
2159 {
2160         size_t faulted_in;
2161 
2162         /*
2163          * Attempt faulting in at page granularity first for page table
2164          * permission checking. The arch-specific probe_subpage_writeable()
2165          * functions may not check for this.
2166          */
2167         faulted_in = size - fault_in_writeable(uaddr, size);
2168         if (faulted_in)
2169                 faulted_in -= probe_subpage_writeable(uaddr, faulted_in);
2170 
2171         return size - faulted_in;
2172 }
2173 EXPORT_SYMBOL(fault_in_subpage_writeable);
2174 
2175 /*
2176  * fault_in_safe_writeable - fault in an address range for writing
2177  * @uaddr: start of address range
2178  * @size: length of address range
2179  *
2180  * Faults in an address range for writing.  This is primarily useful when we
2181  * already know that some or all of the pages in the address range aren't in
2182  * memory.
2183  *
2184  * Unlike fault_in_writeable(), this function is non-destructive.
2185  *
2186  * Note that we don't pin or otherwise hold a reference to the pages that we
2187  * fault in.  There's no guarantee that they'll stay in memory for any duration
2188  * of time.
2189  *
2190  * Returns the number of bytes not faulted in, like copy_to_user() and
2191  * copy_from_user().
2192  */
2193 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size)
2194 {
2195         unsigned long start = (unsigned long)uaddr, end;
2196         struct mm_struct *mm = current->mm;
2197         bool unlocked = false;
2198 
2199         if (unlikely(size == 0))
2200                 return 0;
2201         end = PAGE_ALIGN(start + size);
2202         if (end < start)
2203                 end = 0;
2204 
2205         mmap_read_lock(mm);
2206         do {
2207                 if (fixup_user_fault(mm, start, FAULT_FLAG_WRITE, &unlocked))
2208                         break;
2209                 start = (start + PAGE_SIZE) & PAGE_MASK;
2210         } while (start != end);
2211         mmap_read_unlock(mm);
2212 
2213         if (size > (unsigned long)uaddr - start)
2214                 return size - ((unsigned long)uaddr - start);
2215         return 0;
2216 }
2217 EXPORT_SYMBOL(fault_in_safe_writeable);
2218 
2219 /**
2220  * fault_in_readable - fault in userspace address range for reading
2221  * @uaddr: start of user address range
2222  * @size: size of user address range
2223  *
2224  * Returns the number of bytes not faulted in (like copy_to_user() and
2225  * copy_from_user()).
2226  */
2227 size_t fault_in_readable(const char __user *uaddr, size_t size)
2228 {
2229         const char __user *start = uaddr, *end;
2230         volatile char c;
2231 
2232         if (unlikely(size == 0))
2233                 return 0;
2234         if (!user_read_access_begin(uaddr, size))
2235                 return size;
2236         if (!PAGE_ALIGNED(uaddr)) {
2237                 unsafe_get_user(c, uaddr, out);
2238                 uaddr = (const char __user *)PAGE_ALIGN((unsigned long)uaddr);
2239         }
2240         end = (const char __user *)PAGE_ALIGN((unsigned long)start + size);
2241         if (unlikely(end < start))
2242                 end = NULL;
2243         while (uaddr != end) {
2244                 unsafe_get_user(c, uaddr, out);
2245                 uaddr += PAGE_SIZE;
2246         }
2247 
2248 out:
2249         user_read_access_end();
2250         (void)c;
2251         if (size > uaddr - start)
2252                 return size - (uaddr - start);
2253         return 0;
2254 }
2255 EXPORT_SYMBOL(fault_in_readable);
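
/*
 * Example (editorial sketch, not part of gup.c): the read-side counterpart --
 * prefault the source buffer before taking locks under which page faults must
 * not occur, then copy with faults disabled.  Real callers retry on a short
 * copy (the pages may be reclaimed again in between); this hypothetical sketch
 * does not.
 */
static int example_copy_in_atomic(char *dst, const char __user *src, size_t len)
{
	size_t left;

	if (fault_in_readable(src, len) == len)
		return -EFAULT;			/* nothing could be faulted in */

	pagefault_disable();
	left = copy_from_user(dst, src, len);
	pagefault_enable();

	return left ? -EFAULT : 0;
}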
2256 
2257 /**
2258  * get_dump_page() - pin user page in memory while writing it to core dump
2259  * @addr: user address
2260  *
2261  * Returns struct page pointer of user page pinned for dump,
2262  * to be freed afterwards by put_page().
2263  *
2264  * Returns NULL on any kind of failure - a hole must then be inserted into
2265  * the corefile, to preserve alignment with its headers; and also returns
2266  * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
2267  * allowing a hole to be left in the corefile to save disk space.
2268  *
2269  * Called without mmap_lock (takes and releases the mmap_lock by itself).
2270  */
2271 #ifdef CONFIG_ELF_CORE
2272 struct page *get_dump_page(unsigned long addr)
2273 {
2274         struct page *page;
2275         int locked = 0;
2276         int ret;
2277 
2278         ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked,
2279                                       FOLL_FORCE | FOLL_DUMP | FOLL_GET);
2280         return (ret == 1) ? page : NULL;
2281 }
2282 #endif /* CONFIG_ELF_CORE */
2283 
2284 #ifdef CONFIG_MIGRATION
2285 /*
2286  * Returns the number of collected folios. Return value is always >= 0.
2287  */
2288 static unsigned long collect_longterm_unpinnable_folios(
2289                                         struct list_head *movable_folio_list,
2290                                         unsigned long nr_folios,
2291                                         struct folio **folios)
2292 {
2293         unsigned long i, collected = 0;
2294         struct folio *prev_folio = NULL;
2295         bool drain_allow = true;
2296 
2297         for (i = 0; i < nr_folios; i++) {
2298                 struct folio *folio = folios[i];
2299 
2300                 if (folio == prev_folio)
2301                         continue;
2302                 prev_folio = folio;
2303 
2304                 if (folio_is_longterm_pinnable(folio))
2305                         continue;
2306 
2307                 collected++;
2308 
2309                 if (folio_is_device_coherent(folio))
2310                         continue;
2311 
2312                 if (folio_test_hugetlb(folio)) {
2313                         isolate_hugetlb(folio, movable_folio_list);
2314                         continue;
2315                 }
2316 
2317                 if (!folio_test_lru(folio) && drain_allow) {
2318                         lru_add_drain_all();
2319                         drain_allow = false;
2320                 }
2321 
2322                 if (!folio_isolate_lru(folio))
2323                         continue;
2324 
2325                 list_add_tail(&folio->lru, movable_folio_list);
2326                 node_stat_mod_folio(folio,
2327                                     NR_ISOLATED_ANON + folio_is_file_lru(folio),
2328                                     folio_nr_pages(folio));
2329         }
2330 
2331         return collected;
2332 }
2333 
2334 /*
2335  * Unpins all folios; migrates device coherent folios and those on movable_folio_list.
2336  * Returns -EAGAIN if all folios were successfully migrated or -errno for
2337  * failure (or partial success).
2338  */
2339 static int migrate_longterm_unpinnable_folios(
2340                                         struct list_head *movable_folio_list,
2341                                         unsigned long nr_folios,
2342                                         struct folio **folios)
2343 {
2344         int ret;
2345         unsigned long i;
2346 
2347         for (i = 0; i < nr_folios; i++) {
2348                 struct folio *folio = folios[i];
2349 
2350                 if (folio_is_device_coherent(folio)) {
2351                         /*
2352                          * Migration will fail if the folio is pinned, so
2353                          * convert the pin on the source folio to a normal
2354                          * reference.
2355                          */
2356                         folios[i] = NULL;
2357                         folio_get(folio);
2358                         gup_put_folio(folio, 1, FOLL_PIN);
2359 
2360                         if (migrate_device_coherent_page(&folio->page)) {
2361                                 ret = -EBUSY;
2362                                 goto err;
2363                         }
2364 
2365                         continue;
2366                 }
2367 
2368                 /*
2369                  * We can't migrate folios with unexpected references, so drop
2370                  * the reference obtained by __get_user_pages_locked().
2371                  * Migrating folios have been added to movable_folio_list after
2372                  * calling folio_isolate_lru() which takes a reference so the
2373                  * folio won't be freed if it's migrating.
2374                  */
2375                 unpin_folio(folios[i]);
2376                 folios[i] = NULL;
2377         }
2378 
2379         if (!list_empty(movable_folio_list)) {
2380                 struct migration_target_control mtc = {
2381                         .nid = NUMA_NO_NODE,
2382                         .gfp_mask = GFP_USER | __GFP_NOWARN,
2383                         .reason = MR_LONGTERM_PIN,
2384                 };
2385 
2386                 if (migrate_pages(movable_folio_list, alloc_migration_target,
2387                                   NULL, (unsigned long)&mtc, MIGRATE_SYNC,
2388                                   MR_LONGTERM_PIN, NULL)) {
2389                         ret = -ENOMEM;
2390                         goto err;
2391                 }
2392         }
2393 
2394         putback_movable_pages(movable_folio_list);
2395 
2396         return -EAGAIN;
2397 
2398 err:
2399         unpin_folios(folios, nr_folios);
2400         putback_movable_pages(movable_folio_list);
2401 
2402         return ret;
2403 }
2404 
2405 /*
2406  * Check whether all folios are *allowed* to be pinned indefinitely (longterm).
2407  * Rather confusingly, all folios in the range are required to be pinned via
2408  * FOLL_PIN, before calling this routine.
2409  *
2410  * If any folios in the range are not allowed to be pinned, then this routine
2411  * will migrate those folios away, unpin all the folios in the range and return
2412  * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
2413  * call this routine again.
2414  *
2415  * If an error other than -EAGAIN occurs, this indicates a migration failure.
2416  * The caller should give up, and propagate the error back up the call stack.
2417  *
2418  * If everything is OK and all folios in the range are allowed to be pinned,
2419  * then this routine leaves all folios pinned and returns zero for success.
2420  */
2421 static long check_and_migrate_movable_folios(unsigned long nr_folios,
2422                                              struct folio **folios)
2423 {
2424         unsigned long collected;
2425         LIST_HEAD(movable_folio_list);
2426 
2427         collected = collect_longterm_unpinnable_folios(&movable_folio_list,
2428                                                        nr_folios, folios);
2429         if (!collected)
2430                 return 0;
2431 
2432         return migrate_longterm_unpinnable_folios(&movable_folio_list,
2433                                                   nr_folios, folios);
2434 }
2435 
2436 /*
2437  * This routine just converts all the pages in the @pages array to folios and
2438  * calls check_and_migrate_movable_folios() to do the heavy lifting.
2439  *
2440  * Please see the check_and_migrate_movable_folios() documentation for details.
2441  */
2442 static long check_and_migrate_movable_pages(unsigned long nr_pages,
2443                                             struct page **pages)
2444 {
2445         struct folio **folios;
2446         long i, ret;
2447 
2448         folios = kmalloc_array(nr_pages, sizeof(*folios), GFP_KERNEL);
2449         if (!folios)
2450                 return -ENOMEM;
2451 
2452         for (i = 0; i < nr_pages; i++)
2453                 folios[i] = page_folio(pages[i]);
2454 
2455         ret = check_and_migrate_movable_folios(nr_pages, folios);
2456 
2457         kfree(folios);
2458         return ret;
2459 }
2460 #else
2461 static long check_and_migrate_movable_pages(unsigned long nr_pages,
2462                                             struct page **pages)
2463 {
2464         return 0;
2465 }
2466 
2467 static long check_and_migrate_movable_folios(unsigned long nr_folios,
2468                                              struct folio **folios)
2469 {
2470         return 0;
2471 }
2472 #endif /* CONFIG_MIGRATION */
2473 
2474 /*
2475  * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
2476  * allows us to process the FOLL_LONGTERM flag.
2477  */
2478 static long __gup_longterm_locked(struct mm_struct *mm,
2479                                   unsigned long start,
2480                                   unsigned long nr_pages,
2481                                   struct page **pages,
2482                                   int *locked,
2483                                   unsigned int gup_flags)
2484 {
2485         unsigned int flags;
2486         long rc, nr_pinned_pages;
2487 
2488         if (!(gup_flags & FOLL_LONGTERM))
2489                 return __get_user_pages_locked(mm, start, nr_pages, pages,
2490                                                locked, gup_flags);
2491 
2492         flags = memalloc_pin_save();
2493         do {
2494                 nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
2495                                                           pages, locked,
2496                                                           gup_flags);
2497                 if (nr_pinned_pages <= 0) {
2498                         rc = nr_pinned_pages;
2499                         break;
2500                 }
2501 
2502                 /* FOLL_LONGTERM implies FOLL_PIN */
2503                 rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
2504         } while (rc == -EAGAIN);
2505         memalloc_pin_restore(flags);
2506         return rc ? rc : nr_pinned_pages;
2507 }
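
/*
 * Example (editorial sketch, not part of gup.c): the FOLL_LONGTERM handling
 * above is what backs long-term pins such as pin_user_pages_fast() with
 * FOLL_LONGTERM, used by callers that keep pages pinned across returns to
 * userspace (RDMA buffers and the like).  The function name is hypothetical.
 */
static int example_longterm_pin(unsigned long uaddr, int nr_pages,
				struct page **pages)
{
	int pinned;

	pinned = pin_user_pages_fast(uaddr, nr_pages,
				     FOLL_WRITE | FOLL_LONGTERM, pages);
	if (pinned <= 0)
		return pinned;

	/* ... hand the pages to hardware; much later ... */

	unpin_user_pages(pages, pinned);
	return pinned;
}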
2508 
2509 /*
2510  * Check that the given flags are valid for the exported gup/pup interface, and
2511  * update them with the required flags that the caller must have set.
2512  */
2513 static bool is_valid_gup_args(struct page **pages, int *locked,
2514                               unsigned int *gup_flags_p, unsigned int to_set)
2515 {
2516         unsigned int gup_flags = *gup_flags_p;
2517 
2518         /*
2519          * These flags are not allowed to be specified externally to the gup
2520          * interfaces:
2521          * - FOLL_TOUCH/FOLL_PIN/FOLL_TRIED/FOLL_FAST_ONLY are internal only
2522          * - FOLL_REMOTE is internal only and used on follow_page()
2523          * - FOLL_UNLOCKABLE is internal only and used if locked is !NULL
2524          */
2525         if (WARN_ON_ONCE(gup_flags & INTERNAL_GUP_FLAGS))
2526                 return false;
2527 
2528         gup_flags |= to_set;
2529         if (locked) {
2530                 /* At the external interface locked must be set */
2531                 if (WARN_ON_ONCE(*locked != 1))
2532                         return false;
2533 
2534                 gup_flags |= FOLL_UNLOCKABLE;
2535         }
2536 
2537         /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2538         if (WARN_ON_ONCE((gup_flags & (FOLL_PIN | FOLL_GET)) ==
2539                          (FOLL_PIN | FOLL_GET)))
2540                 return false;
2541 
2542         /* LONGTERM can only be specified when pinning */
2543         if (WARN_ON_ONCE(!(gup_flags & FOLL_PIN) && (gup_flags & FOLL_LONGTERM)))
2544                 return false;
2545 
2546         /* Pages input must be given if using GET/PIN */
2547         if (WARN_ON_ONCE((gup_flags & (FOLL_GET | FOLL_PIN)) && !pages))
2548                 return false;
2549 
2550         /* We want to allow the pgmap to be hot-unplugged at all times */
2551         if (WARN_ON_ONCE((gup_flags & FOLL_LONGTERM) &&
2552                          (gup_flags & FOLL_PCI_P2PDMA)))
2553                 return false;
2554 
2555         *gup_flags_p = gup_flags;
2556         return true;
2557 }
2558 
2559 #ifdef CONFIG_MMU
2560 /**
2561  * get_user_pages_remote() - pin user pages in memory
2562  * @mm:         mm_struct of target mm
2563  * @start:      starting user address
2564  * @nr_pages:   number of pages from start to pin
2565  * @gup_flags:  flags modifying lookup behaviour
2566  * @pages:      array that receives pointers to the pages pinned.
2567  *              Should be at least nr_pages long. Or NULL, if caller
2568  *              only intends to ensure the pages are faulted in.
2569  * @locked:     pointer to lock flag indicating whether lock is held and
2570  *              subsequently whether VM_FAULT_RETRY functionality can be
2571  *              utilised. Lock must initially be held.
2572  *
2573  * Returns either number of pages pinned (which may be less than the
2574  * number requested), or an error. Details about the return value:
2575  *
2576  * -- If nr_pages is 0, returns 0.
2577  * -- If nr_pages is >0, but no pages were pinned, returns -errno.
2578  * -- If nr_pages is >0, and some pages were pinned, returns the number of
2579  *    pages pinned. Again, this may be less than nr_pages.
2580  *
2581  * The caller is responsible for releasing returned @pages, via put_page().
2582  *
2583  * Must be called with mmap_lock held for read or write.
2584  *
2585  * get_user_pages_remote walks a process's page tables and takes a reference
2586  * to each struct page that each user address corresponds to at a given
2587  * instant. That is, it takes the page that would be accessed if a user
2588  * thread accesses the given user virtual address at that instant.
2589  *
2590  * This does not guarantee that the page exists in the user mappings when
2591  * get_user_pages_remote returns, and there may even be a completely different
2592  * page there in some cases (eg. if mmapped pagecache has been invalidated
2593  * and subsequently re-faulted). However it does guarantee that the page
2594  * won't be freed completely. And mostly callers simply care that the page
2595  * contains data that was valid *at some point in time*. Typically, an IO
2596  * or similar operation cannot guarantee anything stronger anyway because
2597  * locks can't be held over the syscall boundary.
2598  *
2599  * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2600  * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
2601  * be called after the page is finished with, and before put_page is called.
2602  *
2603  * get_user_pages_remote is typically used for fewer-copy IO operations,
2604  * to get a handle on the memory by some means other than accesses
2605  * via the user virtual addresses. The pages may be submitted for
2606  * DMA to devices or accessed via their kernel linear mapping (via the
2607  * kmap APIs). Care should be taken to use the correct cache flushing APIs.
2608  *
2609  * See also get_user_pages_fast, for performance critical applications.
2610  *
2611  * get_user_pages_remote should be phased out in favor of
2612  * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
2613  * should use get_user_pages_remote because it cannot pass
2614  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
2615  */
2616 long get_user_pages_remote(struct mm_struct *mm,
2617                 unsigned long start, unsigned long nr_pages,
2618                 unsigned int gup_flags, struct page **pages,
2619                 int *locked)
2620 {
2621         int local_locked = 1;
2622 
2623         if (!is_valid_gup_args(pages, locked, &gup_flags,
2624                                FOLL_TOUCH | FOLL_REMOTE))
2625                 return -EINVAL;
2626 
2627         return __get_user_pages_locked(mm, start, nr_pages, pages,
2628                                        locked ? locked : &local_locked,
2629                                        gup_flags);
2630 }
2631 EXPORT_SYMBOL(get_user_pages_remote);
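
/*
 * Example (editorial sketch, not part of gup.c): an access_process_vm()-style
 * read of another mm along the lines the kernel-doc above describes -- pin the
 * page, copy through its kernel mapping, drop the reference.  A hypothetical,
 * single-page sketch; real code clamps @len to the page boundary and uses
 * copy_from_user_page() for proper cache handling.
 */
static int example_remote_read(struct mm_struct *mm, unsigned long addr,
			       void *buf, int len)
{
	struct page *page;
	int locked = 1;
	void *kaddr;
	long ret;

	mmap_read_lock(mm);
	ret = get_user_pages_remote(mm, addr & PAGE_MASK, 1, 0, &page, &locked);
	if (locked)
		mmap_read_unlock(mm);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	kaddr = kmap_local_page(page);
	memcpy(buf, kaddr + offset_in_page(addr), len);
	kunmap_local(kaddr);
	put_page(page);
	return len;
}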
2632 
2633 #else /* CONFIG_MMU */
2634 long get_user_pages_remote(struct mm_struct *mm,
2635                            unsigned long start, unsigned long nr_pages,
2636                            unsigned int gup_flags, struct page **pages,
2637                            int *locked)
2638 {
2639         return 0;
2640 }
2641 #endif /* !CONFIG_MMU */
2642 
2643 /**
2644  * get_user_pages() - pin user pages in memory
2645  * @start:      starting user address
2646  * @nr_pages:   number of pages from start to pin
2647  * @gup_flags:  flags modifying lookup behaviour
2648  * @pages:      array that receives pointers to the pages pinned.
2649  *              Should be at least nr_pages long. Or NULL, if caller
2650  *              only intends to ensure the pages are faulted in.
2651  *
2652  * This is the same as get_user_pages_remote(), just with a less-flexible
2653  * calling convention where we assume that the mm being operated on belongs to
2654  * the current task, and doesn't allow passing of a locked parameter.  We also
2655  * obviously don't pass FOLL_REMOTE in here.
2656  */
2657 long get_user_pages(unsigned long start, unsigned long nr_pages,
2658                     unsigned int gup_flags, struct page **pages)
2659 {
2660         int locked = 1;
2661 
2662         if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH))
2663                 return -EINVAL;
2664 
2665         return __get_user_pages_locked(current->mm, start, nr_pages, pages,
2666                                        &locked, gup_flags);
2667 }
2668 EXPORT_SYMBOL(get_user_pages);
2669 
2670 /*
2671  * get_user_pages_unlocked() is suitable to replace the form:
2672  *
2673  *      mmap_read_lock(mm);
2674  *      get_user_pages(mm, ..., pages, NULL);
2675  *      mmap_read_unlock(mm);
2676  *
2677  *  with:
2678  *
2679  *      get_user_pages_unlocked(mm, ..., pages);
2680  *
2681  * It is functionally equivalent to get_user_pages_fast so
2682  * get_user_pages_fast should be used instead if specific gup_flags
2683  * (e.g. FOLL_FORCE) are not required.
2684  */
2685 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2686                              struct page **pages, unsigned int gup_flags)
2687 {
2688         int locked = 0;
2689 
2690         if (!is_valid_gup_args(pages, NULL, &gup_flags,
2691                                FOLL_TOUCH | FOLL_UNLOCKABLE))
2692                 return -EINVAL;
2693 
2694         return __get_user_pages_locked(current->mm, start, nr_pages, pages,
2695                                        &locked, gup_flags);
2696 }
2697 EXPORT_SYMBOL(get_user_pages_unlocked);
2698 
2699 /*
2700  * GUP-fast
2701  *
2702  * get_user_pages_fast attempts to pin user pages by walking the page
2703  * tables directly and avoids taking locks. Thus the walker needs to be
2704  * protected from page table pages being freed from under it, and should
2705  * block any THP splits.
2706  *
2707  * One way to achieve this is to have the walker disable interrupts, and
2708  * rely on IPIs from the TLB flushing code blocking before the page table
2709  * pages are freed. This is unsuitable for architectures that do not need
2710  * to broadcast an IPI when invalidating TLBs.
2711  *
2712  * Another way to achieve this is to batch up the pages containing page tables
2713  * belonging to more than one mm_user, then rcu_sched a callback to free those
2714  * pages. Disabling interrupts will allow the gup_fast() walker to both block
2715  * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2716  * (which is a relatively rare event). The code below adopts this strategy.
2717  *
2718  * Before activating this code, please be aware that the following assumptions
2719  * are currently made:
2720  *
2721  *  *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2722  *  free pages containing page tables, or TLB flushing requires IPI broadcast.
2723  *
2724  *  *) ptes can be read atomically by the architecture.
2725  *
2726  *  *) access_ok is sufficient to validate userspace address ranges.
2727  *
2728  * The last two assumptions can be relaxed by the addition of helper functions.
2729  *
2730  * This code is based heavily on the PowerPC implementation by Nick Piggin.
2731  */
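
/*
 * Example (editorial sketch, not part of gup.c): from a caller's point of view
 * the upshot of the scheme above is get_user_pages_fast(), which needs no
 * mmap_lock and falls back to the slow path internally when the lockless walk
 * cannot make progress.  The function name is hypothetical.
 */
static int example_fast_pin(unsigned long uaddr, int nr_pages,
			    struct page **pages)
{
	int pinned, i;

	pinned = get_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (pinned <= 0)
		return pinned;

	/* ... use the pages ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	return pinned;
}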
2732 #ifdef CONFIG_HAVE_GUP_FAST
2733 /*
2734  * Used in the GUP-fast path to determine whether GUP is permitted to work on
2735  * a specific folio.
2736  *
2737  * This call assumes the caller has pinned the folio, that the lowest page table
2738  * level still points to this folio, and that interrupts have been disabled.
2739  *
2740  * GUP-fast must reject all secretmem folios.
2741  *
2742  * Writing to pinned, file-backed, dirty-tracked folios is inherently
2743  * problematic (see the comment describing the writable_file_mapping_allowed()
2744  * function). We therefore try to avoid the most egregious case of a long-term
2745  * mapping doing so.
2746  *
2747  * This function cannot be as thorough as that one, because the VMA is not
2748  * available in the fast path; instead we allow known-good cases and, if in
2749  * doubt, fall back to the slow path.
2750  */
2751 static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags)
2752 {
2753         bool reject_file_backed = false;
2754         struct address_space *mapping;
2755         bool check_secretmem = false;
2756         unsigned long mapping_flags;
2757 
2758         /*
2759          * If we aren't pinning then no problematic write can occur. A long term
2760          * pin is the most egregious case so this is the one we disallow.
2761          */
2762         if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) ==
2763             (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
2764                 reject_file_backed = true;
2765 
2766         /* We hold a folio reference, so we can safely access folio fields. */
2767 
2768         /* secretmem folios are always order-0 folios. */
2769         if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio))
2770                 check_secretmem = true;
2771 
2772         if (!reject_file_backed && !check_secretmem)
2773                 return true;
2774 
2775         if (WARN_ON_ONCE(folio_test_slab(folio)))
2776                 return false;
2777 
2778         /* hugetlb neither requires dirty-tracking nor can be secretmem. */
2779         if (folio_test_hugetlb(folio))
2780                 return true;
2781 
2782         /*
2783          * GUP-fast disables IRQs. While IRQs are disabled, RCU grace periods
2784          * cannot complete, so nothing that is freed via an RCU callback can
2785          * actually be freed under us either.
2786          *
2787          * Inodes, and thus their mappings, are freed under RCU, which means the
2788          * mapping cannot be freed beneath us, and we can therefore safely
2789          * dereference it.
2790          */
2791         lockdep_assert_irqs_disabled();
2792 
2793         /*
2794          * However, there may be operations which _alter_ the mapping, so ensure
2795          * we read it once and only once.
2796          */
2797         mapping = READ_ONCE(folio->mapping);
2798 
2799         /*
2800          * The mapping may have been truncated; in any case we cannot determine
2801          * whether this mapping is safe - fall back to the slow path to decide how
2802          * to proceed.
2803          */
2804         if (!mapping)
2805                 return false;
2806 
2807         /* Anonymous folios pose no problem. */
2808         mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS;
2809         if (mapping_flags)
2810                 return mapping_flags & PAGE_MAPPING_ANON;
2811 
2812         /*
2813          * At this point, we know the mapping is non-null and points to an
2814          * address_space object.
2815          */
2816         if (check_secretmem && secretmem_mapping(mapping))
2817                 return false;
2818         /* The only remaining allowed file system is shmem. */
2819         return !reject_file_backed || shmem_mapping(mapping);
2820 }
2821 
2822 static void __maybe_unused gup_fast_undo_dev_pagemap(int *nr, int nr_start,
2823                 unsigned int flags, struct page **pages)
2824 {
2825         while ((*nr) - nr_start) {
2826                 struct folio *folio = page_folio(pages[--(*nr)]);
2827 
2828                 folio_clear_referenced(folio);
2829                 gup_put_folio(folio, 1, flags);
2830         }
2831 }
2832 
2833 #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2834 /*
2835  * GUP-fast relies on pte change detection to avoid concurrent pgtable
2836  * operations.
2837  *
2838  * To pin the page, GUP-fast needs to do the following, in order:
2839  * (1) pin the page (based on the previously read pte), then (2) check that the pte has not changed.
2840  *
2841  * For the other pgtable operations, where pgtable updates can race with
2842  * GUP-fast, we need to (1) clear the pte, then (2) check whether the page
2843  * is pinned.
2844  *
2845  * The above works for all pte-level operations, including THP split.
2846  *
2847  * For THP collapse, it's a bit more complicated because GUP-fast may be
2848  * walking a pgtable page that is being freed (pte is still valid but pmd
2849  * can be cleared already).  To avoid race in such condition, we need to
2850  * also check pmd here to make sure pmd doesn't change (corresponds to
2851  * pmdp_collapse_flush() in the THP collapse code path).
2852  */
2853 static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2854                 unsigned long end, unsigned int flags, struct page **pages,
2855                 int *nr)
2856 {
2857         struct dev_pagemap *pgmap = NULL;
2858         int nr_start = *nr, ret = 0;
2859         pte_t *ptep, *ptem;
2860 
2861         ptem = ptep = pte_offset_map(&pmd, addr);
2862         if (!ptep)
2863                 return 0;
2864         do {
2865                 pte_t pte = ptep_get_lockless(ptep);
2866                 struct page *page;
2867                 struct folio *folio;
2868 
2869                 /*
2870                  * Always fall back to ordinary GUP on PROT_NONE-mapped pages:
2871                  * pte_access_permitted() should reject these pages either
2872                  * way; otherwise, GUP-fast might succeed in cases where
2873                  * ordinary GUP would fail due to VMA access
2874                  * permissions.
2875                  */
2876                 if (pte_protnone(pte))
2877                         goto pte_unmap;
2878 
2879                 if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2880                         goto pte_unmap;
2881 
2882                 if (pte_devmap(pte)) {
2883                         if (unlikely(flags & FOLL_LONGTERM))
2884                                 goto pte_unmap;
2885 
2886                         pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2887                         if (unlikely(!pgmap)) {
2888                                 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
2889                                 goto pte_unmap;
2890                         }
2891                 } else if (pte_special(pte))
2892                         goto pte_unmap;
2893 
2894                 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2895                 page = pte_page(pte);
2896 
2897                 folio = try_grab_folio_fast(page, 1, flags);
2898                 if (!folio)
2899                         goto pte_unmap;
2900 
2901                 if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
2902                     unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
2903                         gup_put_folio(folio, 1, flags);
2904                         goto pte_unmap;
2905                 }
2906 
2907                 if (!gup_fast_folio_allowed(folio, flags)) {
2908                         gup_put_folio(folio, 1, flags);
2909                         goto pte_unmap;
2910                 }
2911 
2912                 if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
2913                         gup_put_folio(folio, 1, flags);
2914                         goto pte_unmap;
2915                 }
2916 
2917                 /*
2918                  * We need to make the page accessible if and only if we are
2919                  * going to access its content (the FOLL_PIN case).  Please
2920                  * see Documentation/core-api/pin_user_pages.rst for
2921                  * details.
2922                  */
2923                 if (flags & FOLL_PIN) {
2924                         ret = arch_make_page_accessible(page);
2925                         if (ret) {
2926                                 gup_put_folio(folio, 1, flags);
2927                                 goto pte_unmap;
2928                         }
2929                 }
2930                 folio_set_referenced(folio);
2931                 pages[*nr] = page;
2932                 (*nr)++;
2933         } while (ptep++, addr += PAGE_SIZE, addr != end);
2934 
2935         ret = 1;
2936 
2937 pte_unmap:
2938         if (pgmap)
2939                 put_dev_pagemap(pgmap);
2940         pte_unmap(ptem);
2941         return ret;
2942 }
2943 #else
2944 
2945 /*
2946  * If we can't determine whether or not a pte is special, then fail immediately
2947  * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2948  * to be special.
2949  *
2950  * For a futex to be placed on a THP tail page, get_futex_key requires a
2951  * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2952  * useful to have gup_fast_pmd_leaf even if we can't operate on ptes.
2953  */
2954 static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2955                 unsigned long end, unsigned int flags, struct page **pages,
2956                 int *nr)
2957 {
2958         return 0;
2959 }
2960 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
2961 
2962 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
2963 static int gup_fast_devmap_leaf(unsigned long pfn, unsigned long addr,
2964         unsigned long end, unsigned int flags, struct page **pages, int *nr)
2965 {
2966         int nr_start = *nr;
2967         struct dev_pagemap *pgmap = NULL;
2968 
2969         do {
2970                 struct folio *folio;
2971                 struct page *page = pfn_to_page(pfn);
2972 
2973                 pgmap = get_dev_pagemap(pfn, pgmap);
2974                 if (unlikely(!pgmap)) {
2975                         gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
2976                         break;
2977                 }
2978 
2979                 if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
2980                         gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
2981                         break;
2982                 }
2983 
2984                 folio = try_grab_folio_fast(page, 1, flags);
2985                 if (!folio) {
2986                         gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
2987                         break;
2988                 }
2989                 folio_set_referenced(folio);
2990                 pages[*nr] = page;
2991                 (*nr)++;
2992                 pfn++;
2993         } while (addr += PAGE_SIZE, addr != end);
2994 
2995         put_dev_pagemap(pgmap);
2996         return addr == end;
2997 }
2998 
2999 static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
3000                 unsigned long end, unsigned int flags, struct page **pages,
3001                 int *nr)
3002 {
3003         unsigned long fault_pfn;
3004         int nr_start = *nr;
3005 
3006         fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
3007         if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr))
3008                 return 0;
3009 
3010         if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3011                 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3012                 return 0;
3013         }
3014         return 1;
3015 }
3016 
3017 static int gup_fast_devmap_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
3018                 unsigned long end, unsigned int flags, struct page **pages,
3019                 int *nr)
3020 {
3021         unsigned long fault_pfn;
3022         int nr_start = *nr;
3023 
3024         fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
3025         if (!gup_fast_devmap_leaf(fault_pfn, addr, end, flags, pages, nr))
3026                 return 0;
3027 
3028         if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3029                 gup_fast_undo_dev_pagemap(nr, nr_start, flags, pages);
3030                 return 0;
3031         }
3032         return 1;
3033 }
3034 #else
3035 static int gup_fast_devmap_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
3036                 unsigned long end, unsigned int flags, struct page **pages,
3037                 int *nr)
3038 {
3039         BUILD_BUG();
3040         return 0;
3041 }
3042 
3043 static int gup_fast_devmap_pud_leaf(pud_t pud, pud_t *pudp, unsigned long addr,
3044                 unsigned long end, unsigned int flags, struct page **pages,
3045                 int *nr)
3046 {
3047         BUILD_BUG();
3048         return 0;
3049 }
3050 #endif
3051 
3052 static int gup_fast_pmd_leaf(pmd_t orig, pmd_t *pmdp, unsigned long addr,
3053                 unsigned long end, unsigned int flags, struct page **pages,
3054                 int *nr)
3055 {
3056         struct page *page;
3057         struct folio *folio;
3058         int refs;
3059 
3060         if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
3061                 return 0;
3062 
3063         if (pmd_devmap(orig)) {
3064                 if (unlikely(flags & FOLL_LONGTERM))
3065                         return 0;
3066                 return gup_fast_devmap_pmd_leaf(orig, pmdp, addr, end, flags,
3067                                                 pages, nr);
3068         }
3069 
3070         page = pmd_page(orig);
3071         refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr);
3072 
3073         folio = try_grab_folio_fast(page, refs, flags);
3074         if (!folio)
3075                 return 0;
3076 
3077         if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
3078                 gup_put_folio(folio, refs, flags);
3079                 return 0;
3080         }
3081 
3082         if (!gup_fast_folio_allowed(folio, flags)) {
3083                 gup_put_folio(folio, refs, flags);
3084                 return 0;
3085         }
3086         if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
3087                 gup_put_folio(folio, refs, flags);
3088                 return 0;
3089         }
3090 
3091         *nr += refs;
3092         folio_set_referenced(folio);
3093         return 1;
3094 }
3095 
3096 static int gup_fast_pud_leaf(pud_t orig, pud_t *pudp, unsigned long addr,
3097                 unsigned long end, unsigned int flags, struct page **pages,
3098                 int *nr)
3099 {
3100         struct page *page;
3101         struct folio *folio;
3102         int refs;
3103 
3104         if (!pud_access_permitted(orig, flags & FOLL_WRITE))
3105                 return 0;
3106 
3107         if (pud_devmap(orig)) {
3108                 if (unlikely(flags & FOLL_LONGTERM))
3109                         return 0;
3110                 return gup_fast_devmap_pud_leaf(orig, pudp, addr, end, flags,
3111                                                 pages, nr);
3112         }
3113 
3114         page = pud_page(orig);
3115         refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr);
3116 
3117         folio = try_grab_folio_fast(page, refs, flags);
3118         if (!folio)
3119                 return 0;
3120 
3121         if (unlikely(pud_val(orig) != pud_val(*pudp))) {
3122                 gup_put_folio(folio, refs, flags);
3123                 return 0;
3124         }
3125 
3126         if (!gup_fast_folio_allowed(folio, flags)) {
3127                 gup_put_folio(folio, refs, flags);
3128                 return 0;
3129         }
3130 
3131         if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
3132                 gup_put_folio(folio, refs, flags);
3133                 return 0;
3134         }
3135 
3136         *nr += refs;
3137         folio_set_referenced(folio);
3138         return 1;
3139 }
3140 
3141 static int gup_fast_pgd_leaf(pgd_t orig, pgd_t *pgdp, unsigned long addr,
3142                 unsigned long end, unsigned int flags, struct page **pages,
3143                 int *nr)
3144 {
3145         int refs;
3146         struct page *page;
3147         struct folio *folio;
3148 
3149         if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
3150                 return 0;
3151 
3152         BUILD_BUG_ON(pgd_devmap(orig));
3153 
3154         page = pgd_page(orig);
3155         refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr);
3156 
3157         folio = try_grab_folio_fast(page, refs, flags);
3158         if (!folio)
3159                 return 0;
3160 
3161         if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
3162                 gup_put_folio(folio, refs, flags);
3163                 return 0;
3164         }
3165 
3166         if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
3167                 gup_put_folio(folio, refs, flags);
3168                 return 0;
3169         }
3170 
3171         if (!gup_fast_folio_allowed(folio, flags)) {
3172                 gup_put_folio(folio, refs, flags);
3173                 return 0;
3174         }
3175 
3176         *nr += refs;
3177         folio_set_referenced(folio);
3178         return 1;
3179 }
3180 
3181 static int gup_fast_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
3182                 unsigned long end, unsigned int flags, struct page **pages,
3183                 int *nr)
3184 {
3185         unsigned long next;
3186         pmd_t *pmdp;
3187 
3188         pmdp = pmd_offset_lockless(pudp, pud, addr);
3189         do {
3190                 pmd_t pmd = pmdp_get_lockless(pmdp);
3191 
3192                 next = pmd_addr_end(addr, end);
3193                 if (!pmd_present(pmd))
3194                         return 0;
3195 
3196                 if (unlikely(pmd_leaf(pmd))) {
3197                         /* See gup_fast_pte_range() */
3198                         if (pmd_protnone(pmd))
3199                                 return 0;
3200 
3201                         if (!gup_fast_pmd_leaf(pmd, pmdp, addr, next, flags,
3202                                 pages, nr))
3203                                 return 0;
3204 
3205                 } else if (!gup_fast_pte_range(pmd, pmdp, addr, next, flags,
3206                                                pages, nr))
3207                         return 0;
3208         } while (pmdp++, addr = next, addr != end);
3209 
3210         return 1;
3211 }
3212 
3213 static int gup_fast_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
3214                 unsigned long end, unsigned int flags, struct page **pages,
3215                 int *nr)
3216 {
3217         unsigned long next;
3218         pud_t *pudp;
3219 
3220         pudp = pud_offset_lockless(p4dp, p4d, addr);
3221         do {
3222                 pud_t pud = READ_ONCE(*pudp);
3223 
3224                 next = pud_addr_end(addr, end);
3225                 if (unlikely(!pud_present(pud)))
3226                         return 0;
3227                 if (unlikely(pud_leaf(pud))) {
3228                         if (!gup_fast_pud_leaf(pud, pudp, addr, next, flags,
3229                                                pages, nr))
3230                                 return 0;
3231                 } else if (!gup_fast_pmd_range(pudp, pud, addr, next, flags,
3232                                                pages, nr))
3233                         return 0;
3234         } while (pudp++, addr = next, addr != end);
3235 
3236         return 1;
3237 }
3238 
3239 static int gup_fast_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
3240                 unsigned long end, unsigned int flags, struct page **pages,
3241                 int *nr)
3242 {
3243         unsigned long next;
3244         p4d_t *p4dp;
3245 
3246         p4dp = p4d_offset_lockless(pgdp, pgd, addr);
3247         do {
3248                 p4d_t p4d = READ_ONCE(*p4dp);
3249 
3250                 next = p4d_addr_end(addr, end);
3251                 if (!p4d_present(p4d))
3252                         return 0;
3253                 BUILD_BUG_ON(p4d_leaf(p4d));
3254                 if (!gup_fast_pud_range(p4dp, p4d, addr, next, flags,
3255                                         pages, nr))
3256                         return 0;
3257         } while (p4dp++, addr = next, addr != end);
3258 
3259         return 1;
3260 }
3261 
3262 static void gup_fast_pgd_range(unsigned long addr, unsigned long end,
3263                 unsigned int flags, struct page **pages, int *nr)
3264 {
3265         unsigned long next;
3266         pgd_t *pgdp;
3267 
3268         pgdp = pgd_offset(current->mm, addr);
3269         do {
3270                 pgd_t pgd = READ_ONCE(*pgdp);
3271 
3272                 next = pgd_addr_end(addr, end);
3273                 if (pgd_none(pgd))
3274                         return;
3275                 if (unlikely(pgd_leaf(pgd))) {
3276                         if (!gup_fast_pgd_leaf(pgd, pgdp, addr, next, flags,
3277                                                pages, nr))
3278                                 return;
3279                 } else if (!gup_fast_p4d_range(pgdp, pgd, addr, next, flags,
3280                                                pages, nr))
3281                         return;
3282         } while (pgdp++, addr = next, addr != end);
3283 }
3284 #else
3285 static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end,
3286                 unsigned int flags, struct page **pages, int *nr)
3287 {
3288 }
3289 #endif /* CONFIG_HAVE_GUP_FAST */
3290 
3291 #ifndef gup_fast_permitted
3292 /*
3293  * Check if it's allowed to use get_user_pages_fast_only() for the range, or
3294  * we need to fall back to the slow version:
3295  */
3296 static bool gup_fast_permitted(unsigned long start, unsigned long end)
3297 {
3298         return true;
3299 }
3300 #endif
3301 
3302 static unsigned long gup_fast(unsigned long start, unsigned long end,
3303                 unsigned int gup_flags, struct page **pages)
3304 {
3305         unsigned long flags;
3306         int nr_pinned = 0;
3307         unsigned seq;
3308 
3309         if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) ||
3310             !gup_fast_permitted(start, end))
3311                 return 0;
3312 
3313         if (gup_flags & FOLL_PIN) {
3314                 seq = raw_read_seqcount(&current->mm->write_protect_seq);
3315                 if (seq & 1)
3316                         return 0;
3317         }
3318 
3319         /*
3320          * Disable interrupts. The nested form is used in order to allow
3321          * full, general-purpose use of this routine.
3322          *
3323          * With interrupts disabled, we block page table pages from being freed
3324          * from under us. See struct mmu_table_batch comments in
3325          * include/asm-generic/tlb.h for more details.
3326          *
3327          * We do not adopt an rcu_read_lock() here as we also want to block IPIs
3328          * that come from THPs splitting.
3329          */
3330         local_irq_save(flags);
3331         gup_fast_pgd_range(start, end, gup_flags, pages, &nr_pinned);
3332         local_irq_restore(flags);
3333 
3334         /*
3335          * When pinning pages for DMA, there could be a concurrent write-protect
3336          * from fork() via copy_page_range(); in that case, always fail GUP-fast.
3337          */
3338         if (gup_flags & FOLL_PIN) {
3339                 if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
3340                         gup_fast_unpin_user_pages(pages, nr_pinned);
3341                         return 0;
3342                 } else {
3343                         sanity_check_pinned_pages(pages, nr_pinned);
3344                 }
3345         }
3346         return nr_pinned;
3347 }
3348 
3349 static int gup_fast_fallback(unsigned long start, unsigned long nr_pages,
3350                 unsigned int gup_flags, struct page **pages)
3351 {
3352         unsigned long len, end;
3353         unsigned long nr_pinned;
3354         int locked = 0;
3355         int ret;
3356 
3357         if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
3358                                        FOLL_FORCE | FOLL_PIN | FOLL_GET |
3359                                        FOLL_FAST_ONLY | FOLL_NOFAULT |
3360                                        FOLL_PCI_P2PDMA | FOLL_HONOR_NUMA_FAULT)))
3361                 return -EINVAL;
3362 
3363         if (gup_flags & FOLL_PIN)
3364                 mm_set_has_pinned_flag(&current->mm->flags);
3365 
3366         if (!(gup_flags & FOLL_FAST_ONLY))
3367                 might_lock_read(&current->mm->mmap_lock);
3368 
3369         start = untagged_addr(start) & PAGE_MASK;
3370         len = nr_pages << PAGE_SHIFT;
3371         if (check_add_overflow(start, len, &end))
3372                 return -EOVERFLOW;
3373         if (end > TASK_SIZE_MAX)
3374                 return -EFAULT;
3375         if (unlikely(!access_ok((void __user *)start, len)))
3376                 return -EFAULT;
3377 
3378         nr_pinned = gup_fast(start, end, gup_flags, pages);
3379         if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
3380                 return nr_pinned;
3381 
3382         /* Slow path: try to get the remaining pages with get_user_pages */
3383         start += nr_pinned << PAGE_SHIFT;
3384         pages += nr_pinned;
3385         ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned,
3386                                     pages, &locked,
3387                                     gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE);
3388         if (ret < 0) {
3389                 /*
3390                  * The caller has to unpin the pages we already pinned, so
3391                  * returning -errno is not an option.
3392                  */
3393                 if (nr_pinned)
3394                         return nr_pinned;
3395                 return ret;
3396         }
3397         return ret + nr_pinned;
3398 }
3399 
3400 /**
3401  * get_user_pages_fast_only() - pin user pages in memory
3402  * @start:      starting user address
3403  * @nr_pages:   number of pages from start to pin
3404  * @gup_flags:  flags modifying pin behaviour
3405  * @pages:      array that receives pointers to the pages pinned.
3406  *              Should be at least nr_pages long.
3407  *
3408  * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
3409  * the regular GUP.
3410  *
3411  * If the architecture does not support this function, simply return with no
3412  * pages pinned.
3413  *
3414  * Careful, careful! COW breaking can go either way, so a non-write
3415  * access can get ambiguous page results. If you call this function without
3416  * 'write' set, you'd better be sure that you're ok with that ambiguity.
3417  */
3418 int get_user_pages_fast_only(unsigned long start, int nr_pages,
3419                              unsigned int gup_flags, struct page **pages)
3420 {
3421         /*
3422          * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
3423          * because gup fast is always a "pin with a +1 page refcount" request.
3424          *
3425          * FOLL_FAST_ONLY is required in order to match the API description of
3426          * this routine: no fall back to regular ("slow") GUP.
3427          */
3428         if (!is_valid_gup_args(pages, NULL, &gup_flags,
3429                                FOLL_GET | FOLL_FAST_ONLY))
3430                 return -EINVAL;
3431 
3432         return gup_fast_fallback(start, nr_pages, gup_flags, pages);
3433 }
3434 EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
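
/*
 * Illustrative sketch (editorial addition): because this variant never falls
 * back to slow GUP, callers typically treat anything other than a full pin
 * count as "retry via the slow path". "uaddr" is an assumed userspace
 * address of interest:
 *
 *	struct page *page;
 *	int ret;
 *
 *	ret = get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, &page);
 *	if (ret != 1) {
 *		// nothing was pinned; take mmap_lock and use slow GUP instead
 *	} else {
 *		// use the page, then drop the reference
 *		put_page(page);
 *	}
 */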
3435 
3436 /**
3437  * get_user_pages_fast() - pin user pages in memory
3438  * @start:      starting user address
3439  * @nr_pages:   number of pages from start to pin
3440  * @gup_flags:  flags modifying pin behaviour
3441  * @pages:      array that receives pointers to the pages pinned.
3442  *              Should be at least nr_pages long.
3443  *
3444  * Attempt to pin user pages in memory without taking mm->mmap_lock.
3445  * If not successful, it will fall back to taking the lock and
3446  * calling get_user_pages().
3447  *
3448  * Returns number of pages pinned. This may be fewer than the number requested.
3449  * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
3450  * -errno.
3451  */
3452 int get_user_pages_fast(unsigned long start, int nr_pages,
3453                         unsigned int gup_flags, struct page **pages)
3454 {
3455         /*
3456          * The caller may or may not have explicitly set FOLL_GET; either way is
3457          * OK. However, internally (within mm/gup.c), gup fast variants must set
3458          * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
3459          * request.
3460          */
3461         if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET))
3462                 return -EINVAL;
3463         return gup_fast_fallback(start, nr_pages, gup_flags, pages);
3464 }
3465 EXPORT_SYMBOL_GPL(get_user_pages_fast);
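
/*
 * Illustrative sketch (editorial addition): a minimal get_user_pages_fast()
 * call pinning a single page for a short-lived kernel access, assuming
 * "uaddr" is a valid userspace address. Pages pinned this way carry an
 * ordinary reference and are released with put_page():
 *
 *	struct page *page;
 *	int nr;
 *
 *	nr = get_user_pages_fast(uaddr, 1, FOLL_WRITE, &page);
 *	if (nr == 1) {
 *		// access the page contents, e.g. via kmap_local_page()
 *		put_page(page);
 *	}
 */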
3466 
3467 /**
3468  * pin_user_pages_fast() - pin user pages in memory without taking locks
3469  *
3470  * @start:      starting user address
3471  * @nr_pages:   number of pages from start to pin
3472  * @gup_flags:  flags modifying pin behaviour
3473  * @pages:      array that receives pointers to the pages pinned.
3474  *              Should be at least nr_pages long.
3475  *
3476  * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
3477  * get_user_pages_fast() for documentation on the function arguments, because
3478  * the arguments here are identical.
3479  *
3480  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3481  * see Documentation/core-api/pin_user_pages.rst for further details.
3482  *
3483  * Note that if a zero_page is amongst the returned pages, it will not have
3484  * pins in it and unpin_user_page() will not remove pins from it.
3485  */
3486 int pin_user_pages_fast(unsigned long start, int nr_pages,
3487                         unsigned int gup_flags, struct page **pages)
3488 {
3489         if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
3490                 return -EINVAL;
3491         return gup_fast_fallback(start, nr_pages, gup_flags, pages);
3492 }
3493 EXPORT_SYMBOL_GPL(pin_user_pages_fast);
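
/*
 * Illustrative sketch (editorial addition): pinning a small user buffer for
 * DMA-style access, assuming "uaddr" points at a valid userspace buffer.
 * FOLL_PIN pages must be released with unpin_user_page()/unpin_user_pages(),
 * never with put_page():
 *
 *	struct page *pages[2];
 *	int pinned;
 *
 *	pinned = pin_user_pages_fast(uaddr, ARRAY_SIZE(pages),
 *				     FOLL_WRITE | FOLL_LONGTERM, pages);
 *	if (pinned > 0) {
 *		// hand the pages to the device, wait for completion, then:
 *		unpin_user_pages(pages, pinned);
 *	}
 */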
3494 
3495 /**
3496  * pin_user_pages_remote() - pin pages of a remote process
3497  *
3498  * @mm:         mm_struct of target mm
3499  * @start:      starting user address
3500  * @nr_pages:   number of pages from start to pin
3501  * @gup_flags:  flags modifying lookup behaviour
3502  * @pages:      array that receives pointers to the pages pinned.
3503  *              Should be at least nr_pages long.
3504  * @locked:     pointer to lock flag indicating whether lock is held and
3505  *              subsequently whether VM_FAULT_RETRY functionality can be
3506  *              utilised. Lock must initially be held.
3507  *
3508  * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
3509  * get_user_pages_remote() for documentation on the function arguments, because
3510  * the arguments here are identical.
3511  *
3512  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3513  * see Documentation/core-api/pin_user_pages.rst for details.
3514  *
3515  * Note that if a zero_page is amongst the returned pages, it will not have
3516  * pins in it and unpin_user_page*() will not remove pins from it.
3517  */
3518 long pin_user_pages_remote(struct mm_struct *mm,
3519                            unsigned long start, unsigned long nr_pages,
3520                            unsigned int gup_flags, struct page **pages,
3521                            int *locked)
3522 {
3523         int local_locked = 1;
3524 
3525         if (!is_valid_gup_args(pages, locked, &gup_flags,
3526                                FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE))
3527                 return 0;
3528         return __gup_longterm_locked(mm, start, nr_pages, pages,
3529                                      locked ? locked : &local_locked,
3530                                      gup_flags);
3531 }
3532 EXPORT_SYMBOL(pin_user_pages_remote);
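
/*
 * Illustrative sketch (editorial addition): pinning one page of another
 * process's address space. "mm" is assumed to be a target mm the caller
 * holds a reference on (e.g. from get_task_mm()) and "uaddr" an address in
 * that mm. Per the description above, the mmap lock must be held on entry
 * and may have been dropped whenever *locked is cleared on return:
 *
 *	struct page *page;
 *	int locked = 1;
 *	long pinned;
 *
 *	mmap_read_lock(mm);
 *	pinned = pin_user_pages_remote(mm, uaddr, 1, FOLL_WRITE,
 *				       &page, &locked);
 *	if (locked)
 *		mmap_read_unlock(mm);
 *	if (pinned == 1) {
 *		// access the remote page, then release the pin
 *		unpin_user_page(page);
 *	}
 */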
3533 
3534 /**
3535  * pin_user_pages() - pin user pages in memory for use by other devices
3536  *
3537  * @start:      starting user address
3538  * @nr_pages:   number of pages from start to pin
3539  * @gup_flags:  flags modifying lookup behaviour
3540  * @pages:      array that receives pointers to the pages pinned.
3541  *              Should be at least nr_pages long.
3542  *
3543  * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
3544  * FOLL_PIN is set.
3545  *
3546  * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
3547  * see Documentation/core-api/pin_user_pages.rst for details.
3548  *
3549  * Note that if a zero_page is amongst the returned pages, it will not have
3550  * pins in it and unpin_user_page*() will not remove pins from it.
3551  */
3552 long pin_user_pages(unsigned long start, unsigned long nr_pages,
3553                     unsigned int gup_flags, struct page **pages)
3554 {
3555         int locked = 1;
3556 
3557         if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN))
3558                 return 0;
3559         return __gup_longterm_locked(current->mm, start, nr_pages,
3560                                      pages, &locked, gup_flags);
3561 }
3562 EXPORT_SYMBOL(pin_user_pages);
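
/*
 * Illustrative sketch (editorial addition): a long-term pin of a userspace
 * buffer in the current process, roughly as a driver registering a DMA
 * buffer might do it. "uaddr" and "nr" (the page count) are assumed caller
 * inputs; the caller must hold the mmap lock across the call, and the pins
 * are dropped later with unpin_user_pages():
 *
 *	struct page **pages;
 *	long pinned;
 *
 *	pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
 *	if (!pages)
 *		return -ENOMEM;
 *
 *	mmap_read_lock(current->mm);
 *	pinned = pin_user_pages(uaddr, nr, FOLL_WRITE | FOLL_LONGTERM, pages);
 *	mmap_read_unlock(current->mm);
 *
 *	if (pinned > 0 && pinned != nr) {
 *		unpin_user_pages(pages, pinned);
 *		pinned = -EFAULT;
 *	}
 */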
3563 
3564 /*
3565  * pin_user_pages_unlocked() is the FOLL_PIN variant of
3566  * get_user_pages_unlocked(). Behavior is the same, except that this one sets
3567  * FOLL_PIN and rejects FOLL_GET.
3568  *
3569  * Note that if a zero_page is amongst the returned pages, it will not have
3570  * pins in it and unpin_user_page*() will not remove pins from it.
3571  */
3572 long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
3573                              struct page **pages, unsigned int gup_flags)
3574 {
3575         int locked = 0;
3576 
3577         if (!is_valid_gup_args(pages, NULL, &gup_flags,
3578                                FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE))
3579                 return 0;
3580 
3581         return __gup_longterm_locked(current->mm, start, nr_pages, pages,
3582                                      &locked, gup_flags);
3583 }
3584 EXPORT_SYMBOL(pin_user_pages_unlocked);
3585 
3586 /**
3587  * memfd_pin_folios() - pin folios associated with a memfd
3588  * @memfd:      the memfd whose folios are to be pinned
3589  * @start:      the first memfd offset
3590  * @end:        the last memfd offset (inclusive)
3591  * @folios:     array that receives pointers to the folios pinned
3592  * @max_folios: maximum number of entries in @folios
3593  * @offset:     the offset into the first folio
3594  *
3595  * Attempt to pin folios associated with a memfd in the contiguous range
3596  * [start, end]. Given that a memfd is backed by either shmem or hugetlb,
3597  * the folios are either found in the page cache or allocated and added to
3598  * the page cache as necessary. Once the folios are located, they are all
3599  * pinned via FOLL_PIN and @offset is populated with the offset into the
3600  * first folio. Eventually, the pinned folios must be released, using
3601  * either unpin_folios() or unpin_folio().
3602  *
3603  * Note that the folios may remain pinned for an indefinite amount of
3604  * time, and in most cases that duration is controlled by userspace. This
3605  * behavior is effectively the same as using FOLL_LONGTERM with other
3606  * GUP APIs.
3607  *
3608  * Returns number of folios pinned, which could be less than @max_folios
3609  * as it depends on the folio sizes that cover the range [start, end].
3610  * If no folios were pinned, it returns -errno.
3611  */
3612 long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
3613                       struct folio **folios, unsigned int max_folios,
3614                       pgoff_t *offset)
3615 {
3616         unsigned int flags, nr_folios, nr_found;
3617         unsigned int i, pgshift = PAGE_SHIFT;
3618         pgoff_t start_idx, end_idx, next_idx;
3619         struct folio *folio = NULL;
3620         struct folio_batch fbatch;
3621         struct hstate *h;
3622         long ret = -EINVAL;
3623 
3624         if (start < 0 || start > end || !max_folios)
3625                 return -EINVAL;
3626 
3627         if (!memfd)
3628                 return -EINVAL;
3629 
3630         if (!shmem_file(memfd) && !is_file_hugepages(memfd))
3631                 return -EINVAL;
3632 
3633         if (end >= i_size_read(file_inode(memfd)))
3634                 return -EINVAL;
3635 
3636         if (is_file_hugepages(memfd)) {
3637                 h = hstate_file(memfd);
3638                 pgshift = huge_page_shift(h);
3639         }
3640 
3641         flags = memalloc_pin_save();
3642         do {
3643                 nr_folios = 0;
3644                 start_idx = start >> pgshift;
3645                 end_idx = end >> pgshift;
3646                 if (is_file_hugepages(memfd)) {
3647                         start_idx <<= huge_page_order(h);
3648                         end_idx <<= huge_page_order(h);
3649                 }
3650 
3651                 folio_batch_init(&fbatch);
3652                 while (start_idx <= end_idx && nr_folios < max_folios) {
3653                         /*
3654                          * In most cases, we should be able to find the folios
3655                          * in the page cache. If we cannot find them for some
3656                          * reason, we try to allocate them and add them to the
3657                          * page cache.
3658                          */
3659                         nr_found = filemap_get_folios_contig(memfd->f_mapping,
3660                                                              &start_idx,
3661                                                              end_idx,
3662                                                              &fbatch);
3663                         if (folio) {
3664                                 folio_put(folio);
3665                                 folio = NULL;
3666                         }
3667 
3668                         next_idx = 0;
3669                         for (i = 0; i < nr_found; i++) {
3670                                 /*
3671                                  * As there can be multiple entries for a
3672                                  * filemap_get_folios_contig(), the check
3673                                  * below ensures that we pin and return a
3674                                  * check is to ensure that we pin and return a
3675                                  * unique set of folios between start and end.
3676                                  */
3677                                 if (next_idx &&
3678                                     next_idx != folio_index(fbatch.folios[i]))
3679                                         continue;
3680 
3681                                 folio = page_folio(&fbatch.folios[i]->page);
3682 
3683                                 if (try_grab_folio(folio, 1, FOLL_PIN)) {
3684                                         folio_batch_release(&fbatch);
3685                                         ret = -EINVAL;
3686                                         goto err;
3687                                 }
3688 
3689                                 if (nr_folios == 0)
3690                                         *offset = offset_in_folio(folio, start);
3691 
3692                                 folios[nr_folios] = folio;
3693                                 next_idx = folio_next_index(folio);
3694                                 if (++nr_folios == max_folios)
3695                                         break;
3696                         }
3697 
3698                         folio = NULL;
3699                         folio_batch_release(&fbatch);
3700                         if (!nr_found) {
3701                                 folio = memfd_alloc_folio(memfd, start_idx);
3702                                 if (IS_ERR(folio)) {
3703                                         ret = PTR_ERR(folio);
3704                                         if (ret != -EEXIST)
3705                                                 goto err;
3706                                         folio = NULL;
3707                                 }
3708                         }
3709                 }
3710 
3711                 ret = check_and_migrate_movable_folios(nr_folios, folios);
3712         } while (ret == -EAGAIN);
3713 
3714         memalloc_pin_restore(flags);
3715         return ret ? ret : nr_folios;
3716 err:
3717         memalloc_pin_restore(flags);
3718         unpin_folios(folios, nr_folios);
3719 
3720         return ret;
3721 }
3722 EXPORT_SYMBOL_GPL(memfd_pin_folios);
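
/*
 * Illustrative sketch (editorial addition): pinning the folios backing the
 * first 1 MiB of a memfd, as a driver importing memfd-backed memory might.
 * "memfd" is assumed to be a shmem- or hugetlb-backed struct file at least
 * that large; the pins are dropped with unpin_folios():
 *
 *	struct folio *folios[16];
 *	pgoff_t off;
 *	long nr;
 *
 *	nr = memfd_pin_folios(memfd, 0, SZ_1M - 1, folios,
 *			      ARRAY_SIZE(folios), &off);
 *	if (nr > 0) {
 *		// folios[0..nr-1] are pinned; "off" is the offset into folios[0]
 *		unpin_folios(folios, nr);
 *	}
 */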
3723 
