TOMOYO Linux Cross Reference
Linux/mm/migrate_device.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Device Memory Migration functionality.
  4  *
  5  * Originally written by Jérôme Glisse.
  6  */
  7 #include <linux/export.h>
  8 #include <linux/memremap.h>
  9 #include <linux/migrate.h>
 10 #include <linux/mm.h>
 11 #include <linux/mm_inline.h>
 12 #include <linux/mmu_notifier.h>
 13 #include <linux/oom.h>
 14 #include <linux/pagewalk.h>
 15 #include <linux/rmap.h>
 16 #include <linux/swapops.h>
 17 #include <asm/tlbflush.h>
 18 #include "internal.h"
 19 
 20 static int migrate_vma_collect_skip(unsigned long start,
 21                                     unsigned long end,
 22                                     struct mm_walk *walk)
 23 {
 24         struct migrate_vma *migrate = walk->private;
 25         unsigned long addr;
 26 
 27         for (addr = start; addr < end; addr += PAGE_SIZE) {
 28                 migrate->dst[migrate->npages] = 0;
 29                 migrate->src[migrate->npages++] = 0;
 30         }
 31 
 32         return 0;
 33 }
 34 
 35 static int migrate_vma_collect_hole(unsigned long start,
 36                                     unsigned long end,
 37                                     __always_unused int depth,
 38                                     struct mm_walk *walk)
 39 {
 40         struct migrate_vma *migrate = walk->private;
 41         unsigned long addr;
 42 
 43         /* Only allow populating anonymous memory. */
 44         if (!vma_is_anonymous(walk->vma))
 45                 return migrate_vma_collect_skip(start, end, walk);
 46 
 47         for (addr = start; addr < end; addr += PAGE_SIZE) {
 48                 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
 49                 migrate->dst[migrate->npages] = 0;
 50                 migrate->npages++;
 51                 migrate->cpages++;
 52         }
 53 
 54         return 0;
 55 }
 56 
 57 static int migrate_vma_collect_pmd(pmd_t *pmdp,
 58                                    unsigned long start,
 59                                    unsigned long end,
 60                                    struct mm_walk *walk)
 61 {
 62         struct migrate_vma *migrate = walk->private;
 63         struct vm_area_struct *vma = walk->vma;
 64         struct mm_struct *mm = vma->vm_mm;
 65         unsigned long addr = start, unmapped = 0;
 66         spinlock_t *ptl;
 67         pte_t *ptep;
 68 
 69 again:
 70         if (pmd_none(*pmdp))
 71                 return migrate_vma_collect_hole(start, end, -1, walk);
 72 
 73         if (pmd_trans_huge(*pmdp)) {
 74                 struct folio *folio;
 75 
 76                 ptl = pmd_lock(mm, pmdp);
 77                 if (unlikely(!pmd_trans_huge(*pmdp))) {
 78                         spin_unlock(ptl);
 79                         goto again;
 80                 }
 81 
 82                 folio = pmd_folio(*pmdp);
 83                 if (is_huge_zero_folio(folio)) {
 84                         spin_unlock(ptl);
 85                         split_huge_pmd(vma, pmdp, addr);
 86                 } else {
 87                         int ret;
 88 
 89                         folio_get(folio);
 90                         spin_unlock(ptl);
 91                         if (unlikely(!folio_trylock(folio)))
 92                                 return migrate_vma_collect_skip(start, end,
 93                                                                 walk);
 94                         ret = split_folio(folio);
 95                         folio_unlock(folio);
 96                         folio_put(folio);
 97                         if (ret)
 98                                 return migrate_vma_collect_skip(start, end,
 99                                                                 walk);
100                 }
101         }
102 
103         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
104         if (!ptep)
105                 goto again;
106         arch_enter_lazy_mmu_mode();
107 
108         for (; addr < end; addr += PAGE_SIZE, ptep++) {
109                 unsigned long mpfn = 0, pfn;
110                 struct folio *folio;
111                 struct page *page;
112                 swp_entry_t entry;
113                 pte_t pte;
114 
115                 pte = ptep_get(ptep);
116 
117                 if (pte_none(pte)) {
118                         if (vma_is_anonymous(vma)) {
119                                 mpfn = MIGRATE_PFN_MIGRATE;
120                                 migrate->cpages++;
121                         }
122                         goto next;
123                 }
124 
125                 if (!pte_present(pte)) {
126                         /*
127                          * Only care about the unaddressable device page special
128                          * page table entry. Other special swap entries are not
129                          * migratable, and we ignore regular swapped pages.
130                          */
131                         entry = pte_to_swp_entry(pte);
132                         if (!is_device_private_entry(entry))
133                                 goto next;
134 
135                         page = pfn_swap_entry_to_page(entry);
136                         if (!(migrate->flags &
137                                 MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
138                             page->pgmap->owner != migrate->pgmap_owner)
139                                 goto next;
140 
141                         mpfn = migrate_pfn(page_to_pfn(page)) |
142                                         MIGRATE_PFN_MIGRATE;
143                         if (is_writable_device_private_entry(entry))
144                                 mpfn |= MIGRATE_PFN_WRITE;
145                 } else {
146                         pfn = pte_pfn(pte);
147                         if (is_zero_pfn(pfn) &&
148                             (migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) {
149                                 mpfn = MIGRATE_PFN_MIGRATE;
150                                 migrate->cpages++;
151                                 goto next;
152                         }
153                         page = vm_normal_page(migrate->vma, addr, pte);
154                         if (page && !is_zone_device_page(page) &&
155                             !(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
156                                 goto next;
157                         else if (page && is_device_coherent_page(page) &&
158                             (!(migrate->flags & MIGRATE_VMA_SELECT_DEVICE_COHERENT) ||
159                              page->pgmap->owner != migrate->pgmap_owner))
160                                 goto next;
161                         mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
162                         mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
163                 }
164 
165                 /* FIXME support THP */
166                 if (!page || !page->mapping || PageTransCompound(page)) {
167                         mpfn = 0;
168                         goto next;
169                 }
170 
171                 /*
172                  * By getting a reference on the folio we pin it and that blocks
173                  * any kind of migration. A side effect is that it "freezes" the
174                  * pte.
175                  *
176                  * We drop this reference after isolating the folio from the lru
177                  * for non-device folios (device folios are not on the lru and
178                  * thus can't be dropped from it).
179                  */
180                 folio = page_folio(page);
181                 folio_get(folio);
182 
183                 /*
184                  * We rely on folio_trylock() to avoid deadlock between
185                  * concurrent migrations where each is waiting on the other's
186                  * folio lock. If we can't immediately lock the folio we fail this
187                  * migration as it is only best effort anyway.
188                  *
189                  * If we can lock the folio it's safe to set up a migration entry
190                  * now. In the common case where the folio is mapped once in a
191                  * single process setting up the migration entry now is an
192                  * optimisation to avoid walking the rmap later with
193                  * try_to_migrate().
194                  */
195                 if (folio_trylock(folio)) {
196                         bool anon_exclusive;
197                         pte_t swp_pte;
198 
199                         flush_cache_page(vma, addr, pte_pfn(pte));
200                         anon_exclusive = folio_test_anon(folio) &&
201                                           PageAnonExclusive(page);
202                         if (anon_exclusive) {
203                                 pte = ptep_clear_flush(vma, addr, ptep);
204 
205                                 if (folio_try_share_anon_rmap_pte(folio, page)) {
206                                         set_pte_at(mm, addr, ptep, pte);
207                                         folio_unlock(folio);
208                                         folio_put(folio);
209                                         mpfn = 0;
210                                         goto next;
211                                 }
212                         } else {
213                                 pte = ptep_get_and_clear(mm, addr, ptep);
214                         }
215 
216                         migrate->cpages++;
217 
218                         /* Set the dirty flag on the folio now the pte is gone. */
219                         if (pte_dirty(pte))
220                                 folio_mark_dirty(folio);
221 
222                         /* Setup special migration page table entry */
223                         if (mpfn & MIGRATE_PFN_WRITE)
224                                 entry = make_writable_migration_entry(
225                                                         page_to_pfn(page));
226                         else if (anon_exclusive)
227                                 entry = make_readable_exclusive_migration_entry(
228                                                         page_to_pfn(page));
229                         else
230                                 entry = make_readable_migration_entry(
231                                                         page_to_pfn(page));
232                         if (pte_present(pte)) {
233                                 if (pte_young(pte))
234                                         entry = make_migration_entry_young(entry);
235                                 if (pte_dirty(pte))
236                                         entry = make_migration_entry_dirty(entry);
237                         }
238                         swp_pte = swp_entry_to_pte(entry);
239                         if (pte_present(pte)) {
240                                 if (pte_soft_dirty(pte))
241                                         swp_pte = pte_swp_mksoft_dirty(swp_pte);
242                                 if (pte_uffd_wp(pte))
243                                         swp_pte = pte_swp_mkuffd_wp(swp_pte);
244                         } else {
245                                 if (pte_swp_soft_dirty(pte))
246                                         swp_pte = pte_swp_mksoft_dirty(swp_pte);
247                                 if (pte_swp_uffd_wp(pte))
248                                         swp_pte = pte_swp_mkuffd_wp(swp_pte);
249                         }
250                         set_pte_at(mm, addr, ptep, swp_pte);
251 
252                         /*
253                          * This is like regular unmap: we remove the rmap and
254                          * drop the folio refcount. The folio won't be freed, as
255                          * we took a reference just above.
256                          */
257                         folio_remove_rmap_pte(folio, page, vma);
258                         folio_put(folio);
259 
260                         if (pte_present(pte))
261                                 unmapped++;
262                 } else {
263                         folio_put(folio);
264                         mpfn = 0;
265                 }
266 
267 next:
268                 migrate->dst[migrate->npages] = 0;
269                 migrate->src[migrate->npages++] = mpfn;
270         }
271 
272         /* Only flush the TLB if we actually modified any entries */
273         if (unmapped)
274                 flush_tlb_range(walk->vma, start, end);
275 
276         arch_leave_lazy_mmu_mode();
277         pte_unmap_unlock(ptep - 1, ptl);
278 
279         return 0;
280 }
281 
282 static const struct mm_walk_ops migrate_vma_walk_ops = {
283         .pmd_entry              = migrate_vma_collect_pmd,
284         .pte_hole               = migrate_vma_collect_hole,
285         .walk_lock              = PGWALK_RDLOCK,
286 };
287 
288 /*
289  * migrate_vma_collect() - collect pages over a range of virtual addresses
290  * @migrate: migrate struct containing all migration information
291  *
292  * This will walk the CPU page table. For each virtual address backed by a
293  * valid page, it updates the src array and takes a reference on the page, in
294  * order to pin the page until we lock it and unmap it.
295  */
296 static void migrate_vma_collect(struct migrate_vma *migrate)
297 {
298         struct mmu_notifier_range range;
299 
300         /*
301          * Note that the pgmap_owner is passed to the mmu notifier callback so
302          * that the registered device driver can skip invalidating device
303          * private page mappings that won't be migrated.
304          */
305         mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
306                 migrate->vma->vm_mm, migrate->start, migrate->end,
307                 migrate->pgmap_owner);
308         mmu_notifier_invalidate_range_start(&range);
309 
310         walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
311                         &migrate_vma_walk_ops, migrate);
312 
313         mmu_notifier_invalidate_range_end(&range);
314         migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
315 }
316 
317 /*
318  * migrate_vma_check_page() - check if page is pinned or not
319  * @page: struct page to check
320  *
321  * Pinned pages cannot be migrated. This is the same test as in
322  * folio_migrate_mapping(), except that here we allow migration of a
323  * ZONE_DEVICE page.
324  */
325 static bool migrate_vma_check_page(struct page *page, struct page *fault_page)
326 {
327         struct folio *folio = page_folio(page);
328 
329         /*
330          * One extra ref because caller holds an extra reference, either from
331          * isolate_lru_page() for a regular page, or migrate_vma_collect() for
332          * a device page.
333          */
334         int extra = 1 + (page == fault_page);
335 
336         /*
337          * FIXME support THP (transparent huge page), it is a bit more complex to
338          * check them than regular pages, because they can be mapped with a pmd
339          * or with a pte (split pte mapping).
340          */
341         if (folio_test_large(folio))
342                 return false;
343 
344         /* Pages from ZONE_DEVICE have one extra reference */
345         if (folio_is_zone_device(folio))
346                 extra++;
347 
348         /* For file-backed pages */
349         if (folio_mapping(folio))
350                 extra += 1 + folio_has_private(folio);
351 
352         if ((folio_ref_count(folio) - extra) > folio_mapcount(folio))
353                 return false;
354 
355         return true;
356 }
357 
358 /*
359  * Unmaps pages for migration. Returns number of source pfns marked as
360  * migrating.
361  */
362 static unsigned long migrate_device_unmap(unsigned long *src_pfns,
363                                           unsigned long npages,
364                                           struct page *fault_page)
365 {
366         unsigned long i, restore = 0;
367         bool allow_drain = true;
368         unsigned long unmapped = 0;
369 
370         lru_add_drain();
371 
372         for (i = 0; i < npages; i++) {
373                 struct page *page = migrate_pfn_to_page(src_pfns[i]);
374                 struct folio *folio;
375 
376                 if (!page) {
377                         if (src_pfns[i] & MIGRATE_PFN_MIGRATE)
378                                 unmapped++;
379                         continue;
380                 }
381 
382                 /* ZONE_DEVICE pages are not on LRU */
383                 if (!is_zone_device_page(page)) {
384                         if (!PageLRU(page) && allow_drain) {
385                                 /* Drain CPU's lru cache */
386                                 lru_add_drain_all();
387                                 allow_drain = false;
388                         }
389 
390                         if (!isolate_lru_page(page)) {
391                                 src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
392                                 restore++;
393                                 continue;
394                         }
395 
396                         /* Drop the reference we took in collect */
397                         put_page(page);
398                 }
399 
400                 folio = page_folio(page);
401                 if (folio_mapped(folio))
402                         try_to_migrate(folio, 0);
403 
404                 if (page_mapped(page) ||
405                     !migrate_vma_check_page(page, fault_page)) {
406                         if (!is_zone_device_page(page)) {
407                                 get_page(page);
408                                 putback_lru_page(page);
409                         }
410 
411                         src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
412                         restore++;
413                         continue;
414                 }
415 
416                 unmapped++;
417         }
418 
419         for (i = 0; i < npages && restore; i++) {
420                 struct page *page = migrate_pfn_to_page(src_pfns[i]);
421                 struct folio *folio;
422 
423                 if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE))
424                         continue;
425 
426                 folio = page_folio(page);
427                 remove_migration_ptes(folio, folio, false);
428 
429                 src_pfns[i] = 0;
430                 folio_unlock(folio);
431                 folio_put(folio);
432                 restore--;
433         }
434 
435         return unmapped;
436 }
437 
438 /*
439  * migrate_vma_unmap() - replace page mapping with special migration pte entry
440  * @migrate: migrate struct containing all migration information
441  *
442  * Isolate pages from the LRU and replace mappings (CPU page table pte) with a
443  * special migration pte entry and check if it has been pinned. Pinned pages are
444  * restored because we cannot migrate them.
445  *
446  * This is the last step before we call the device driver callback to allocate
447  * destination memory and copy contents of original page over to new page.
448  */
449 static void migrate_vma_unmap(struct migrate_vma *migrate)
450 {
451         migrate->cpages = migrate_device_unmap(migrate->src, migrate->npages,
452                                         migrate->fault_page);
453 }
454 
455 /**
456  * migrate_vma_setup() - prepare to migrate a range of memory
457  * @args: contains the vma, start, and pfns arrays for the migration
458  *
459  * Returns: negative errno on failures, 0 when 0 or more pages were migrated
460  * without an error.
461  *
462  * Prepare to migrate a range of memory virtual address range by collecting all
463  * the pages backing each virtual address in the range, saving them inside the
464  * src array.  Then lock those pages and unmap them. Once the pages are locked
465  * and unmapped, check whether each page is pinned or not.  Pages that aren't
466  * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
467  * corresponding src array entry.  It then restores any pages that are pinned,
468  * by remapping and unlocking those pages.
469  *
470  * The caller should then allocate destination memory and copy source memory to
471  * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
472  * flag set).  Once these are allocated and copied, the caller must update each
473  * corresponding entry in the dst array with the pfn value of the destination
474  * page and with MIGRATE_PFN_VALID. Destination pages must be locked via
475  * lock_page().
476  *
477  * Note that the caller does not have to migrate all the pages that are marked
478  * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
479  * device memory to system memory.  If the caller cannot migrate a device page
480  * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
481  * consequences for the userspace process, so it must be avoided if at all
482  * possible.
483  *
484  * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
485  * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus
486  * allowing the caller to allocate device memory for those unbacked virtual
487  * addresses.  For this the caller simply has to allocate device memory and
488  * properly set the destination entry like for regular migration.  Note that
489  * this can still fail, and thus inside the device driver you must check if the
490  * migration was successful for those entries after calling migrate_vma_pages(),
491  * just like for regular migration.
492  *
493  * After that, the callers must call migrate_vma_pages() to go over each entry
494  * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
495  * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
496  * then migrate_vma_pages() migrates struct page information from the source
497  * struct page to the destination struct page.  If it fails to migrate the
498  * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
499  * src array.
500  *
501  * At this point all successfully migrated pages have an entry in the src
502  * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
503  * array entry with MIGRATE_PFN_VALID flag set.
504  *
505  * Once migrate_vma_pages() returns the caller may inspect which pages were
506  * successfully migrated, and which were not.  Successfully migrated pages will
507  * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
508  *
509  * It is safe to update device page table after migrate_vma_pages() because
510  * both destination and source page are still locked, and the mmap_lock is held
511  * in read mode (hence no one can unmap the range being migrated).
512  *
513  * Once the caller is done cleaning up things and updating its page table (if it
514  * chose to do so, this is not an obligation) it finally calls
515  * migrate_vma_finalize() to update the CPU page table to point to new pages
516  * for successfully migrated pages or otherwise restore the CPU page table to
517  * point to the original source pages.
518  */
519 int migrate_vma_setup(struct migrate_vma *args)
520 {
521         long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
522 
523         args->start &= PAGE_MASK;
524         args->end &= PAGE_MASK;
525         if (!args->vma || is_vm_hugetlb_page(args->vma) ||
526             (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
527                 return -EINVAL;
528         if (nr_pages <= 0)
529                 return -EINVAL;
530         if (args->start < args->vma->vm_start ||
531             args->start >= args->vma->vm_end)
532                 return -EINVAL;
533         if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
534                 return -EINVAL;
535         if (!args->src || !args->dst)
536                 return -EINVAL;
537         if (args->fault_page && !is_device_private_page(args->fault_page))
538                 return -EINVAL;
539 
540         memset(args->src, 0, sizeof(*args->src) * nr_pages);
541         args->cpages = 0;
542         args->npages = 0;
543 
544         migrate_vma_collect(args);
545 
546         if (args->cpages)
547                 migrate_vma_unmap(args);
548 
549         /*
550          * At this point pages are locked and unmapped, and thus they have
551          * stable content and can safely be copied to destination memory that
552          * is allocated by the drivers.
553          */
554         return 0;
555 
556 }
557 EXPORT_SYMBOL(migrate_vma_setup);
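
The kernel-doc above lays out the driver-side sequence: migrate_vma_setup(), then allocate, lock, and fill destination pages, then migrate_vma_pages() and migrate_vma_finalize(). The following sketch illustrates that flow for pulling anonymous system pages into device memory. It is a minimal, hedged outline rather than code from this file: the example_alloc_device_page() and example_copy_to_device() helpers are hypothetical stand-ins for driver-specific memory management, and start/end are assumed to be page-aligned addresses inside vma with the mmap_lock held for read.

static int example_migrate_to_device(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     void *pgmap_owner)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.pgmap_owner	= pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long i;
	int ret = -ENOMEM;

	args.src = kcalloc(npages, sizeof(*args.src), GFP_KERNEL);
	args.dst = kcalloc(npages, sizeof(*args.dst), GFP_KERNEL);
	if (!args.src || !args.dst)
		goto out;

	/* Collect, lock and unmap the source pages. */
	ret = migrate_vma_setup(&args);
	if (ret)
		goto out;

	for (i = 0; i < args.npages; i++) {
		struct page *spage = migrate_pfn_to_page(args.src[i]);
		struct page *dpage;

		/* Skip entries that could not be isolated and unmapped. */
		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/* Hypothetical helper: grab one page of device memory. */
		dpage = example_alloc_device_page();
		if (!dpage)
			continue;

		/* Destination pages must be locked before migrate_vma_pages(). */
		lock_page(dpage);

		/*
		 * Hypothetical copy helper; spage may be NULL for pte_none()
		 * or zero-page entries, in which case the destination should
		 * simply be cleared.
		 */
		example_copy_to_device(dpage, spage);

		/* migrate_pfn() sets MIGRATE_PFN_VALID for us. */
		args.dst[i] = migrate_pfn(page_to_pfn(dpage));
	}

	migrate_vma_pages(&args);
	/* A real driver would update its device page tables here. */
	migrate_vma_finalize(&args);
	ret = 0;
out:
	kfree(args.dst);
	kfree(args.src);
	return ret;
}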
558 
559 /*
560  * This code closely matches the code in:
561  *   __handle_mm_fault()
562  *     handle_pte_fault()
563  *       do_anonymous_page()
564  * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
565  * private or coherent page.
566  */
567 static void migrate_vma_insert_page(struct migrate_vma *migrate,
568                                     unsigned long addr,
569                                     struct page *page,
570                                     unsigned long *src)
571 {
572         struct folio *folio = page_folio(page);
573         struct vm_area_struct *vma = migrate->vma;
574         struct mm_struct *mm = vma->vm_mm;
575         bool flush = false;
576         spinlock_t *ptl;
577         pte_t entry;
578         pgd_t *pgdp;
579         p4d_t *p4dp;
580         pud_t *pudp;
581         pmd_t *pmdp;
582         pte_t *ptep;
583         pte_t orig_pte;
584 
585         /* Only allow populating anonymous memory */
586         if (!vma_is_anonymous(vma))
587                 goto abort;
588 
589         pgdp = pgd_offset(mm, addr);
590         p4dp = p4d_alloc(mm, pgdp, addr);
591         if (!p4dp)
592                 goto abort;
593         pudp = pud_alloc(mm, p4dp, addr);
594         if (!pudp)
595                 goto abort;
596         pmdp = pmd_alloc(mm, pudp, addr);
597         if (!pmdp)
598                 goto abort;
599         if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
600                 goto abort;
601         if (pte_alloc(mm, pmdp))
602                 goto abort;
603         if (unlikely(anon_vma_prepare(vma)))
604                 goto abort;
605         if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
606                 goto abort;
607 
608         /*
609          * The memory barrier inside __folio_mark_uptodate makes sure that
610          * preceding stores to the folio contents become visible before
611          * the set_pte_at() write.
612          */
613         __folio_mark_uptodate(folio);
614 
615         if (folio_is_device_private(folio)) {
616                 swp_entry_t swp_entry;
617 
618                 if (vma->vm_flags & VM_WRITE)
619                         swp_entry = make_writable_device_private_entry(
620                                                 page_to_pfn(page));
621                 else
622                         swp_entry = make_readable_device_private_entry(
623                                                 page_to_pfn(page));
624                 entry = swp_entry_to_pte(swp_entry);
625         } else {
626                 if (folio_is_zone_device(folio) &&
627                     !folio_is_device_coherent(folio)) {
628                         pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
629                         goto abort;
630                 }
631                 entry = mk_pte(page, vma->vm_page_prot);
632                 if (vma->vm_flags & VM_WRITE)
633                         entry = pte_mkwrite(pte_mkdirty(entry), vma);
634         }
635 
636         ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
637         if (!ptep)
638                 goto abort;
639         orig_pte = ptep_get(ptep);
640 
641         if (check_stable_address_space(mm))
642                 goto unlock_abort;
643 
644         if (pte_present(orig_pte)) {
645                 unsigned long pfn = pte_pfn(orig_pte);
646 
647                 if (!is_zero_pfn(pfn))
648                         goto unlock_abort;
649                 flush = true;
650         } else if (!pte_none(orig_pte))
651                 goto unlock_abort;
652 
653         /*
654          * Check for userfaultfd but do not deliver the fault. Instead,
655          * just back off.
656          */
657         if (userfaultfd_missing(vma))
658                 goto unlock_abort;
659 
660         inc_mm_counter(mm, MM_ANONPAGES);
661         folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
662         if (!folio_is_zone_device(folio))
663                 folio_add_lru_vma(folio, vma);
664         folio_get(folio);
665 
666         if (flush) {
667                 flush_cache_page(vma, addr, pte_pfn(orig_pte));
668                 ptep_clear_flush(vma, addr, ptep);
669         }
670         set_pte_at(mm, addr, ptep, entry);
671         update_mmu_cache(vma, addr, ptep);
672 
673         pte_unmap_unlock(ptep, ptl);
674         *src = MIGRATE_PFN_MIGRATE;
675         return;
676 
677 unlock_abort:
678         pte_unmap_unlock(ptep, ptl);
679 abort:
680         *src &= ~MIGRATE_PFN_MIGRATE;
681 }
682 
683 static void __migrate_device_pages(unsigned long *src_pfns,
684                                 unsigned long *dst_pfns, unsigned long npages,
685                                 struct migrate_vma *migrate)
686 {
687         struct mmu_notifier_range range;
688         unsigned long i;
689         bool notified = false;
690 
691         for (i = 0; i < npages; i++) {
692                 struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
693                 struct page *page = migrate_pfn_to_page(src_pfns[i]);
694                 struct address_space *mapping;
695                 struct folio *newfolio, *folio;
696                 int r, extra_cnt = 0;
697 
698                 if (!newpage) {
699                         src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
700                         continue;
701                 }
702 
703                 if (!page) {
704                         unsigned long addr;
705 
706                         if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
707                                 continue;
708 
709                         /*
710                          * The only time there is no vma is when called from
711                          * migrate_device_coherent_page(). However this isn't
712                          * called if the page could not be unmapped.
713                          */
714                         VM_BUG_ON(!migrate);
715                         addr = migrate->start + i*PAGE_SIZE;
716                         if (!notified) {
717                                 notified = true;
718 
719                                 mmu_notifier_range_init_owner(&range,
720                                         MMU_NOTIFY_MIGRATE, 0,
721                                         migrate->vma->vm_mm, addr, migrate->end,
722                                         migrate->pgmap_owner);
723                                 mmu_notifier_invalidate_range_start(&range);
724                         }
725                         migrate_vma_insert_page(migrate, addr, newpage,
726                                                 &src_pfns[i]);
727                         continue;
728                 }
729 
730                 newfolio = page_folio(newpage);
731                 folio = page_folio(page);
732                 mapping = folio_mapping(folio);
733 
734                 if (folio_is_device_private(newfolio) ||
735                     folio_is_device_coherent(newfolio)) {
736                         if (mapping) {
737                                 /*
738                                  * For now only support anonymous memory migrating to
739                                  * device private or coherent memory.
740                                  *
741                                  * Try to get rid of swap cache if possible.
742                                  */
743                                 if (!folio_test_anon(folio) ||
744                                     !folio_free_swap(folio)) {
745                                         src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
746                                         continue;
747                                 }
748                         }
749                 } else if (folio_is_zone_device(newfolio)) {
750                         /*
751                          * Other types of ZONE_DEVICE page are not supported.
752                          */
753                         src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
754                         continue;
755                 }
756 
757                 BUG_ON(folio_test_writeback(folio));
758 
759                 if (migrate && migrate->fault_page == page)
760                         extra_cnt = 1;
761                 r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
762                 if (r != MIGRATEPAGE_SUCCESS)
763                         src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
764                 else
765                         folio_migrate_flags(newfolio, folio);
766         }
767 
768         if (notified)
769                 mmu_notifier_invalidate_range_end(&range);
770 }
771 
772 /**
773  * migrate_device_pages() - migrate meta-data from src page to dst page
774  * @src_pfns: src_pfns returned from migrate_device_range()
775  * @dst_pfns: array of pfns allocated by the driver to migrate memory to
776  * @npages: number of pages in the range
777  *
778  * Equivalent to migrate_vma_pages(). This is called to migrate struct page
779  * meta-data from source struct page to destination.
780  */
781 void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns,
782                         unsigned long npages)
783 {
784         __migrate_device_pages(src_pfns, dst_pfns, npages, NULL);
785 }
786 EXPORT_SYMBOL(migrate_device_pages);
787 
788 /**
789  * migrate_vma_pages() - migrate meta-data from src page to dst page
790  * @migrate: migrate struct containing all migration information
791  *
792  * This migrates struct page meta-data from source struct page to destination
793  * struct page. This effectively finishes the migration from source page to the
794  * destination page.
795  */
796 void migrate_vma_pages(struct migrate_vma *migrate)
797 {
798         __migrate_device_pages(migrate->src, migrate->dst, migrate->npages, migrate);
799 }
800 EXPORT_SYMBOL(migrate_vma_pages);
801 
802 /*
803  * migrate_device_finalize() - complete page migration
804  * @src_pfns: src_pfns returned from migrate_device_range()
805  * @dst_pfns: array of pfns allocated by the driver to migrate memory to
806  * @npages: number of pages in the range
807  *
808  * Completes migration of the page by removing special migration entries.
809  * Drivers must ensure copying of page data is complete and visible to the CPU
810  * before calling this.
811  */
812 void migrate_device_finalize(unsigned long *src_pfns,
813                         unsigned long *dst_pfns, unsigned long npages)
814 {
815         unsigned long i;
816 
817         for (i = 0; i < npages; i++) {
818                 struct folio *dst, *src;
819                 struct page *newpage = migrate_pfn_to_page(dst_pfns[i]);
820                 struct page *page = migrate_pfn_to_page(src_pfns[i]);
821 
822                 if (!page) {
823                         if (newpage) {
824                                 unlock_page(newpage);
825                                 put_page(newpage);
826                         }
827                         continue;
828                 }
829 
830                 if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
831                         if (newpage) {
832                                 unlock_page(newpage);
833                                 put_page(newpage);
834                         }
835                         newpage = page;
836                 }
837 
838                 src = page_folio(page);
839                 dst = page_folio(newpage);
840                 remove_migration_ptes(src, dst, false);
841                 folio_unlock(src);
842 
843                 if (is_zone_device_page(page))
844                         put_page(page);
845                 else
846                         putback_lru_page(page);
847 
848                 if (newpage != page) {
849                         unlock_page(newpage);
850                         if (is_zone_device_page(newpage))
851                                 put_page(newpage);
852                         else
853                                 putback_lru_page(newpage);
854                 }
855         }
856 }
857 EXPORT_SYMBOL(migrate_device_finalize);
858 
859 /**
860  * migrate_vma_finalize() - restore CPU page table entry
861  * @migrate: migrate struct containing all migration information
862  *
863  * This replaces the special migration pte entry with either a mapping to the
864  * new page if migration was successful for that page, or to the original page
865  * otherwise.
866  *
867  * This also unlocks the pages and puts them back on the lru, or drops the extra
868  * refcount, for device pages.
869  */
870 void migrate_vma_finalize(struct migrate_vma *migrate)
871 {
872         migrate_device_finalize(migrate->src, migrate->dst, migrate->npages);
873 }
874 EXPORT_SYMBOL(migrate_vma_finalize);
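
For the reverse direction that the setup documentation warns about, a CPU fault on a device-private page that must be migrated back to system memory (with VM_FAULT_SIGBUS only as a last resort), a driver's migrate_to_ram() callback typically wraps the same three calls around a single page. A rough sketch under stated assumptions: example_copy_from_device() is a hypothetical stand-in for the driver's device-to-host copy, and the pgmap owner is taken from the faulting page.

static vm_fault_t example_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct page *dpage, *spage = vmf->page;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address & PAGE_MASK,
		.end		= (vmf->address & PAGE_MASK) + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.fault_page	= spage,
		.pgmap_owner	= spage->pgmap->owner,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};

	if (migrate_vma_setup(&args))
		return VM_FAULT_SIGBUS;
	/* Raced with another migration; the pte has already been restored. */
	if (!(src_pfn & MIGRATE_PFN_MIGRATE))
		return 0;

	/* A vma/node aware allocation would be preferable in a real driver. */
	dpage = alloc_page(GFP_HIGHUSER_MOVABLE);
	if (!dpage) {
		migrate_vma_finalize(&args);
		return VM_FAULT_SIGBUS;
	}
	lock_page(dpage);
	example_copy_from_device(dpage, spage);	/* hypothetical copy helper */
	dst_pfn = migrate_pfn(page_to_pfn(dpage));

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}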
875 
876 /**
877  * migrate_device_range() - migrate device private pfns to normal memory.
878  * @src_pfns: array large enough to hold migrating source device private pfns.
879  * @start: starting pfn in the range to migrate.
880  * @npages: number of pages to migrate.
881  *
882  * migrate_device_range() is similar in concept to migrate_vma_setup() except
883  * that instead of looking up pages based on virtual address mappings, a range
884  * of device pfns that should be migrated to system memory is supplied.
885  *
886  * This is useful when a driver needs to free device memory but doesn't know the
887  * virtual mappings of every page that may be in device memory. For example this
888  * is often the case when a driver is being unloaded or unbound from a device.
889  *
890  * Like migrate_vma_setup() this function will take a reference and lock any
891  * migrating pages that aren't free before unmapping them. Drivers may then
892  * allocate destination pages and start copying data from the device to CPU
893  * memory before calling migrate_device_pages().
894  */
895 int migrate_device_range(unsigned long *src_pfns, unsigned long start,
896                         unsigned long npages)
897 {
898         unsigned long i, pfn;
899 
900         for (pfn = start, i = 0; i < npages; pfn++, i++) {
901                 struct page *page = pfn_to_page(pfn);
902 
903                 if (!get_page_unless_zero(page)) {
904                         src_pfns[i] = 0;
905                         continue;
906                 }
907 
908                 if (!trylock_page(page)) {
909                         src_pfns[i] = 0;
910                         put_page(page);
911                         continue;
912                 }
913 
914                 src_pfns[i] = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
915         }
916 
917         migrate_device_unmap(src_pfns, npages, NULL);
918 
919         return 0;
920 }
921 EXPORT_SYMBOL(migrate_device_range);
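
Putting the pfn-based helpers together, a driver tearing down its device memory might evict a contiguous range roughly as follows. This is an illustrative sketch, not code from this file: the buffer handling, the GFP choice, and the example_copy_from_device() helper are assumptions standing in for driver-specific details.

static int example_evict_device_range(unsigned long start_pfn,
				      unsigned long npages)
{
	unsigned long *src_pfns, *dst_pfns;
	unsigned long i;
	int ret = -ENOMEM;

	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
	if (!src_pfns || !dst_pfns)
		goto out;

	/* Reference, lock and unmap any device pages still in use. */
	ret = migrate_device_range(src_pfns, start_pfn, npages);
	if (ret)
		goto out;

	for (i = 0; i < npages; i++) {
		struct page *spage = migrate_pfn_to_page(src_pfns[i]);
		struct page *dpage;

		if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = alloc_page(GFP_HIGHUSER);
		if (!dpage)
			continue;	/* this entry simply fails to migrate */

		lock_page(dpage);
		example_copy_from_device(dpage, spage);	/* hypothetical copy */
		dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
	}

	/* Copies must be complete and visible before finalize (see above). */
	migrate_device_pages(src_pfns, dst_pfns, npages);
	migrate_device_finalize(src_pfns, dst_pfns, npages);
	ret = 0;
out:
	kfree(dst_pfns);
	kfree(src_pfns);
	return ret;
}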
922 
923 /*
924  * Migrate a device coherent page back to normal memory. The caller should have
925  * a reference on the page, which will be copied to the new page if migration is
926  * successful or dropped on failure.
927  */
928 int migrate_device_coherent_page(struct page *page)
929 {
930         unsigned long src_pfn, dst_pfn = 0;
931         struct page *dpage;
932 
933         WARN_ON_ONCE(PageCompound(page));
934 
935         lock_page(page);
936         src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE;
937 
938         /*
939          * We don't have a VMA and don't need to walk the page tables to find
940          * the source page. So call migrate_device_unmap() directly to unmap the
941          * page as migrate_vma_setup() will fail if args.vma == NULL.
942          */
943         migrate_device_unmap(&src_pfn, 1, NULL);
944         if (!(src_pfn & MIGRATE_PFN_MIGRATE))
945                 return -EBUSY;
946 
947         dpage = alloc_page(GFP_USER | __GFP_NOWARN);
948         if (dpage) {
949                 lock_page(dpage);
950                 dst_pfn = migrate_pfn(page_to_pfn(dpage));
951         }
952 
953         migrate_device_pages(&src_pfn, &dst_pfn, 1);
954         if (src_pfn & MIGRATE_PFN_MIGRATE)
955                 copy_highpage(dpage, page);
956         migrate_device_finalize(&src_pfn, &dst_pfn, 1);
957 
958         if (src_pfn & MIGRATE_PFN_MIGRATE)
959                 return 0;
960         return -EBUSY;
961 }
962 
