TOMOYO Linux Cross Reference
Linux/mm/migrate.c


  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Memory Migration functionality - linux/mm/migrate.c
  4  *
  5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
  6  *
  7  * Page migration was first developed in the context of the memory hotplug
  8  * project. The main authors of the migration code are:
  9  *
 10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 11  * Hirokazu Takahashi <taka@valinux.co.jp>
 12  * Dave Hansen <haveblue@us.ibm.com>
 13  * Christoph Lameter
 14  */
 15 
 16 #include <linux/migrate.h>
 17 #include <linux/export.h>
 18 #include <linux/swap.h>
 19 #include <linux/swapops.h>
 20 #include <linux/pagemap.h>
 21 #include <linux/buffer_head.h>
 22 #include <linux/mm_inline.h>
 23 #include <linux/nsproxy.h>
 24 #include <linux/ksm.h>
 25 #include <linux/rmap.h>
 26 #include <linux/topology.h>
 27 #include <linux/cpu.h>
 28 #include <linux/cpuset.h>
 29 #include <linux/writeback.h>
 30 #include <linux/mempolicy.h>
 31 #include <linux/vmalloc.h>
 32 #include <linux/security.h>
 33 #include <linux/backing-dev.h>
 34 #include <linux/compaction.h>
 35 #include <linux/syscalls.h>
 36 #include <linux/compat.h>
 37 #include <linux/hugetlb.h>
 38 #include <linux/hugetlb_cgroup.h>
 39 #include <linux/gfp.h>
 40 #include <linux/pfn_t.h>
 41 #include <linux/memremap.h>
 42 #include <linux/userfaultfd_k.h>
 43 #include <linux/balloon_compaction.h>
 44 #include <linux/page_idle.h>
 45 #include <linux/page_owner.h>
 46 #include <linux/sched/mm.h>
 47 #include <linux/ptrace.h>
 48 #include <linux/oom.h>
 49 #include <linux/memory.h>
 50 #include <linux/random.h>
 51 #include <linux/sched/sysctl.h>
 52 #include <linux/memory-tiers.h>
 53 
 54 #include <asm/tlbflush.h>
 55 
 56 #include <trace/events/migrate.h>
 57 
 58 #include "internal.h"
 59 
 60 bool isolate_movable_page(struct page *page, isolate_mode_t mode)
 61 {
 62         struct folio *folio = folio_get_nontail_page(page);
 63         const struct movable_operations *mops;
 64 
 65         /*
 66          * Avoid burning cycles with pages that are yet under __free_pages(),
 67          * or just got freed under us.
 68          *
 69          * In case we 'win' a race for a movable page being freed under us and
 70          * raise its refcount, preventing __free_pages() from doing its job,
 71          * the put_page() at the end of this block will take care of
 72          * releasing this page, thus avoiding a nasty leakage.
 73          */
 74         if (!folio)
 75                 goto out;
 76 
 77         if (unlikely(folio_test_slab(folio)))
 78                 goto out_putfolio;
 79         /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
 80         smp_rmb();
 81         /*
 82          * Check movable flag before taking the page lock because
 83          * we use non-atomic bitops on newly allocated page flags so
 84          * unconditionally grabbing the lock ruins page's owner side.
 85          */
 86         if (unlikely(!__folio_test_movable(folio)))
 87                 goto out_putfolio;
 88         /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
 89         smp_rmb();
 90         if (unlikely(folio_test_slab(folio)))
 91                 goto out_putfolio;
 92 
 93         /*
 94          * As movable pages are not isolated from LRU lists, concurrent
 95          * compaction threads can race against page migration functions
 96          * as well as race against the release of a page.
 97          *
 98          * In order to avoid having an already isolated movable page
 99          * being (wrongly) re-isolated while it is under migration,
100          * or to avoid attempting to isolate pages being released,
101          * let's be sure we have the page lock
102          * before proceeding with the movable page isolation steps.
103          */
104         if (unlikely(!folio_trylock(folio)))
105                 goto out_putfolio;
106 
107         if (!folio_test_movable(folio) || folio_test_isolated(folio))
108                 goto out_no_isolated;
109 
110         mops = folio_movable_ops(folio);
111         VM_BUG_ON_FOLIO(!mops, folio);
112 
113         if (!mops->isolate_page(&folio->page, mode))
114                 goto out_no_isolated;
115 
116         /* Driver shouldn't use the isolated flag */
117         WARN_ON_ONCE(folio_test_isolated(folio));
118         folio_set_isolated(folio);
119         folio_unlock(folio);
120 
121         return true;
122 
123 out_no_isolated:
124         folio_unlock(folio);
125 out_putfolio:
126         folio_put(folio);
127 out:
128         return false;
129 }
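
/*
 * Illustrative sketch (not part of this file): a driver whose pages should be
 * handled by isolate_movable_page()/putback_movable_pages() is assumed to
 * provide movable_operations and to mark each such page with
 * __SetPageMovable() (typically while holding the page lock), roughly:
 *
 *	static const struct movable_operations mydrv_mops = {
 *		.isolate_page	= mydrv_isolate_page,
 *		.migrate_page	= mydrv_migrate_page,
 *		.putback_page	= mydrv_putback_page,
 *	};
 *
 *	__SetPageMovable(page, &mydrv_mops);
 *
 * mydrv_* are hypothetical names; see balloon_compaction and zsmalloc for
 * in-tree users of this interface.
 */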
130 
131 static void putback_movable_folio(struct folio *folio)
132 {
133         const struct movable_operations *mops = folio_movable_ops(folio);
134 
135         mops->putback_page(&folio->page);
136         folio_clear_isolated(folio);
137 }
138 
139 /*
140  * Put previously isolated pages back onto the appropriate lists
141  * from where they were once taken off for compaction/migration.
142  *
143  * This function shall be used whenever the isolated pageset has been
144  * built from lru, balloon or hugetlbfs pages. See isolate_migratepages_range()
145  * and isolate_hugetlb().
146  */
147 void putback_movable_pages(struct list_head *l)
148 {
149         struct folio *folio;
150         struct folio *folio2;
151 
152         list_for_each_entry_safe(folio, folio2, l, lru) {
153                 if (unlikely(folio_test_hugetlb(folio))) {
154                         folio_putback_active_hugetlb(folio);
155                         continue;
156                 }
157                 list_del(&folio->lru);
158                 /*
159                  * We isolated non-lru movable folio so here we can use
160                  * __folio_test_movable because LRU folio's mapping cannot
161                  * have PAGE_MAPPING_MOVABLE.
162                  */
163                 if (unlikely(__folio_test_movable(folio))) {
164                         VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
165                         folio_lock(folio);
166                         if (folio_test_movable(folio))
167                                 putback_movable_folio(folio);
168                         else
169                                 folio_clear_isolated(folio);
170                         folio_unlock(folio);
171                         folio_put(folio);
172                 } else {
173                         node_stat_mod_folio(folio, NR_ISOLATED_ANON +
174                                         folio_is_file_lru(folio), -folio_nr_pages(folio));
175                         folio_putback_lru(folio);
176                 }
177         }
178 }
179 
180 /*
181  * Restore a potential migration pte to a working pte entry
182  */
183 static bool remove_migration_pte(struct folio *folio,
184                 struct vm_area_struct *vma, unsigned long addr, void *old)
185 {
186         DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
187 
188         while (page_vma_mapped_walk(&pvmw)) {
189                 rmap_t rmap_flags = RMAP_NONE;
190                 pte_t old_pte;
191                 pte_t pte;
192                 swp_entry_t entry;
193                 struct page *new;
194                 unsigned long idx = 0;
195 
196                 /* pgoff is invalid for ksm pages, but they are never large */
197                 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
198                         idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
199                 new = folio_page(folio, idx);
200 
201 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
202                 /* PMD-mapped THP migration entry */
203                 if (!pvmw.pte) {
204                         VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
205                                         !folio_test_pmd_mappable(folio), folio);
206                         remove_migration_pmd(&pvmw, new);
207                         continue;
208                 }
209 #endif
210 
211                 folio_get(folio);
212                 pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
213                 old_pte = ptep_get(pvmw.pte);
214 
215                 entry = pte_to_swp_entry(old_pte);
216                 if (!is_migration_entry_young(entry))
217                         pte = pte_mkold(pte);
218                 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
219                         pte = pte_mkdirty(pte);
220                 if (pte_swp_soft_dirty(old_pte))
221                         pte = pte_mksoft_dirty(pte);
222                 else
223                         pte = pte_clear_soft_dirty(pte);
224 
225                 if (is_writable_migration_entry(entry))
226                         pte = pte_mkwrite(pte, vma);
227                 else if (pte_swp_uffd_wp(old_pte))
228                         pte = pte_mkuffd_wp(pte);
229 
230                 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
231                         rmap_flags |= RMAP_EXCLUSIVE;
232 
233                 if (unlikely(is_device_private_page(new))) {
234                         if (pte_write(pte))
235                                 entry = make_writable_device_private_entry(
236                                                         page_to_pfn(new));
237                         else
238                                 entry = make_readable_device_private_entry(
239                                                         page_to_pfn(new));
240                         pte = swp_entry_to_pte(entry);
241                         if (pte_swp_soft_dirty(old_pte))
242                                 pte = pte_swp_mksoft_dirty(pte);
243                         if (pte_swp_uffd_wp(old_pte))
244                                 pte = pte_swp_mkuffd_wp(pte);
245                 }
246 
247 #ifdef CONFIG_HUGETLB_PAGE
248                 if (folio_test_hugetlb(folio)) {
249                         struct hstate *h = hstate_vma(vma);
250                         unsigned int shift = huge_page_shift(h);
251                         unsigned long psize = huge_page_size(h);
252 
253                         pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
254                         if (folio_test_anon(folio))
255                                 hugetlb_add_anon_rmap(folio, vma, pvmw.address,
256                                                       rmap_flags);
257                         else
258                                 hugetlb_add_file_rmap(folio);
259                         set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
260                                         psize);
261                 } else
262 #endif
263                 {
264                         if (folio_test_anon(folio))
265                                 folio_add_anon_rmap_pte(folio, new, vma,
266                                                         pvmw.address, rmap_flags);
267                         else
268                                 folio_add_file_rmap_pte(folio, new, vma);
269                         set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
270                 }
271                 if (vma->vm_flags & VM_LOCKED)
272                         mlock_drain_local();
273 
274                 trace_remove_migration_pte(pvmw.address, pte_val(pte),
275                                            compound_order(new));
276 
277                 /* No need to invalidate - it was non-present before */
278                 update_mmu_cache(vma, pvmw.address, pvmw.pte);
279         }
280 
281         return true;
282 }
283 
284 /*
285  * Get rid of all migration entries and replace them by
286  * references to the indicated page.
287  */
288 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
289 {
290         struct rmap_walk_control rwc = {
291                 .rmap_one = remove_migration_pte,
292                 .arg = src,
293         };
294 
295         if (locked)
296                 rmap_walk_locked(dst, &rwc);
297         else
298                 rmap_walk(dst, &rwc);
299 }
300 
301 /*
302  * Something used the pte of a page under migration. We need to
303  * get to the page and wait until migration is finished.
304  * When we return from this function the fault will be retried.
305  */
306 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
307                           unsigned long address)
308 {
309         spinlock_t *ptl;
310         pte_t *ptep;
311         pte_t pte;
312         swp_entry_t entry;
313 
314         ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
315         if (!ptep)
316                 return;
317 
318         pte = ptep_get(ptep);
319         pte_unmap(ptep);
320 
321         if (!is_swap_pte(pte))
322                 goto out;
323 
324         entry = pte_to_swp_entry(pte);
325         if (!is_migration_entry(entry))
326                 goto out;
327 
328         migration_entry_wait_on_locked(entry, ptl);
329         return;
330 out:
331         spin_unlock(ptl);
332 }
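
/*
 * Typical caller pattern (illustrative, e.g. the fault path in
 * do_swap_page()): when a fault hits a migration entry, the handler waits
 * here and then lets the fault be retried:
 *
 *	entry = pte_to_swp_entry(vmf->orig_pte);
 *	if (is_migration_entry(entry)) {
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 *		return 0;	// fault is retried after migration completes
 *	}
 */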
333 
334 #ifdef CONFIG_HUGETLB_PAGE
335 /*
336  * The vma read lock must be held upon entry. Holding that lock prevents either
337  * the pte or the ptl from being freed.
338  *
339  * This function will release the vma lock before returning.
340  */
341 void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
342 {
343         spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
344         pte_t pte;
345 
346         hugetlb_vma_assert_locked(vma);
347         spin_lock(ptl);
348         pte = huge_ptep_get(vma->vm_mm, addr, ptep);
349 
350         if (unlikely(!is_hugetlb_entry_migration(pte))) {
351                 spin_unlock(ptl);
352                 hugetlb_vma_unlock_read(vma);
353         } else {
354                 /*
355                  * If migration entry existed, safe to release vma lock
356                  * here because the pgtable page won't be freed without the
357                  * pgtable lock released.  See comment right above pgtable
358                  * lock release in migration_entry_wait_on_locked().
359                  */
360                 hugetlb_vma_unlock_read(vma);
361                 migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
362         }
363 }
364 #endif
365 
366 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
367 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
368 {
369         spinlock_t *ptl;
370 
371         ptl = pmd_lock(mm, pmd);
372         if (!is_pmd_migration_entry(*pmd))
373                 goto unlock;
374         migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
375         return;
376 unlock:
377         spin_unlock(ptl);
378 }
379 #endif
380 
381 static int folio_expected_refs(struct address_space *mapping,
382                 struct folio *folio)
383 {
384         int refs = 1;
385         if (!mapping)
386                 return refs;
387 
388         refs += folio_nr_pages(folio);
389         if (folio_test_private(folio))
390                 refs++;
391 
392         return refs;
393 }
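
/*
 * Worked example (illustrative): for an order-2 file-backed folio (4 pages)
 * with private data attached, the expected count is 1 (the reference held by
 * the migration caller) + 4 (page cache references) + 1 (private) = 6.  An
 * anonymous folio without a mapping only expects that single caller
 * reference.
 */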
394 
395 /*
396  * Replace the folio in the mapping.
397  *
398  * The number of remaining references must be:
399  * 1 for anonymous folios without a mapping
400  * 2 for folios with a mapping
401  * 3 for folios with a mapping and PagePrivate/PagePrivate2 set.
402  */
403 static int __folio_migrate_mapping(struct address_space *mapping,
404                 struct folio *newfolio, struct folio *folio, int expected_count)
405 {
406         XA_STATE(xas, &mapping->i_pages, folio_index(folio));
407         struct zone *oldzone, *newzone;
408         int dirty;
409         long nr = folio_nr_pages(folio);
410         long entries, i;
411 
412         if (!mapping) {
413                 /* Take off deferred split queue while frozen and memcg set */
414                 if (folio_test_large(folio) &&
415                     folio_test_large_rmappable(folio)) {
416                         if (!folio_ref_freeze(folio, expected_count))
417                                 return -EAGAIN;
418                         folio_undo_large_rmappable(folio);
419                         folio_ref_unfreeze(folio, expected_count);
420                 }
421 
422                 /* No turning back from here */
423                 newfolio->index = folio->index;
424                 newfolio->mapping = folio->mapping;
425                 if (folio_test_swapbacked(folio))
426                         __folio_set_swapbacked(newfolio);
427 
428                 return MIGRATEPAGE_SUCCESS;
429         }
430 
431         oldzone = folio_zone(folio);
432         newzone = folio_zone(newfolio);
433 
434         xas_lock_irq(&xas);
435         if (!folio_ref_freeze(folio, expected_count)) {
436                 xas_unlock_irq(&xas);
437                 return -EAGAIN;
438         }
439 
440         /* Take off deferred split queue while frozen and memcg set */
441         folio_undo_large_rmappable(folio);
442 
443         /*
444          * Now we know that no one else is looking at the folio:
445          * no turning back from here.
446          */
447         newfolio->index = folio->index;
448         newfolio->mapping = folio->mapping;
449         folio_ref_add(newfolio, nr); /* add cache reference */
450         if (folio_test_swapbacked(folio)) {
451                 __folio_set_swapbacked(newfolio);
452                 if (folio_test_swapcache(folio)) {
453                         folio_set_swapcache(newfolio);
454                         newfolio->private = folio_get_private(folio);
455                 }
456                 entries = nr;
457         } else {
458                 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
459                 entries = 1;
460         }
461 
462         /* Move dirty while folio refs frozen and newfolio not yet exposed */
463         dirty = folio_test_dirty(folio);
464         if (dirty) {
465                 folio_clear_dirty(folio);
466                 folio_set_dirty(newfolio);
467         }
468 
469         /* Swap cache still stores N entries instead of a high-order entry */
470         for (i = 0; i < entries; i++) {
471                 xas_store(&xas, newfolio);
472                 xas_next(&xas);
473         }
474 
475         /*
476          * Drop cache reference from old folio by unfreezing
477          * to one less reference.
478          * We know this isn't the last reference.
479          */
480         folio_ref_unfreeze(folio, expected_count - nr);
481 
482         xas_unlock(&xas);
483         /* Leave irq disabled to prevent preemption while updating stats */
484 
485         /*
486          * If moved to a different zone then also account
487          * the folio for that zone. Other VM counters will be
488          * taken care of when we establish references to the
489          * new folio and drop references to the old folio.
490          *
491          * Note that anonymous folios are accounted for
492          * via NR_FILE_PAGES and NR_ANON_MAPPED if they
493          * are mapped to swap space.
494          */
495         if (newzone != oldzone) {
496                 struct lruvec *old_lruvec, *new_lruvec;
497                 struct mem_cgroup *memcg;
498 
499                 memcg = folio_memcg(folio);
500                 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
501                 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
502 
503                 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
504                 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
505                 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
506                         __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
507                         __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
508 
509                         if (folio_test_pmd_mappable(folio)) {
510                                 __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
511                                 __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
512                         }
513                 }
514 #ifdef CONFIG_SWAP
515                 if (folio_test_swapcache(folio)) {
516                         __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
517                         __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
518                 }
519 #endif
520                 if (dirty && mapping_can_writeback(mapping)) {
521                         __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
522                         __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
523                         __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
524                         __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
525                 }
526         }
527         local_irq_enable();
528 
529         return MIGRATEPAGE_SUCCESS;
530 }
531 
532 int folio_migrate_mapping(struct address_space *mapping,
533                 struct folio *newfolio, struct folio *folio, int extra_count)
534 {
535         int expected_count = folio_expected_refs(mapping, folio) + extra_count;
536 
537         if (folio_ref_count(folio) != expected_count)
538                 return -EAGAIN;
539 
540         return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
541 }
542 EXPORT_SYMBOL(folio_migrate_mapping);
543 
544 /*
545  * The expected number of remaining references is the same as that
546  * of folio_migrate_mapping().
547  */
548 int migrate_huge_page_move_mapping(struct address_space *mapping,
549                                    struct folio *dst, struct folio *src)
550 {
551         XA_STATE(xas, &mapping->i_pages, folio_index(src));
552         int rc, expected_count = folio_expected_refs(mapping, src);
553 
554         if (folio_ref_count(src) != expected_count)
555                 return -EAGAIN;
556 
557         rc = folio_mc_copy(dst, src);
558         if (unlikely(rc))
559                 return rc;
560 
561         xas_lock_irq(&xas);
562         if (!folio_ref_freeze(src, expected_count)) {
563                 xas_unlock_irq(&xas);
564                 return -EAGAIN;
565         }
566 
567         dst->index = src->index;
568         dst->mapping = src->mapping;
569 
570         folio_ref_add(dst, folio_nr_pages(dst));
571 
572         xas_store(&xas, dst);
573 
574         folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
575 
576         xas_unlock_irq(&xas);
577 
578         return MIGRATEPAGE_SUCCESS;
579 }
580 
581 /*
582  * Copy the flags and some other ancillary information
583  */
584 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
585 {
586         int cpupid;
587 
588         if (folio_test_error(folio))
589                 folio_set_error(newfolio);
590         if (folio_test_referenced(folio))
591                 folio_set_referenced(newfolio);
592         if (folio_test_uptodate(folio))
593                 folio_mark_uptodate(newfolio);
594         if (folio_test_clear_active(folio)) {
595                 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
596                 folio_set_active(newfolio);
597         } else if (folio_test_clear_unevictable(folio))
598                 folio_set_unevictable(newfolio);
599         if (folio_test_workingset(folio))
600                 folio_set_workingset(newfolio);
601         if (folio_test_checked(folio))
602                 folio_set_checked(newfolio);
603         /*
604          * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
605          * migration entries. We can still have PG_anon_exclusive set on an
606          * effectively unmapped and unreferenced first sub-page of an
607          * anonymous THP: we can simply copy it here via PG_mappedtodisk.
608          */
609         if (folio_test_mappedtodisk(folio))
610                 folio_set_mappedtodisk(newfolio);
611 
612         /* Move dirty on pages not done by folio_migrate_mapping() */
613         if (folio_test_dirty(folio))
614                 folio_set_dirty(newfolio);
615 
616         if (folio_test_young(folio))
617                 folio_set_young(newfolio);
618         if (folio_test_idle(folio))
619                 folio_set_idle(newfolio);
620 
621         /*
622          * Copy NUMA information to the new page, to prevent over-eager
623          * future migrations of this same page.
624          */
625         cpupid = folio_xchg_last_cpupid(folio, -1);
626         /*
627          * For memory tiering mode, when migrating between slow and fast
628          * memory nodes, reset cpupid, because it is used to record
629          * page access time in slow memory nodes.
630          */
631         if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
632                 bool f_toptier = node_is_toptier(folio_nid(folio));
633                 bool t_toptier = node_is_toptier(folio_nid(newfolio));
634 
635                 if (f_toptier != t_toptier)
636                         cpupid = -1;
637         }
638         folio_xchg_last_cpupid(newfolio, cpupid);
639 
640         folio_migrate_ksm(newfolio, folio);
641         /*
642          * Please do not reorder this without considering how mm/ksm.c's
643          * ksm_get_folio() depends upon ksm_migrate_page() and PageSwapCache().
644          */
645         if (folio_test_swapcache(folio))
646                 folio_clear_swapcache(folio);
647         folio_clear_private(folio);
648 
649         /* page->private contains hugetlb specific flags */
650         if (!folio_test_hugetlb(folio))
651                 folio->private = NULL;
652 
653         /*
654          * If any waiters have accumulated on the new page then
655          * wake them up.
656          */
657         if (folio_test_writeback(newfolio))
658                 folio_end_writeback(newfolio);
659 
660         /*
661          * PG_readahead shares the same bit with PG_reclaim.  The above
662          * folio_end_writeback() may clear PG_readahead mistakenly, so set the
663          * bit after that.
664          */
665         if (folio_test_readahead(folio))
666                 folio_set_readahead(newfolio);
667 
668         folio_copy_owner(newfolio, folio);
669 
670         mem_cgroup_migrate(folio, newfolio);
671 }
672 EXPORT_SYMBOL(folio_migrate_flags);
673 
674 /************************************************************
675  *                    Migration functions
676  ***********************************************************/
677 
678 static int __migrate_folio(struct address_space *mapping, struct folio *dst,
679                            struct folio *src, void *src_private,
680                            enum migrate_mode mode)
681 {
682         int rc, expected_count = folio_expected_refs(mapping, src);
683 
684         /* Check whether src does not have extra refs before we do more work */
685         if (folio_ref_count(src) != expected_count)
686                 return -EAGAIN;
687 
688         rc = folio_mc_copy(dst, src);
689         if (unlikely(rc))
690                 return rc;
691 
692         rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
693         if (rc != MIGRATEPAGE_SUCCESS)
694                 return rc;
695 
696         if (src_private)
697                 folio_attach_private(dst, folio_detach_private(src));
698 
699         folio_migrate_flags(dst, src);
700         return MIGRATEPAGE_SUCCESS;
701 }
702 
703 /**
704  * migrate_folio() - Simple folio migration.
705  * @mapping: The address_space containing the folio.
706  * @dst: The folio to migrate the data to.
707  * @src: The folio containing the current data.
708  * @mode: How to migrate the page.
709  *
710  * Common logic to directly migrate a single LRU folio suitable for
711  * folios that do not use PagePrivate/PagePrivate2.
712  *
713  * Folios are locked upon entry and exit.
714  */
715 int migrate_folio(struct address_space *mapping, struct folio *dst,
716                   struct folio *src, enum migrate_mode mode)
717 {
718         BUG_ON(folio_test_writeback(src));      /* Writeback must be complete */
719         return __migrate_folio(mapping, dst, src, NULL, mode);
720 }
721 EXPORT_SYMBOL(migrate_folio);
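
/*
 * Illustrative wiring (assumption, not taken from this file): a filesystem
 * whose folios carry no private data can point its address_space_operations
 * straight at migrate_folio():
 *
 *	static const struct address_space_operations example_aops = {
 *		.migrate_folio	= migrate_folio,
 *	};
 *
 * example_aops is a hypothetical name; several in-tree filesystems use this
 * pattern.
 */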
722 
723 #ifdef CONFIG_BUFFER_HEAD
724 /* Returns true if all buffers are successfully locked */
725 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
726                                                         enum migrate_mode mode)
727 {
728         struct buffer_head *bh = head;
729         struct buffer_head *failed_bh;
730 
731         do {
732                 if (!trylock_buffer(bh)) {
733                         if (mode == MIGRATE_ASYNC)
734                                 goto unlock;
735                         if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
736                                 goto unlock;
737                         lock_buffer(bh);
738                 }
739 
740                 bh = bh->b_this_page;
741         } while (bh != head);
742 
743         return true;
744 
745 unlock:
746         /* We failed to lock the buffer and cannot stall. */
747         failed_bh = bh;
748         bh = head;
749         while (bh != failed_bh) {
750                 unlock_buffer(bh);
751                 bh = bh->b_this_page;
752         }
753 
754         return false;
755 }
756 
757 static int __buffer_migrate_folio(struct address_space *mapping,
758                 struct folio *dst, struct folio *src, enum migrate_mode mode,
759                 bool check_refs)
760 {
761         struct buffer_head *bh, *head;
762         int rc;
763         int expected_count;
764 
765         head = folio_buffers(src);
766         if (!head)
767                 return migrate_folio(mapping, dst, src, mode);
768 
769         /* Check whether page does not have extra refs before we do more work */
770         expected_count = folio_expected_refs(mapping, src);
771         if (folio_ref_count(src) != expected_count)
772                 return -EAGAIN;
773 
774         if (!buffer_migrate_lock_buffers(head, mode))
775                 return -EAGAIN;
776 
777         if (check_refs) {
778                 bool busy;
779                 bool invalidated = false;
780 
781 recheck_buffers:
782                 busy = false;
783                 spin_lock(&mapping->i_private_lock);
784                 bh = head;
785                 do {
786                         if (atomic_read(&bh->b_count)) {
787                                 busy = true;
788                                 break;
789                         }
790                         bh = bh->b_this_page;
791                 } while (bh != head);
792                 if (busy) {
793                         if (invalidated) {
794                                 rc = -EAGAIN;
795                                 goto unlock_buffers;
796                         }
797                         spin_unlock(&mapping->i_private_lock);
798                         invalidate_bh_lrus();
799                         invalidated = true;
800                         goto recheck_buffers;
801                 }
802         }
803 
804         rc = filemap_migrate_folio(mapping, dst, src, mode);
805         if (rc != MIGRATEPAGE_SUCCESS)
806                 goto unlock_buffers;
807 
808         bh = head;
809         do {
810                 folio_set_bh(bh, dst, bh_offset(bh));
811                 bh = bh->b_this_page;
812         } while (bh != head);
813 
814 unlock_buffers:
815         if (check_refs)
816                 spin_unlock(&mapping->i_private_lock);
817         bh = head;
818         do {
819                 unlock_buffer(bh);
820                 bh = bh->b_this_page;
821         } while (bh != head);
822 
823         return rc;
824 }
825 
826 /**
827  * buffer_migrate_folio() - Migration function for folios with buffers.
828  * @mapping: The address space containing @src.
829  * @dst: The folio to migrate to.
830  * @src: The folio to migrate from.
831  * @mode: How to migrate the folio.
832  *
833  * This function can only be used if the underlying filesystem guarantees
834  * that no other references to @src exist. For example attached buffer
835  * heads are accessed only under the folio lock.  If your filesystem cannot
836  * provide this guarantee, buffer_migrate_folio_norefs() may be more
837  * appropriate.
838  *
839  * Return: 0 on success or a negative errno on failure.
840  */
841 int buffer_migrate_folio(struct address_space *mapping,
842                 struct folio *dst, struct folio *src, enum migrate_mode mode)
843 {
844         return __buffer_migrate_folio(mapping, dst, src, mode, false);
845 }
846 EXPORT_SYMBOL(buffer_migrate_folio);
847 
848 /**
849  * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
850  * @mapping: The address space containing @src.
851  * @dst: The folio to migrate to.
852  * @src: The folio to migrate from.
853  * @mode: How to migrate the folio.
854  *
855  * Like buffer_migrate_folio() except that this variant is more careful
856  * and checks that there are also no buffer head references. This function
857  * is the right one for mappings where buffer heads are directly looked
858  * up and referenced (such as block device mappings).
859  *
860  * Return: 0 on success or a negative errno on failure.
861  */
862 int buffer_migrate_folio_norefs(struct address_space *mapping,
863                 struct folio *dst, struct folio *src, enum migrate_mode mode)
864 {
865         return __buffer_migrate_folio(mapping, dst, src, mode, true);
866 }
867 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
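
/*
 * Usage sketch (assumption; mirrors the block layer's default aops): mappings
 * where buffer heads are looked up and referenced directly wire up
 *
 *	.migrate_folio	= buffer_migrate_folio_norefs,
 *
 * while filesystems that only touch buffer heads under the folio lock can use
 * buffer_migrate_folio() instead.
 */
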
868 #endif /* CONFIG_BUFFER_HEAD */
869 
870 int filemap_migrate_folio(struct address_space *mapping,
871                 struct folio *dst, struct folio *src, enum migrate_mode mode)
872 {
873         return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
874 }
875 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
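
/*
 * Illustrative wiring (assumption): filesystems whose folio private data can
 * simply be carried across to the new folio typically set
 *
 *	.migrate_folio	= filemap_migrate_folio,
 *
 * in their address_space_operations.
 */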
876 
877 /*
878  * Writeback a folio to clean the dirty state
879  */
880 static int writeout(struct address_space *mapping, struct folio *folio)
881 {
882         struct writeback_control wbc = {
883                 .sync_mode = WB_SYNC_NONE,
884                 .nr_to_write = 1,
885                 .range_start = 0,
886                 .range_end = LLONG_MAX,
887                 .for_reclaim = 1
888         };
889         int rc;
890 
891         if (!mapping->a_ops->writepage)
892                 /* No write method for the address space */
893                 return -EINVAL;
894 
895         if (!folio_clear_dirty_for_io(folio))
896                 /* Someone else already triggered a write */
897                 return -EAGAIN;
898 
899         /*
900          * A dirty folio may imply that the underlying filesystem has
901          * the folio on some queue. So the folio must be clean for
902          * migration. Writeout may mean we lose the lock and the
903          * folio state is no longer what we checked for earlier.
904          * At this point we know that the migration attempt cannot
905          * be successful.
906          */
907         remove_migration_ptes(folio, folio, false);
908 
909         rc = mapping->a_ops->writepage(&folio->page, &wbc);
910 
911         if (rc != AOP_WRITEPAGE_ACTIVATE)
912                 /* unlocked. Relock */
913                 folio_lock(folio);
914 
915         return (rc < 0) ? -EIO : -EAGAIN;
916 }
917 
918 /*
919  * Default handling if a filesystem does not provide a migration function.
920  */
921 static int fallback_migrate_folio(struct address_space *mapping,
922                 struct folio *dst, struct folio *src, enum migrate_mode mode)
923 {
924         if (folio_test_dirty(src)) {
925                 /* Only writeback folios in full synchronous migration */
926                 switch (mode) {
927                 case MIGRATE_SYNC:
928                         break;
929                 default:
930                         return -EBUSY;
931                 }
932                 return writeout(mapping, src);
933         }
934 
935         /*
936          * Buffers may be managed in a filesystem specific way.
937          * We must have no buffers or drop them.
938          */
939         if (!filemap_release_folio(src, GFP_KERNEL))
940                 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
941 
942         return migrate_folio(mapping, dst, src, mode);
943 }
944 
945 /*
946  * Move a page to a newly allocated page
947  * The page is locked and all ptes have been successfully removed.
948  *
949  * The new page will have replaced the old page if this function
950  * is successful.
951  *
952  * Return value:
953  *   < 0 - error code
954  *  MIGRATEPAGE_SUCCESS - success
955  */
956 static int move_to_new_folio(struct folio *dst, struct folio *src,
957                                 enum migrate_mode mode)
958 {
959         int rc = -EAGAIN;
960         bool is_lru = !__folio_test_movable(src);
961 
962         VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
963         VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
964 
965         if (likely(is_lru)) {
966                 struct address_space *mapping = folio_mapping(src);
967 
968                 if (!mapping)
969                         rc = migrate_folio(mapping, dst, src, mode);
970                 else if (mapping_inaccessible(mapping))
971                         rc = -EOPNOTSUPP;
972                 else if (mapping->a_ops->migrate_folio)
973                         /*
974                          * Most folios have a mapping and most filesystems
975                          * provide a migrate_folio callback. Anonymous folios
976                          * are part of swap space which also has its own
977                          * migrate_folio callback. This is the most common path
978                          * for page migration.
979                          */
980                         rc = mapping->a_ops->migrate_folio(mapping, dst, src,
981                                                                 mode);
982                 else
983                         rc = fallback_migrate_folio(mapping, dst, src, mode);
984         } else {
985                 const struct movable_operations *mops;
986 
987                 /*
988                  * In case of non-lru page, it could be released after
989                  * isolation step. In that case, we shouldn't try migration.
990                  */
991                 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
992                 if (!folio_test_movable(src)) {
993                         rc = MIGRATEPAGE_SUCCESS;
994                         folio_clear_isolated(src);
995                         goto out;
996                 }
997 
998                 mops = folio_movable_ops(src);
999                 rc = mops->migrate_page(&dst->page, &src->page, mode);
1000                 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
1001                                 !folio_test_isolated(src));
1002         }
1003 
1004         /*
1005          * When successful, old pagecache src->mapping must be cleared before
1006          * src is freed; but stats require that PageAnon be left as PageAnon.
1007          */
1008         if (rc == MIGRATEPAGE_SUCCESS) {
1009                 if (__folio_test_movable(src)) {
1010                         VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1011 
1012                         /*
1013                          * We clear PG_movable under page_lock so any compactor
1014                          * cannot try to migrate this page.
1015                          */
1016                         folio_clear_isolated(src);
1017                 }
1018 
1019                 /*
1020                  * Anonymous and movable src->mapping will be cleared by
1021          * free_pages_prepare(), so don't reset it here: that keeps type
1022          * checks such as PageAnon working.
1023                  */
1024                 if (!folio_mapping_flags(src))
1025                         src->mapping = NULL;
1026 
1027                 if (likely(!folio_is_zone_device(dst)))
1028                         flush_dcache_folio(dst);
1029         }
1030 out:
1031         return rc;
1032 }
1033 
1034 /*
1035  * To record some information during migration, we use the unused private
1036  * field of the struct folio of the newly allocated destination folio.
1037  * This is safe because nobody is using it except us.
1038  */
1039 enum {
1040         PAGE_WAS_MAPPED = BIT(0),
1041         PAGE_WAS_MLOCKED = BIT(1),
1042         PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1043 };
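
/*
 * Note: the helpers below pack these flag bits into the low bits of
 * dst->private alongside the anon_vma pointer.  This relies on struct
 * anon_vma objects being at least 4-byte aligned, so bits 0-1 of the
 * pointer are known to be zero.
 */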
1044 
1045 static void __migrate_folio_record(struct folio *dst,
1046                                    int old_page_state,
1047                                    struct anon_vma *anon_vma)
1048 {
1049         dst->private = (void *)anon_vma + old_page_state;
1050 }
1051 
1052 static void __migrate_folio_extract(struct folio *dst,
1053                                    int *old_page_state,
1054                                    struct anon_vma **anon_vmap)
1055 {
1056         unsigned long private = (unsigned long)dst->private;
1057 
1058         *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1059         *old_page_state = private & PAGE_OLD_STATES;
1060         dst->private = NULL;
1061 }
1062 
1063 /* Restore the source folio to the original state upon failure */
1064 static void migrate_folio_undo_src(struct folio *src,
1065                                    int page_was_mapped,
1066                                    struct anon_vma *anon_vma,
1067                                    bool locked,
1068                                    struct list_head *ret)
1069 {
1070         if (page_was_mapped)
1071                 remove_migration_ptes(src, src, false);
1072         /* Drop an anon_vma reference if we took one */
1073         if (anon_vma)
1074                 put_anon_vma(anon_vma);
1075         if (locked)
1076                 folio_unlock(src);
1077         if (ret)
1078                 list_move_tail(&src->lru, ret);
1079 }
1080 
1081 /* Restore the destination folio to the original state upon failure */
1082 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1083                 free_folio_t put_new_folio, unsigned long private)
1084 {
1085         if (locked)
1086                 folio_unlock(dst);
1087         if (put_new_folio)
1088                 put_new_folio(dst, private);
1089         else
1090                 folio_put(dst);
1091 }
1092 
1093 /* Cleanup src folio upon migration success */
1094 static void migrate_folio_done(struct folio *src,
1095                                enum migrate_reason reason)
1096 {
1097         /*
1098          * Compaction can also migrate non-LRU pages which are
1099          * not accounted to NR_ISOLATED_*. They can be recognized
1100          * via __folio_test_movable().
1101          */
1102         if (likely(!__folio_test_movable(src)))
1103                 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1104                                     folio_is_file_lru(src), -folio_nr_pages(src));
1105 
1106         if (reason != MR_MEMORY_FAILURE)
1107                 /* We release the page in page_handle_poison. */
1108                 folio_put(src);
1109 }
1110 
1111 /* Obtain the lock on page, remove all ptes. */
1112 static int migrate_folio_unmap(new_folio_t get_new_folio,
1113                 free_folio_t put_new_folio, unsigned long private,
1114                 struct folio *src, struct folio **dstp, enum migrate_mode mode,
1115                 enum migrate_reason reason, struct list_head *ret)
1116 {
1117         struct folio *dst;
1118         int rc = -EAGAIN;
1119         int old_page_state = 0;
1120         struct anon_vma *anon_vma = NULL;
1121         bool is_lru = data_race(!__folio_test_movable(src));
1122         bool locked = false;
1123         bool dst_locked = false;
1124 
1125         if (folio_ref_count(src) == 1) {
1126                 /* Folio was freed from under us. So we are done. */
1127                 folio_clear_active(src);
1128                 folio_clear_unevictable(src);
1129                 /* free_pages_prepare() will clear PG_isolated. */
1130                 list_del(&src->lru);
1131                 migrate_folio_done(src, reason);
1132                 return MIGRATEPAGE_SUCCESS;
1133         }
1134 
1135         dst = get_new_folio(src, private);
1136         if (!dst)
1137                 return -ENOMEM;
1138         *dstp = dst;
1139 
1140         dst->private = NULL;
1141 
1142         if (!folio_trylock(src)) {
1143                 if (mode == MIGRATE_ASYNC)
1144                         goto out;
1145 
1146                 /*
1147                  * It's not safe for direct compaction to call lock_page.
1148                  * For example, during page readahead pages are added locked
1149                  * to the LRU. Later, when the IO completes the pages are
1150                  * marked uptodate and unlocked. However, the queueing
1151                  * could be merging multiple pages for one bio (e.g.
1152                  * mpage_readahead). If an allocation happens for the
1153                  * second or third page, the process can end up locking
1154                  * the same page twice and deadlocking. Rather than
1155                  * trying to be clever about what pages can be locked,
1156                  * avoid the use of lock_page for direct compaction
1157                  * altogether.
1158                  */
1159                 if (current->flags & PF_MEMALLOC)
1160                         goto out;
1161 
1162                 /*
1163                  * In "light" mode, we can wait for transient locks (eg
1164                  * inserting a page into the page table), but it's not
1165                  * worth waiting for I/O.
1166                  */
1167                 if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1168                         goto out;
1169 
1170                 folio_lock(src);
1171         }
1172         locked = true;
1173         if (folio_test_mlocked(src))
1174                 old_page_state |= PAGE_WAS_MLOCKED;
1175 
1176         if (folio_test_writeback(src)) {
1177                 /*
1178                  * Only in the case of a full synchronous migration is it
1179                  * necessary to wait for PageWriteback. In the async case,
1180                  * the retry loop is too short and in the sync-light case,
1181                  * the overhead of stalling is too much
1182                  */
1183                 switch (mode) {
1184                 case MIGRATE_SYNC:
1185                         break;
1186                 default:
1187                         rc = -EBUSY;
1188                         goto out;
1189                 }
1190                 folio_wait_writeback(src);
1191         }
1192 
1193         /*
1194          * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1195          * we cannot notice that anon_vma is freed while we migrate a page.
1196          * This get_anon_vma() delays freeing the anon_vma pointer until the
1197          * end of migration. File cache pages are no problem because of the
1198          * page lock: file caches may use write_page() or lock_page() during
1199          * migration, so only anon pages need this care here.
1200          *
1201          * Only folio_get_anon_vma() understands the subtleties of
1202          * getting a hold on an anon_vma from outside one of its mms.
1203          * But if we cannot get anon_vma, then we won't need it anyway,
1204          * because that implies that the anon page is no longer mapped
1205          * (and cannot be remapped so long as we hold the page lock).
1206          */
1207         if (folio_test_anon(src) && !folio_test_ksm(src))
1208                 anon_vma = folio_get_anon_vma(src);
1209 
1210         /*
1211          * Block others from accessing the new page when we get around to
1212          * establishing additional references. We are usually the only one
1213          * holding a reference to dst at this point. We used to have a BUG
1214          * here if folio_trylock(dst) fails, but would like to allow for
1215          * cases where there might be a race with the previous use of dst.
1216          * This is much like races on refcount of oldpage: just don't BUG().
1217          */
1218         if (unlikely(!folio_trylock(dst)))
1219                 goto out;
1220         dst_locked = true;
1221 
1222         if (unlikely(!is_lru)) {
1223                 __migrate_folio_record(dst, old_page_state, anon_vma);
1224                 return MIGRATEPAGE_UNMAP;
1225         }
1226 
1227         /*
1228          * Corner case handling:
1229          * 1. When a new swap-cache page is read into, it is added to the LRU
1230          * 1. When a new swap-cache page is read in, it is added to the LRU
1231          * Calling try_to_unmap() against a src->mapping==NULL page will
1232          * trigger a BUG.  So handle it here.
1233          * 2. An orphaned page (see truncate_cleanup_page) might have
1234          * fs-private metadata. The page can be picked up due to memory
1235          * offlining.  Everywhere else except page reclaim, the page is
1236          * invisible to the vm, so the page can not be migrated.  So try to
1237          * free the metadata, so the page can be freed.
1238          */
1239         if (!src->mapping) {
1240                 if (folio_test_private(src)) {
1241                         try_to_free_buffers(src);
1242                         goto out;
1243                 }
1244         } else if (folio_mapped(src)) {
1245                 /* Establish migration ptes */
1246                 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1247                                !folio_test_ksm(src) && !anon_vma, src);
1248                 try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1249                 old_page_state |= PAGE_WAS_MAPPED;
1250         }
1251 
1252         if (!folio_mapped(src)) {
1253                 __migrate_folio_record(dst, old_page_state, anon_vma);
1254                 return MIGRATEPAGE_UNMAP;
1255         }
1256 
1257 out:
1258         /*
1259          * A folio that has not been unmapped will be restored to
1260          * right list unless we want to retry.
1261          */
1262         if (rc == -EAGAIN)
1263                 ret = NULL;
1264 
1265         migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1266                                anon_vma, locked, ret);
1267         migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1268 
1269         return rc;
1270 }
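
/*
 * Note: migrate_folio_unmap() above and migrate_folio_move() below form a
 * two-phase scheme.  A caller such as migrate_pages_batch() (further down in
 * this file) first unmaps a batch of source folios, getting MIGRATEPAGE_UNMAP
 * for each and flushing TLBs in one go, and then moves each folio to its
 * destination in a second pass.
 */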
1271 
1272 /* Migrate the folio to the newly allocated folio in dst. */
1273 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1274                               struct folio *src, struct folio *dst,
1275                               enum migrate_mode mode, enum migrate_reason reason,
1276                               struct list_head *ret)
1277 {
1278         int rc;
1279         int old_page_state = 0;
1280         struct anon_vma *anon_vma = NULL;
1281         bool is_lru = !__folio_test_movable(src);
1282         struct list_head *prev;
1283 
1284         __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1285         prev = dst->lru.prev;
1286         list_del(&dst->lru);
1287 
1288         rc = move_to_new_folio(dst, src, mode);
1289         if (rc)
1290                 goto out;
1291 
1292         if (unlikely(!is_lru))
1293                 goto out_unlock_both;
1294 
1295         /*
1296          * When successful, push dst to LRU immediately: so that if it
1297          * turns out to be an mlocked page, remove_migration_ptes() will
1298          * automatically build up the correct dst->mlock_count for it.
1299          *
1300          * We would like to do something similar for the old page, when
1301          * unsuccessful, and other cases when a page has been temporarily
1302          * isolated from the unevictable LRU: but this case is the easiest.
1303          */
1304         folio_add_lru(dst);
1305         if (old_page_state & PAGE_WAS_MLOCKED)
1306                 lru_add_drain();
1307 
1308         if (old_page_state & PAGE_WAS_MAPPED)
1309                 remove_migration_ptes(src, dst, false);
1310 
1311 out_unlock_both:
1312         folio_unlock(dst);
1313         set_page_owner_migrate_reason(&dst->page, reason);
1314         /*
1315          * If migration is successful, decrease the refcount of dst,
1316          * which will not free the page because the new page owner
1317          * increased the refcount.
1318          */
1319         folio_put(dst);
1320 
1321         /*
1322          * A folio that has been migrated has all references removed
1323          * and will be freed.
1324          */
1325         list_del(&src->lru);
1326         /* Drop an anon_vma reference if we took one */
1327         if (anon_vma)
1328                 put_anon_vma(anon_vma);
1329         folio_unlock(src);
1330         migrate_folio_done(src, reason);
1331 
1332         return rc;
1333 out:
1334         /*
1335          * A folio that has not been migrated will be restored to
1336          * right list unless we want to retry.
1337          */
1338         if (rc == -EAGAIN) {
1339                 list_add(&dst->lru, prev);
1340                 __migrate_folio_record(dst, old_page_state, anon_vma);
1341                 return rc;
1342         }
1343 
1344         migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1345                                anon_vma, true, ret);
1346         migrate_folio_undo_dst(dst, true, put_new_folio, private);
1347 
1348         return rc;
1349 }
1350 
1351 /*
1352  * Counterpart of migrate_folio_unmap()/migrate_folio_move() for hugepage migration.
1353  *
1354  * This function doesn't wait for the completion of hugepage I/O
1355  * because there is no race between I/O and migration for hugepages.
1356  * Note that currently hugepage I/O occurs only in direct I/O
1357  * where no lock is held and PG_writeback is irrelevant,
1358  * and the writeback status of all subpages is counted in the reference
1359  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1360  * under direct I/O, the reference count of the head page is 512 and a bit more.)
1361  * This means that when we try to migrate a hugepage whose subpages are
1362  * doing direct I/O, some references remain after try_to_unmap() and
1363  * hugepage migration fails without data corruption.
1364  *
1365  * There is also no race when direct I/O is issued on the page under migration,
1366  * because then pte is replaced with migration swap entry and direct I/O code
1367  * will wait in the page fault for migration to complete.
1368  */
1369 static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1370                 free_folio_t put_new_folio, unsigned long private,
1371                 struct folio *src, int force, enum migrate_mode mode,
1372                 int reason, struct list_head *ret)
1373 {
1374         struct folio *dst;
1375         int rc = -EAGAIN;
1376         int page_was_mapped = 0;
1377         struct anon_vma *anon_vma = NULL;
1378         struct address_space *mapping = NULL;
1379 
1380         if (folio_ref_count(src) == 1) {
1381                 /* page was freed from under us. So we are done. */
1382                 folio_putback_active_hugetlb(src);
1383                 return MIGRATEPAGE_SUCCESS;
1384         }
1385 
1386         dst = get_new_folio(src, private);
1387         if (!dst)
1388                 return -ENOMEM;
1389 
1390         if (!folio_trylock(src)) {
1391                 if (!force)
1392                         goto out;
1393                 switch (mode) {
1394                 case MIGRATE_SYNC:
1395                         break;
1396                 default:
1397                         goto out;
1398                 }
1399                 folio_lock(src);
1400         }
1401 
1402         /*
1403          * Check for pages which are in the process of being freed.  Without
1404          * folio_mapping() set, the hugetlbfs-specific move page routine will not
1405          * be called and we could leak usage counts for subpools.
1406          */
1407         if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1408                 rc = -EBUSY;
1409                 goto out_unlock;
1410         }
1411 
1412         if (folio_test_anon(src))
1413                 anon_vma = folio_get_anon_vma(src);
1414 
1415         if (unlikely(!folio_trylock(dst)))
1416                 goto put_anon;
1417 
1418         if (folio_mapped(src)) {
1419                 enum ttu_flags ttu = 0;
1420 
1421                 if (!folio_test_anon(src)) {
1422                         /*
1423                          * In shared mappings, try_to_unmap could potentially
1424                          * call huge_pmd_unshare.  Because of this, take the
1425                          * semaphore in write mode here and set TTU_RMAP_LOCKED
1426                          * to let lower levels know we have taken the lock.
1427                          */
1428                         mapping = hugetlb_folio_mapping_lock_write(src);
1429                         if (unlikely(!mapping))
1430                                 goto unlock_put_anon;
1431 
1432                         ttu = TTU_RMAP_LOCKED;
1433                 }
1434 
1435                 try_to_migrate(src, ttu);
1436                 page_was_mapped = 1;
1437 
1438                 if (ttu & TTU_RMAP_LOCKED)
1439                         i_mmap_unlock_write(mapping);
1440         }
1441 
1442         if (!folio_mapped(src))
1443                 rc = move_to_new_folio(dst, src, mode);
1444 
1445         if (page_was_mapped)
1446                 remove_migration_ptes(src,
1447                         rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1448 
1449 unlock_put_anon:
1450         folio_unlock(dst);
1451 
1452 put_anon:
1453         if (anon_vma)
1454                 put_anon_vma(anon_vma);
1455 
1456         if (rc == MIGRATEPAGE_SUCCESS) {
1457                 move_hugetlb_state(src, dst, reason);
1458                 put_new_folio = NULL;
1459         }
1460 
1461 out_unlock:
1462         folio_unlock(src);
1463 out:
1464         if (rc == MIGRATEPAGE_SUCCESS)
1465                 folio_putback_active_hugetlb(src);
1466         else if (rc != -EAGAIN)
1467                 list_move_tail(&src->lru, ret);
1468 
1469         /*
1470          * If migration was not successful and there's a freeing callback, use
1471          * it.  Otherwise, put_page() will drop the reference grabbed during
1472          * isolation.
1473          */
1474         if (put_new_folio)
1475                 put_new_folio(dst, private);
1476         else
1477                 folio_putback_active_hugetlb(dst);
1478 
1479         return rc;
1480 }
1481 
1482 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
1483                                   enum migrate_mode mode)
1484 {
1485         int rc;
1486 
1487         if (mode == MIGRATE_ASYNC) {
1488                 if (!folio_trylock(folio))
1489                         return -EAGAIN;
1490         } else {
1491                 folio_lock(folio);
1492         }
1493         rc = split_folio_to_list(folio, split_folios);
1494         folio_unlock(folio);
1495         if (!rc)
1496                 list_move_tail(&folio->lru, split_folios);
1497 
1498         return rc;
1499 }
1500 
1501 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1502 #define NR_MAX_BATCHED_MIGRATION        HPAGE_PMD_NR
1503 #else
1504 #define NR_MAX_BATCHED_MIGRATION        512
1505 #endif
1506 #define NR_MAX_MIGRATE_PAGES_RETRY      10
1507 #define NR_MAX_MIGRATE_ASYNC_RETRY      3
1508 #define NR_MAX_MIGRATE_SYNC_RETRY                                       \
1509         (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
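
/*
 * Added note (illustrative arithmetic): with the defaults above,
 * migrate_pages_sync() first makes up to NR_MAX_MIGRATE_ASYNC_RETRY = 3
 * batched MIGRATE_ASYNC passes, and then up to
 * NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY = 10 - 3 = 7
 * synchronous passes over each remaining folio, so a folio is attempted at
 * most NR_MAX_MIGRATE_PAGES_RETRY times in total.
 */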
1510 
1511 struct migrate_pages_stats {
1512         int nr_succeeded;       /* Normal and large folios migrated successfully, in
1513                                    units of base pages */
1514         int nr_failed_pages;    /* Normal and large folios failed to be migrated, in
1515                                    units of base pages.  Untried folios aren't counted */
1516         int nr_thp_succeeded;   /* THP migrated successfully */
1517         int nr_thp_failed;      /* THP failed to be migrated */
1518         int nr_thp_split;       /* THP split before migrating */
1519         int nr_split;   /* Large folio (include THP) split before migrating */
1520 };
1521 
1522 /*
1523  * Returns the number of hugetlb folios that were not migrated, or an error code
1524  * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1525  * any more because the list has become empty or no retryable hugetlb folios
1526  * exist any more. It is the caller's responsibility to call putback_movable_pages()
1527  * only if ret != 0.
1528  */
1529 static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1530                             free_folio_t put_new_folio, unsigned long private,
1531                             enum migrate_mode mode, int reason,
1532                             struct migrate_pages_stats *stats,
1533                             struct list_head *ret_folios)
1534 {
1535         int retry = 1;
1536         int nr_failed = 0;
1537         int nr_retry_pages = 0;
1538         int pass = 0;
1539         struct folio *folio, *folio2;
1540         int rc, nr_pages;
1541 
1542         for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1543                 retry = 0;
1544                 nr_retry_pages = 0;
1545 
1546                 list_for_each_entry_safe(folio, folio2, from, lru) {
1547                         if (!folio_test_hugetlb(folio))
1548                                 continue;
1549 
1550                         nr_pages = folio_nr_pages(folio);
1551 
1552                         cond_resched();
1553 
1554                         /*
1555                          * Migratability of hugepages depends on the architecture and
1556                          * the hugepage size.  This check is necessary because some callers
1557                          * of hugepage migration like soft offline and memory
1558                          * hotremove don't walk through page tables or check whether
1559                          * the hugepage is pmd-based or not before kicking migration.
1560                          */
1561                         if (!hugepage_migration_supported(folio_hstate(folio))) {
1562                                 nr_failed++;
1563                                 stats->nr_failed_pages += nr_pages;
1564                                 list_move_tail(&folio->lru, ret_folios);
1565                                 continue;
1566                         }
1567 
1568                         rc = unmap_and_move_huge_page(get_new_folio,
1569                                                       put_new_folio, private,
1570                                                       folio, pass > 2, mode,
1571                                                       reason, ret_folios);
1572                         /*
1573                          * The rules are:
1574                          *      Success: hugetlb folio will be put back
1575                          *      -EAGAIN: stay on the from list
1576                          *      -ENOMEM: stay on the from list
1577                          *      Other errno: put on ret_folios list
1578                          */
1579                         switch(rc) {
1580                         case -ENOMEM:
1581                                 /*
1582                                  * When memory is low, don't bother to try to migrate
1583                                  * other folios, just exit.
1584                                  */
1585                                 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1586                                 return -ENOMEM;
1587                         case -EAGAIN:
1588                                 retry++;
1589                                 nr_retry_pages += nr_pages;
1590                                 break;
1591                         case MIGRATEPAGE_SUCCESS:
1592                                 stats->nr_succeeded += nr_pages;
1593                                 break;
1594                         default:
1595                                 /*
1596                                  * Permanent failure (-EBUSY, etc.):
1597                                  * unlike -EAGAIN case, the failed folio is
1598                                  * removed from migration folio list and not
1599                                  * retried in the next outer loop.
1600                                  */
1601                                 nr_failed++;
1602                                 stats->nr_failed_pages += nr_pages;
1603                                 break;
1604                         }
1605                 }
1606         }
1607         /*
1608          * nr_failed is the number of hugetlb folios that failed to be migrated.  After
1609          * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1610          * folios as failed.
1611          */
1612         nr_failed += retry;
1613         stats->nr_failed_pages += nr_retry_pages;
1614 
1615         return nr_failed;
1616 }
1617 
1618 /*
1619  * migrate_pages_batch() first unmaps as many folios in the from list as
1620  * possible, then moves the unmapped folios.
1621  *
1622  * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1623  * lock or bit while we have locked more than one folio, which may cause a
1624  * deadlock (e.g., for a loop device).  So, if mode != MIGRATE_ASYNC, the
1625  * length of the from list must be <= 1.
1626  */
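/*
 * Added overview: the function below proceeds in two phases.  A first loop
 * calls migrate_folio_unmap() on each folio; folios that reach
 * MIGRATEPAGE_UNMAP are collected on unmap_folios, with their target folios
 * on dst_folios.  A single try_to_unmap_flush() then flushes the TLB for all
 * of them at once, and a second loop calls migrate_folio_move() to complete
 * the migrations (anything left over is undone in the cleanup at "out:").
 */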
1627 static int migrate_pages_batch(struct list_head *from,
1628                 new_folio_t get_new_folio, free_folio_t put_new_folio,
1629                 unsigned long private, enum migrate_mode mode, int reason,
1630                 struct list_head *ret_folios, struct list_head *split_folios,
1631                 struct migrate_pages_stats *stats, int nr_pass)
1632 {
1633         int retry = 1;
1634         int thp_retry = 1;
1635         int nr_failed = 0;
1636         int nr_retry_pages = 0;
1637         int pass = 0;
1638         bool is_thp = false;
1639         bool is_large = false;
1640         struct folio *folio, *folio2, *dst = NULL, *dst2;
1641         int rc, rc_saved = 0, nr_pages;
1642         LIST_HEAD(unmap_folios);
1643         LIST_HEAD(dst_folios);
1644         bool nosplit = (reason == MR_NUMA_MISPLACED);
1645 
1646         VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1647                         !list_empty(from) && !list_is_singular(from));
1648 
1649         for (pass = 0; pass < nr_pass && retry; pass++) {
1650                 retry = 0;
1651                 thp_retry = 0;
1652                 nr_retry_pages = 0;
1653 
1654                 list_for_each_entry_safe(folio, folio2, from, lru) {
1655                         is_large = folio_test_large(folio);
1656                         is_thp = is_large && folio_test_pmd_mappable(folio);
1657                         nr_pages = folio_nr_pages(folio);
1658 
1659                         cond_resched();
1660 
1661                         /*
1662                          * The rare folio on the deferred split list should
1663                          * be split now. It should not count as a failure,
1664                          * but we increment nr_failed because, without doing so,
1665                          * migrate_pages() may report success with (split but
1666                          * unmigrated) pages still on its fromlist; whereas it
1667                          * always reports success when its fromlist is empty.
1668                          * stats->nr_thp_failed should be increased too,
1669                          * otherwise stats inconsistency will happen when
1670                          * migrate_pages_batch is called via migrate_pages()
1671                          * with MIGRATE_SYNC and MIGRATE_ASYNC.
1672                          *
1673                          * Only check the list membership without removing the
1674                          * folio from the list, since the folio can be on a
1675                          * deferred_split_scan() local list and removing it could
1676                          * corrupt that list.  The folio split process below can
1677                          * handle it with the help of folio_ref_freeze().
1678                          *
1679                          * nr_pages > 2 is needed to avoid checking order-1
1680                          * page cache folios. They exist, in contrast to
1681                          * non-existent order-1 anonymous folios, and do not
1682                          * use _deferred_list.
1683                          */
1684                         if (nr_pages > 2 &&
1685                            !list_empty(&folio->_deferred_list)) {
1686                                 if (!try_split_folio(folio, split_folios, mode)) {
1687                                         nr_failed++;
1688                                         stats->nr_thp_failed += is_thp;
1689                                         stats->nr_thp_split += is_thp;
1690                                         stats->nr_split++;
1691                                         continue;
1692                                 }
1693                         }
1694 
1695                         /*
1696                          * Large folio migration might be unsupported or
1697                          * the allocation might fail, so we should retry
1698                          * on the same folio with the large folio split
1699                          * into normal folios.
1700                          *
1701                          * Split folios are put in split_folios, and
1702                          * we will migrate them after the rest of the
1703                          * list is processed.
1704                          */
1705                         if (!thp_migration_supported() && is_thp) {
1706                                 nr_failed++;
1707                                 stats->nr_thp_failed++;
1708                                 if (!try_split_folio(folio, split_folios, mode)) {
1709                                         stats->nr_thp_split++;
1710                                         stats->nr_split++;
1711                                         continue;
1712                                 }
1713                                 stats->nr_failed_pages += nr_pages;
1714                                 list_move_tail(&folio->lru, ret_folios);
1715                                 continue;
1716                         }
1717 
1718                         rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1719                                         private, folio, &dst, mode, reason,
1720                                         ret_folios);
1721                         /*
1722                          * The rules are:
1723                          *      Success: folio will be freed
1724                          *      Unmap: folio will be put on unmap_folios list,
1725                          *             dst folio put on dst_folios list
1726                          *      -EAGAIN: stay on the from list
1727                          *      -ENOMEM: stay on the from list
1728                          *      Other errno: put on ret_folios list
1729                          */
1730                         switch(rc) {
1731                         case -ENOMEM:
1732                                 /*
1733                                  * When memory is low, don't bother to try to migrate
1734                                  * other folios, move unmapped folios, then exit.
1735                                  */
1736                                 nr_failed++;
1737                                 stats->nr_thp_failed += is_thp;
1738                                 /* Large folio NUMA faulting doesn't split to retry. */
1739                                 if (is_large && !nosplit) {
1740                                         int ret = try_split_folio(folio, split_folios, mode);
1741 
1742                                         if (!ret) {
1743                                                 stats->nr_thp_split += is_thp;
1744                                                 stats->nr_split++;
1745                                                 break;
1746                                         } else if (reason == MR_LONGTERM_PIN &&
1747                                                    ret == -EAGAIN) {
1748                                                 /*
1749                                                  * Try again to split large folio to
1750                                                  * mitigate the failure of longterm pinning.
1751                                                  */
1752                                                 retry++;
1753                                                 thp_retry += is_thp;
1754                                                 nr_retry_pages += nr_pages;
1755                                                 /* Undo duplicated failure counting. */
1756                                                 nr_failed--;
1757                                                 stats->nr_thp_failed -= is_thp;
1758                                                 break;
1759                                         }
1760                                 }
1761 
1762                                 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1763                                 /* nr_failed isn't updated: it is not used, since we return rc_saved */
1764                                 stats->nr_thp_failed += thp_retry;
1765                                 rc_saved = rc;
1766                                 if (list_empty(&unmap_folios))
1767                                         goto out;
1768                                 else
1769                                         goto move;
1770                         case -EAGAIN:
1771                                 retry++;
1772                                 thp_retry += is_thp;
1773                                 nr_retry_pages += nr_pages;
1774                                 break;
1775                         case MIGRATEPAGE_SUCCESS:
1776                                 stats->nr_succeeded += nr_pages;
1777                                 stats->nr_thp_succeeded += is_thp;
1778                                 break;
1779                         case MIGRATEPAGE_UNMAP:
1780                                 list_move_tail(&folio->lru, &unmap_folios);
1781                                 list_add_tail(&dst->lru, &dst_folios);
1782                                 break;
1783                         default:
1784                                 /*
1785                                  * Permanent failure (-EBUSY, etc.):
1786                                  * unlike -EAGAIN case, the failed folio is
1787                                  * removed from migration folio list and not
1788                                  * retried in the next outer loop.
1789                                  */
1790                                 nr_failed++;
1791                                 stats->nr_thp_failed += is_thp;
1792                                 stats->nr_failed_pages += nr_pages;
1793                                 break;
1794                         }
1795                 }
1796         }
1797         nr_failed += retry;
1798         stats->nr_thp_failed += thp_retry;
1799         stats->nr_failed_pages += nr_retry_pages;
1800 move:
1801         /* Flush TLBs for all unmapped folios */
1802         try_to_unmap_flush();
1803 
1804         retry = 1;
1805         for (pass = 0; pass < nr_pass && retry; pass++) {
1806                 retry = 0;
1807                 thp_retry = 0;
1808                 nr_retry_pages = 0;
1809 
1810                 dst = list_first_entry(&dst_folios, struct folio, lru);
1811                 dst2 = list_next_entry(dst, lru);
1812                 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1813                         is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1814                         nr_pages = folio_nr_pages(folio);
1815 
1816                         cond_resched();
1817 
1818                         rc = migrate_folio_move(put_new_folio, private,
1819                                                 folio, dst, mode,
1820                                                 reason, ret_folios);
1821                         /*
1822                          * The rules are:
1823                          *      Success: folio will be freed
1824                          *      -EAGAIN: stay on the unmap_folios list
1825                          *      Other errno: put on ret_folios list
1826                          */
1827                         switch(rc) {
1828                         case -EAGAIN:
1829                                 retry++;
1830                                 thp_retry += is_thp;
1831                                 nr_retry_pages += nr_pages;
1832                                 break;
1833                         case MIGRATEPAGE_SUCCESS:
1834                                 stats->nr_succeeded += nr_pages;
1835                                 stats->nr_thp_succeeded += is_thp;
1836                                 break;
1837                         default:
1838                                 nr_failed++;
1839                                 stats->nr_thp_failed += is_thp;
1840                                 stats->nr_failed_pages += nr_pages;
1841                                 break;
1842                         }
1843                         dst = dst2;
1844                         dst2 = list_next_entry(dst, lru);
1845                 }
1846         }
1847         nr_failed += retry;
1848         stats->nr_thp_failed += thp_retry;
1849         stats->nr_failed_pages += nr_retry_pages;
1850 
1851         rc = rc_saved ? : nr_failed;
1852 out:
1853         /* Cleanup remaining folios */
1854         dst = list_first_entry(&dst_folios, struct folio, lru);
1855         dst2 = list_next_entry(dst, lru);
1856         list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1857                 int old_page_state = 0;
1858                 struct anon_vma *anon_vma = NULL;
1859 
1860                 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1861                 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1862                                        anon_vma, true, ret_folios);
1863                 list_del(&dst->lru);
1864                 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1865                 dst = dst2;
1866                 dst2 = list_next_entry(dst, lru);
1867         }
1868 
1869         return rc;
1870 }
1871 
1872 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1873                 free_folio_t put_new_folio, unsigned long private,
1874                 enum migrate_mode mode, int reason,
1875                 struct list_head *ret_folios, struct list_head *split_folios,
1876                 struct migrate_pages_stats *stats)
1877 {
1878         int rc, nr_failed = 0;
1879         LIST_HEAD(folios);
1880         struct migrate_pages_stats astats;
1881 
1882         memset(&astats, 0, sizeof(astats));
1883         /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
1884         rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1885                                  reason, &folios, split_folios, &astats,
1886                                  NR_MAX_MIGRATE_ASYNC_RETRY);
1887         stats->nr_succeeded += astats.nr_succeeded;
1888         stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1889         stats->nr_thp_split += astats.nr_thp_split;
1890         stats->nr_split += astats.nr_split;
1891         if (rc < 0) {
1892                 stats->nr_failed_pages += astats.nr_failed_pages;
1893                 stats->nr_thp_failed += astats.nr_thp_failed;
1894                 list_splice_tail(&folios, ret_folios);
1895                 return rc;
1896         }
1897         stats->nr_thp_failed += astats.nr_thp_split;
1898         /*
1899          * Do not count rc, as pages will be retried below.
1900          * Count nr_split only, since it includes nr_thp_split.
1901          */
1902         nr_failed += astats.nr_split;
1903         /*
1904          * Fall back to migrate all failed folios one by one synchronously. All
1905          * failed folios except split THPs will be retried, so their failure
1906          * isn't counted.
1907          */
1908         list_splice_tail_init(&folios, from);
1909         while (!list_empty(from)) {
1910                 list_move(from->next, &folios);
1911                 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1912                                          private, mode, reason, ret_folios,
1913                                          split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1914                 list_splice_tail_init(&folios, ret_folios);
1915                 if (rc < 0)
1916                         return rc;
1917                 nr_failed += rc;
1918         }
1919 
1920         return nr_failed;
1921 }
1922 
1923 /*
1924  * migrate_pages - migrate the folios specified in a list, to the free folios
1925  *                 supplied as the target for the page migration
1926  *
1927  * @from:               The list of folios to be migrated.
1928  * @get_new_folio:      The function used to allocate free folios to be used
1929  *                      as the target of the folio migration.
1930  * @put_new_folio:      The function used to free target folios if migration
1931  *                      fails, or NULL if no special handling is necessary.
1932  * @private:            Private data to be passed on to get_new_folio()
1933  * @mode:               The migration mode that specifies the constraints for
1934  *                      folio migration, if any.
1935  * @reason:             The reason for folio migration.
1936  * @ret_succeeded:      Set to the number of folios migrated successfully if
1937  *                      the caller passes a non-NULL pointer.
1938  *
1939  * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1940  * are movable any more because the list has become empty or no retryable folios
1941  * exist any more. It is the caller's responsibility to call putback_movable_pages()
1942  * only if ret != 0.
1943  *
1944  * Returns the number of {normal, large, hugetlb} folios that were not
1945  * migrated, or an error code. Each large folio that was split is counted as
1946  * one non-migrated large folio, no matter how many of its split folios are
1947  * migrated successfully.
1948  */
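/*
 * Illustrative caller sketch (added, not part of the original file; compare
 * do_move_pages_to_node() further below for the in-tree equivalent).  The
 * destination node "nid" and the list of isolated folios on "folios" are
 * assumed to have been set up by the caller:
 *
 *	struct migration_target_control mtc = {
 *		.nid = nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *		.reason = MR_SYSCALL,
 *	};
 *	int failed;
 *
 *	failed = migrate_pages(&folios, alloc_migration_target, NULL,
 *			       (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL,
 *			       NULL);
 *	if (failed)
 *		putback_movable_pages(&folios);
 */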
1949 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
1950                 free_folio_t put_new_folio, unsigned long private,
1951                 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1952 {
1953         int rc, rc_gather;
1954         int nr_pages;
1955         struct folio *folio, *folio2;
1956         LIST_HEAD(folios);
1957         LIST_HEAD(ret_folios);
1958         LIST_HEAD(split_folios);
1959         struct migrate_pages_stats stats;
1960 
1961         trace_mm_migrate_pages_start(mode, reason);
1962 
1963         memset(&stats, 0, sizeof(stats));
1964 
1965         rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
1966                                      mode, reason, &stats, &ret_folios);
1967         if (rc_gather < 0)
1968                 goto out;
1969 
1970 again:
1971         nr_pages = 0;
1972         list_for_each_entry_safe(folio, folio2, from, lru) {
1973                 /* Retried hugetlb folios will be kept in the list */
1974                 if (folio_test_hugetlb(folio)) {
1975                         list_move_tail(&folio->lru, &ret_folios);
1976                         continue;
1977                 }
1978 
1979                 nr_pages += folio_nr_pages(folio);
1980                 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1981                         break;
1982         }
1983         if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1984                 list_cut_before(&folios, from, &folio2->lru);
1985         else
1986                 list_splice_init(from, &folios);
1987         if (mode == MIGRATE_ASYNC)
1988                 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1989                                 private, mode, reason, &ret_folios,
1990                                 &split_folios, &stats,
1991                                 NR_MAX_MIGRATE_PAGES_RETRY);
1992         else
1993                 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
1994                                 private, mode, reason, &ret_folios,
1995                                 &split_folios, &stats);
1996         list_splice_tail_init(&folios, &ret_folios);
1997         if (rc < 0) {
1998                 rc_gather = rc;
1999                 list_splice_tail(&split_folios, &ret_folios);
2000                 goto out;
2001         }
2002         if (!list_empty(&split_folios)) {
2003                 /*
2004                  * Failure isn't counted since all split folios of a large folio
2005                  * are counted as 1 failure already.  And we only try to migrate
2006                  * with minimal effort, forcing MIGRATE_ASYNC mode and retrying once.
2007                  */
2008                 migrate_pages_batch(&split_folios, get_new_folio,
2009                                 put_new_folio, private, MIGRATE_ASYNC, reason,
2010                                 &ret_folios, NULL, &stats, 1);
2011                 list_splice_tail_init(&split_folios, &ret_folios);
2012         }
2013         rc_gather += rc;
2014         if (!list_empty(from))
2015                 goto again;
2016 out:
2017         /*
2018          * Put the permanently failed folios back on the migration list;
2019          * they will be put back on the right list by the caller.
2020          */
2021         list_splice(&ret_folios, from);
2022 
2023         /*
2024          * Return 0 in case all split folios of fail-to-migrate large folios
2025          * are migrated successfully.
2026          */
2027         if (list_empty(from))
2028                 rc_gather = 0;
2029 
2030         count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2031         count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2032         count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2033         count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2034         count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2035         trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2036                                stats.nr_thp_succeeded, stats.nr_thp_failed,
2037                                stats.nr_thp_split, stats.nr_split, mode,
2038                                reason);
2039 
2040         if (ret_succeeded)
2041                 *ret_succeeded = stats.nr_succeeded;
2042 
2043         return rc_gather;
2044 }
2045 
2046 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2047 {
2048         struct migration_target_control *mtc;
2049         gfp_t gfp_mask;
2050         unsigned int order = 0;
2051         int nid;
2052         int zidx;
2053 
2054         mtc = (struct migration_target_control *)private;
2055         gfp_mask = mtc->gfp_mask;
2056         nid = mtc->nid;
2057         if (nid == NUMA_NO_NODE)
2058                 nid = folio_nid(src);
2059 
2060         if (folio_test_hugetlb(src)) {
2061                 struct hstate *h = folio_hstate(src);
2062 
2063                 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2064                 return alloc_hugetlb_folio_nodemask(h, nid,
2065                                                 mtc->nmask, gfp_mask,
2066                                                 htlb_allow_alloc_fallback(mtc->reason));
2067         }
2068 
2069         if (folio_test_large(src)) {
2070                 /*
2071                  * clear __GFP_RECLAIM to make the migration callback
2072                  * consistent with regular THP allocations.
2073                  */
2074                 gfp_mask &= ~__GFP_RECLAIM;
2075                 gfp_mask |= GFP_TRANSHUGE;
2076                 order = folio_order(src);
2077         }
2078         zidx = zone_idx(folio_zone(src));
2079         if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2080                 gfp_mask |= __GFP_HIGHMEM;
2081 
2082         return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2083 }
2084 
2085 #ifdef CONFIG_NUMA
2086 
2087 static int store_status(int __user *status, int start, int value, int nr)
2088 {
2089         while (nr-- > 0) {
2090                 if (put_user(value, status + start))
2091                         return -EFAULT;
2092                 start++;
2093         }
2094 
2095         return 0;
2096 }
2097 
2098 static int do_move_pages_to_node(struct list_head *pagelist, int node)
2099 {
2100         int err;
2101         struct migration_target_control mtc = {
2102                 .nid = node,
2103                 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2104                 .reason = MR_SYSCALL,
2105         };
2106 
2107         err = migrate_pages(pagelist, alloc_migration_target, NULL,
2108                 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2109         if (err)
2110                 putback_movable_pages(pagelist);
2111         return err;
2112 }
2113 
2114 /*
2115  * Resolves the given address to a struct page, isolates it from the LRU and
2116  * puts it on the given pagelist.
2117  * Returns:
2118  *     errno - if the page cannot be found/isolated
2119  *     0 - when it doesn't have to be migrated because it is already on the
2120  *         target node
2121  *     1 - when it has been queued
2122  */
2123 static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
2124                 int node, struct list_head *pagelist, bool migrate_all)
2125 {
2126         struct vm_area_struct *vma;
2127         unsigned long addr;
2128         struct page *page;
2129         struct folio *folio;
2130         int err;
2131 
2132         mmap_read_lock(mm);
2133         addr = (unsigned long)untagged_addr_remote(mm, p);
2134 
2135         err = -EFAULT;
2136         vma = vma_lookup(mm, addr);
2137         if (!vma || !vma_migratable(vma))
2138                 goto out;
2139 
2140         /* FOLL_DUMP to ignore special (like zero) pages */
2141         page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2142 
2143         err = PTR_ERR(page);
2144         if (IS_ERR(page))
2145                 goto out;
2146 
2147         err = -ENOENT;
2148         if (!page)
2149                 goto out;
2150 
2151         folio = page_folio(page);
2152         if (folio_is_zone_device(folio))
2153                 goto out_putfolio;
2154 
2155         err = 0;
2156         if (folio_nid(folio) == node)
2157                 goto out_putfolio;
2158 
2159         err = -EACCES;
2160         if (folio_likely_mapped_shared(folio) && !migrate_all)
2161                 goto out_putfolio;
2162 
2163         err = -EBUSY;
2164         if (folio_test_hugetlb(folio)) {
2165                 if (isolate_hugetlb(folio, pagelist))
2166                         err = 1;
2167         } else {
2168                 if (!folio_isolate_lru(folio))
2169                         goto out_putfolio;
2170 
2171                 err = 1;
2172                 list_add_tail(&folio->lru, pagelist);
2173                 node_stat_mod_folio(folio,
2174                         NR_ISOLATED_ANON + folio_is_file_lru(folio),
2175                         folio_nr_pages(folio));
2176         }
2177 out_putfolio:
2178         /*
2179          * Either remove the duplicate refcount from folio_isolate_lru()
2180          * or drop the folio ref if it was not isolated.
2181          */
2182         folio_put(folio);
2183 out:
2184         mmap_read_unlock(mm);
2185         return err;
2186 }
2187 
2188 static int move_pages_and_store_status(int node,
2189                 struct list_head *pagelist, int __user *status,
2190                 int start, int i, unsigned long nr_pages)
2191 {
2192         int err;
2193 
2194         if (list_empty(pagelist))
2195                 return 0;
2196 
2197         err = do_move_pages_to_node(pagelist, node);
2198         if (err) {
2199                 /*
2200                  * A positive err is the number of pages that
2201                  * failed to migrate.  Since we are going to
2202                  * abort and return the number of non-migrated
2203                  * pages, we need to include the rest of the
2204                  * nr_pages that have not been attempted as
2205                  * well.
2206                  */
2207                 if (err > 0)
2208                         err += nr_pages - i;
2209                 return err;
2210         }
2211         return store_status(status, start, node, i - start);
2212 }
2213 
2214 /*
2215  * Migrate an array of page addresses onto an array of nodes and fill
2216  * the corresponding array of status.
2217  */
2218 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2219                          unsigned long nr_pages,
2220                          const void __user * __user *pages,
2221                          const int __user *nodes,
2222                          int __user *status, int flags)
2223 {
2224         compat_uptr_t __user *compat_pages = (void __user *)pages;
2225         int current_node = NUMA_NO_NODE;
2226         LIST_HEAD(pagelist);
2227         int start, i;
2228         int err = 0, err1;
2229 
2230         lru_cache_disable();
2231 
2232         for (i = start = 0; i < nr_pages; i++) {
2233                 const void __user *p;
2234                 int node;
2235 
2236                 err = -EFAULT;
2237                 if (in_compat_syscall()) {
2238                         compat_uptr_t cp;
2239 
2240                         if (get_user(cp, compat_pages + i))
2241                                 goto out_flush;
2242 
2243                         p = compat_ptr(cp);
2244                 } else {
2245                         if (get_user(p, pages + i))
2246                                 goto out_flush;
2247                 }
2248                 if (get_user(node, nodes + i))
2249                         goto out_flush;
2250 
2251                 err = -ENODEV;
2252                 if (node < 0 || node >= MAX_NUMNODES)
2253                         goto out_flush;
2254                 if (!node_state(node, N_MEMORY))
2255                         goto out_flush;
2256 
2257                 err = -EACCES;
2258                 if (!node_isset(node, task_nodes))
2259                         goto out_flush;
2260 
2261                 if (current_node == NUMA_NO_NODE) {
2262                         current_node = node;
2263                         start = i;
2264                 } else if (node != current_node) {
2265                         err = move_pages_and_store_status(current_node,
2266                                         &pagelist, status, start, i, nr_pages);
2267                         if (err)
2268                                 goto out;
2269                         start = i;
2270                         current_node = node;
2271                 }
2272 
2273                 /*
2274                  * Errors in the page lookup or isolation are not fatal and we simply
2275                  * report them via status.
2276                  */
2277                 err = add_page_for_migration(mm, p, current_node, &pagelist,
2278                                              flags & MPOL_MF_MOVE_ALL);
2279 
2280                 if (err > 0) {
2281                         /* The page is successfully queued for migration */
2282                         continue;
2283                 }
2284 
2285                 /*
2286                  * The move_pages() man page does not have an -EEXIST choice, so
2287                  * use -EFAULT instead.
2288                  */
2289                 if (err == -EEXIST)
2290                         err = -EFAULT;
2291 
2292                 /*
2293                  * If the page is already on the target node (!err), store the
2294                  * node, otherwise, store the err.
2295                  */
2296                 err = store_status(status, i, err ? : current_node, 1);
2297                 if (err)
2298                         goto out_flush;
2299 
2300                 err = move_pages_and_store_status(current_node, &pagelist,
2301                                 status, start, i, nr_pages);
2302                 if (err) {
2303                         /* We have accounted for page i */
2304                         if (err > 0)
2305                                 err--;
2306                         goto out;
2307                 }
2308                 current_node = NUMA_NO_NODE;
2309         }
2310 out_flush:
2311         /* Make sure we do not overwrite the existing error */
2312         err1 = move_pages_and_store_status(current_node, &pagelist,
2313                                 status, start, i, nr_pages);
2314         if (err >= 0)
2315                 err = err1;
2316 out:
2317         lru_cache_enable();
2318         return err;
2319 }
2320 
2321 /*
2322  * Determine the nodes of an array of pages and store them in an array of status.
2323  */
2324 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2325                                 const void __user **pages, int *status)
2326 {
2327         unsigned long i;
2328 
2329         mmap_read_lock(mm);
2330 
2331         for (i = 0; i < nr_pages; i++) {
2332                 unsigned long addr = (unsigned long)(*pages);
2333                 struct vm_area_struct *vma;
2334                 struct page *page;
2335                 int err = -EFAULT;
2336 
2337                 vma = vma_lookup(mm, addr);
2338                 if (!vma)
2339                         goto set_status;
2340 
2341                 /* FOLL_DUMP to ignore special (like zero) pages */
2342                 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2343 
2344                 err = PTR_ERR(page);
2345                 if (IS_ERR(page))
2346                         goto set_status;
2347 
2348                 err = -ENOENT;
2349                 if (!page)
2350                         goto set_status;
2351 
2352                 if (!is_zone_device_page(page))
2353                         err = page_to_nid(page);
2354 
2355                 put_page(page);
2356 set_status:
2357                 *status = err;
2358 
2359                 pages++;
2360                 status++;
2361         }
2362 
2363         mmap_read_unlock(mm);
2364 }
2365 
2366 static int get_compat_pages_array(const void __user *chunk_pages[],
2367                                   const void __user * __user *pages,
2368                                   unsigned long chunk_nr)
2369 {
2370         compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2371         compat_uptr_t p;
2372         int i;
2373 
2374         for (i = 0; i < chunk_nr; i++) {
2375                 if (get_user(p, pages32 + i))
2376                         return -EFAULT;
2377                 chunk_pages[i] = compat_ptr(p);
2378         }
2379 
2380         return 0;
2381 }
2382 
2383 /*
2384  * Determine the nodes of a user array of pages and store them in
2385  * a user array of status.
2386  */
2387 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2388                          const void __user * __user *pages,
2389                          int __user *status)
2390 {
2391 #define DO_PAGES_STAT_CHUNK_NR 16UL
2392         const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2393         int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2394 
2395         while (nr_pages) {
2396                 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2397 
2398                 if (in_compat_syscall()) {
2399                         if (get_compat_pages_array(chunk_pages, pages,
2400                                                    chunk_nr))
2401                                 break;
2402                 } else {
2403                         if (copy_from_user(chunk_pages, pages,
2404                                       chunk_nr * sizeof(*chunk_pages)))
2405                                 break;
2406                 }
2407 
2408                 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2409 
2410                 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2411                         break;
2412 
2413                 pages += chunk_nr;
2414                 status += chunk_nr;
2415                 nr_pages -= chunk_nr;
2416         }
2417         return nr_pages ? -EFAULT : 0;
2418 }
2419 
2420 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2421 {
2422         struct task_struct *task;
2423         struct mm_struct *mm;
2424 
2425         /*
2426          * There is no need to check if the current process has the right to
2427          * modify the specified process when they are the same.
2428          */
2429         if (!pid) {
2430                 mmget(current->mm);
2431                 *mem_nodes = cpuset_mems_allowed(current);
2432                 return current->mm;
2433         }
2434 
2435         /* Find the mm_struct */
2436         rcu_read_lock();
2437         task = find_task_by_vpid(pid);
2438         if (!task) {
2439                 rcu_read_unlock();
2440                 return ERR_PTR(-ESRCH);
2441         }
2442         get_task_struct(task);
2443 
2444         /*
2445          * Check if this process has the right to modify the specified
2446          * process. Use the regular "ptrace_may_access()" checks.
2447          */
2448         if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2449                 rcu_read_unlock();
2450                 mm = ERR_PTR(-EPERM);
2451                 goto out;
2452         }
2453         rcu_read_unlock();
2454 
2455         mm = ERR_PTR(security_task_movememory(task));
2456         if (IS_ERR(mm))
2457                 goto out;
2458         *mem_nodes = cpuset_mems_allowed(task);
2459         mm = get_task_mm(task);
2460 out:
2461         put_task_struct(task);
2462         if (!mm)
2463                 mm = ERR_PTR(-EINVAL);
2464         return mm;
2465 }
2466 
2467 /*
2468  * Move a list of pages in the address space of the currently executing
2469  * process.
2470  */
2471 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2472                              const void __user * __user *pages,
2473                              const int __user *nodes,
2474                              int __user *status, int flags)
2475 {
2476         struct mm_struct *mm;
2477         int err;
2478         nodemask_t task_nodes;
2479 
2480         /* Check flags */
2481         if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2482                 return -EINVAL;
2483 
2484         if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2485                 return -EPERM;
2486 
2487         mm = find_mm_struct(pid, &task_nodes);
2488         if (IS_ERR(mm))
2489                 return PTR_ERR(mm);
2490 
2491         if (nodes)
2492                 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2493                                     nodes, status, flags);
2494         else
2495                 err = do_pages_stat(mm, nr_pages, pages, status);
2496 
2497         mmput(mm);
2498         return err;
2499 }
2500 
2501 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2502                 const void __user * __user *, pages,
2503                 const int __user *, nodes,
2504                 int __user *, status, int, flags)
2505 {
2506         return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2507 }
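
/*
 * Illustrative userspace sketch (added; the move_pages() wrapper comes from
 * libnuma's <numaif.h>, see the move_pages(2) man page -- it is not defined
 * in this file).  Ask the kernel to move one page backing "buf" to node 1 in
 * the calling process (pid 0) and read back its status:
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	void *pages[1] = { buf };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *	long ret;
 *
 *	ret = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *	if (ret < 0)
 *		perror("move_pages");
 *	else
 *		printf("status[0] = %d\n", status[0]);
 *
 * On success status[0] holds the node the page now resides on, or a negative
 * errno value if that particular page could not be moved.
 */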
2508 
2509 #ifdef CONFIG_NUMA_BALANCING
2510 /*
2511  * Returns true if this is a safe migration target node for misplaced NUMA
2512  * pages. Currently it only checks the watermarks, which is crude.
2513  */
2514 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2515                                    unsigned long nr_migrate_pages)
2516 {
2517         int z;
2518 
2519         for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2520                 struct zone *zone = pgdat->node_zones + z;
2521 
2522                 if (!managed_zone(zone))
2523                         continue;
2524 
2525                 /* Avoid waking kswapd by allocating pages_to_migrate pages. */
2526                 if (!zone_watermark_ok(zone, 0,
2527                                        high_wmark_pages(zone) +
2528                                        nr_migrate_pages,
2529                                        ZONE_MOVABLE, 0))
2530                         continue;
2531                 return true;
2532         }
2533         return false;
2534 }
2535 
2536 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2537                                            unsigned long data)
2538 {
2539         int nid = (int) data;
2540         int order = folio_order(src);
2541         gfp_t gfp = __GFP_THISNODE;
2542 
2543         if (order > 0)
2544                 gfp |= GFP_TRANSHUGE_LIGHT;
2545         else {
2546                 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2547                         __GFP_NOWARN;
2548                 gfp &= ~__GFP_RECLAIM;
2549         }
2550         return __folio_alloc_node(gfp, order, nid);
2551 }
2552 
2553 /*
2554  * Prepare for calling migrate_misplaced_folio() by isolating the folio if
2555  * permitted. Must be called with the PTL still held.
2556  */
2557 int migrate_misplaced_folio_prepare(struct folio *folio,
2558                 struct vm_area_struct *vma, int node)
2559 {
2560         int nr_pages = folio_nr_pages(folio);
2561         pg_data_t *pgdat = NODE_DATA(node);
2562 
2563         if (folio_is_file_lru(folio)) {
2564                 /*
2565                  * Do not migrate file folios that are mapped in multiple
2566                  * processes with execute permissions as they are probably
2567                  * shared libraries.
2568                  *
2569                  * See folio_likely_mapped_shared() on possible imprecision
2570                  * when we cannot easily detect if a folio is shared.
2571                  */
2572                 if ((vma->vm_flags & VM_EXEC) &&
2573                     folio_likely_mapped_shared(folio))
2574                         return -EACCES;
2575 
2576                 /*
2577                  * Do not migrate dirty folios as not all filesystems can move
2578                  * dirty folios in MIGRATE_ASYNC mode which is a waste of
2579                  * cycles.
2580                  */
2581                 if (folio_test_dirty(folio))
2582                         return -EAGAIN;
2583         }
2584 
2585         /* Avoid migrating to a node that is nearly full */
2586         if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2587                 int z;
2588 
2589                 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2590                         return -EAGAIN;
2591                 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2592                         if (managed_zone(pgdat->node_zones + z))
2593                                 break;
2594                 }
2595 
2596                 /*
2597                  * If there are no managed zones, it should not proceed
2598                  * further.
2599                  */
2600                 if (z < 0)
2601                         return -EAGAIN;
2602 
2603                 wakeup_kswapd(pgdat->node_zones + z, 0,
2604                               folio_order(folio), ZONE_MOVABLE);
2605                 return -EAGAIN;
2606         }
2607 
2608         if (!folio_isolate_lru(folio))
2609                 return -EAGAIN;
2610 
2611         node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2612                             nr_pages);
2613         return 0;
2614 }
2615 
2616 /*
2617  * Attempt to migrate a misplaced folio to the specified destination
2618  * node. The caller is expected to have isolated the folio by calling
2619  * migrate_misplaced_folio_prepare(), which will result in an
2620  * elevated reference count on the folio. This function will un-isolate the
2621  * folio, dropping the folio reference before returning.
2622  */
2623 int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2624                             int node)
2625 {
2626         pg_data_t *pgdat = NODE_DATA(node);
2627         int nr_remaining;
2628         unsigned int nr_succeeded;
2629         LIST_HEAD(migratepages);
2630 
2631         list_add(&folio->lru, &migratepages);
2632         nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2633                                      NULL, node, MIGRATE_ASYNC,
2634                                      MR_NUMA_MISPLACED, &nr_succeeded);
2635         if (nr_remaining && !list_empty(&migratepages))
2636                 putback_movable_pages(&migratepages);
2637         if (nr_succeeded) {
2638                 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2639                 if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
2640                         mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2641                                             nr_succeeded);
2642         }
2643         BUG_ON(!list_empty(&migratepages));
2644         return nr_remaining ? -EAGAIN : 0;
2645 }
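
/*
 * Illustrative caller sketch (added, simplified): a NUMA hinting fault
 * handler (e.g. do_numa_page() in mm/memory.c) is expected to use the two
 * helpers above roughly like this -- isolate while still holding the PTL,
 * drop the PTL, then migrate:
 *
 *	if (migrate_misplaced_folio_prepare(folio, vma, target_nid))
 *		goto out_map;			(could not isolate, keep old page)
 *	pte_unmap_unlock(ptep, ptl);		(must not migrate under the PTL)
 *	if (migrate_misplaced_folio(folio, vma, target_nid))
 *		...				(-EAGAIN: folio was put back)
 */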
2646 #endif /* CONFIG_NUMA_BALANCING */
2647 #endif /* CONFIG_NUMA */
2648 
