~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/mm/khugepaged.c

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0
  2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  3 
  4 #include <linux/mm.h>
  5 #include <linux/sched.h>
  6 #include <linux/sched/mm.h>
  7 #include <linux/sched/coredump.h>
  8 #include <linux/mmu_notifier.h>
  9 #include <linux/rmap.h>
 10 #include <linux/swap.h>
 11 #include <linux/mm_inline.h>
 12 #include <linux/kthread.h>
 13 #include <linux/khugepaged.h>
 14 #include <linux/freezer.h>
 15 #include <linux/mman.h>
 16 #include <linux/hashtable.h>
 17 #include <linux/userfaultfd_k.h>
 18 #include <linux/page_idle.h>
 19 #include <linux/page_table_check.h>
 20 #include <linux/rcupdate_wait.h>
 21 #include <linux/swapops.h>
 22 #include <linux/shmem_fs.h>
 23 #include <linux/ksm.h>
 24 
 25 #include <asm/tlb.h>
 26 #include <asm/pgalloc.h>
 27 #include "internal.h"
 28 #include "mm_slot.h"
 29 
 30 enum scan_result {
 31         SCAN_FAIL,
 32         SCAN_SUCCEED,
 33         SCAN_PMD_NULL,
 34         SCAN_PMD_NONE,
 35         SCAN_PMD_MAPPED,
 36         SCAN_EXCEED_NONE_PTE,
 37         SCAN_EXCEED_SWAP_PTE,
 38         SCAN_EXCEED_SHARED_PTE,
 39         SCAN_PTE_NON_PRESENT,
 40         SCAN_PTE_UFFD_WP,
 41         SCAN_PTE_MAPPED_HUGEPAGE,
 42         SCAN_PAGE_RO,
 43         SCAN_LACK_REFERENCED_PAGE,
 44         SCAN_PAGE_NULL,
 45         SCAN_SCAN_ABORT,
 46         SCAN_PAGE_COUNT,
 47         SCAN_PAGE_LRU,
 48         SCAN_PAGE_LOCK,
 49         SCAN_PAGE_ANON,
 50         SCAN_PAGE_COMPOUND,
 51         SCAN_ANY_PROCESS,
 52         SCAN_VMA_NULL,
 53         SCAN_VMA_CHECK,
 54         SCAN_ADDRESS_RANGE,
 55         SCAN_DEL_PAGE_LRU,
 56         SCAN_ALLOC_HUGE_PAGE_FAIL,
 57         SCAN_CGROUP_CHARGE_FAIL,
 58         SCAN_TRUNCATED,
 59         SCAN_PAGE_HAS_PRIVATE,
 60         SCAN_STORE_FAILED,
 61         SCAN_COPY_MC,
 62         SCAN_PAGE_FILLED,
 63 };
 64 
 65 #define CREATE_TRACE_POINTS
 66 #include <trace/events/huge_memory.h>
 67 
 68 static struct task_struct *khugepaged_thread __read_mostly;
 69 static DEFINE_MUTEX(khugepaged_mutex);
 70 
 71 /* default scan 8*512 pte (or vmas) every 30 second */
 72 static unsigned int khugepaged_pages_to_scan __read_mostly;
 73 static unsigned int khugepaged_pages_collapsed;
 74 static unsigned int khugepaged_full_scans;
 75 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
 76 /* during fragmentation poll the hugepage allocator once every minute */
 77 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
 78 static unsigned long khugepaged_sleep_expire;
 79 static DEFINE_SPINLOCK(khugepaged_mm_lock);
 80 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
 81 /*
 82  * default collapse hugepages if there is at least one pte mapped like
 83  * it would have happened if the vma was large enough during page
 84  * fault.
 85  *
 86  * Note that these are only respected if collapse was initiated by khugepaged.
 87  */
 88 static unsigned int khugepaged_max_ptes_none __read_mostly;
 89 static unsigned int khugepaged_max_ptes_swap __read_mostly;
 90 static unsigned int khugepaged_max_ptes_shared __read_mostly;
 91 
 92 #define MM_SLOTS_HASH_BITS 10
 93 static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
 94 
 95 static struct kmem_cache *mm_slot_cache __ro_after_init;
 96 
 97 struct collapse_control {
 98         bool is_khugepaged;
 99 
100         /* Num pages scanned per node */
101         u32 node_load[MAX_NUMNODES];
102 
103         /* nodemask for allocation fallback */
104         nodemask_t alloc_nmask;
105 };
106 
107 /**
108  * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
109  * @slot: hash lookup from mm to mm_slot
110  */
111 struct khugepaged_mm_slot {
112         struct mm_slot slot;
113 };
114 
115 /**
116  * struct khugepaged_scan - cursor for scanning
117  * @mm_head: the head of the mm list to scan
118  * @mm_slot: the current mm_slot we are scanning
119  * @address: the next address inside that to be scanned
120  *
121  * There is only the one khugepaged_scan instance of this cursor structure.
122  */
123 struct khugepaged_scan {
124         struct list_head mm_head;
125         struct khugepaged_mm_slot *mm_slot;
126         unsigned long address;
127 };
128 
129 static struct khugepaged_scan khugepaged_scan = {
130         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
131 };
132 
133 #ifdef CONFIG_SYSFS
134 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
135                                          struct kobj_attribute *attr,
136                                          char *buf)
137 {
138         return sysfs_emit(buf, "%u\n", khugepaged_scan_sleep_millisecs);
139 }
140 
141 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
142                                           struct kobj_attribute *attr,
143                                           const char *buf, size_t count)
144 {
145         unsigned int msecs;
146         int err;
147 
148         err = kstrtouint(buf, 10, &msecs);
149         if (err)
150                 return -EINVAL;
151 
152         khugepaged_scan_sleep_millisecs = msecs;
153         khugepaged_sleep_expire = 0;
154         wake_up_interruptible(&khugepaged_wait);
155 
156         return count;
157 }
158 static struct kobj_attribute scan_sleep_millisecs_attr =
159         __ATTR_RW(scan_sleep_millisecs);
160 
161 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
162                                           struct kobj_attribute *attr,
163                                           char *buf)
164 {
165         return sysfs_emit(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
166 }
167 
168 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
169                                            struct kobj_attribute *attr,
170                                            const char *buf, size_t count)
171 {
172         unsigned int msecs;
173         int err;
174 
175         err = kstrtouint(buf, 10, &msecs);
176         if (err)
177                 return -EINVAL;
178 
179         khugepaged_alloc_sleep_millisecs = msecs;
180         khugepaged_sleep_expire = 0;
181         wake_up_interruptible(&khugepaged_wait);
182 
183         return count;
184 }
185 static struct kobj_attribute alloc_sleep_millisecs_attr =
186         __ATTR_RW(alloc_sleep_millisecs);
187 
188 static ssize_t pages_to_scan_show(struct kobject *kobj,
189                                   struct kobj_attribute *attr,
190                                   char *buf)
191 {
192         return sysfs_emit(buf, "%u\n", khugepaged_pages_to_scan);
193 }
194 static ssize_t pages_to_scan_store(struct kobject *kobj,
195                                    struct kobj_attribute *attr,
196                                    const char *buf, size_t count)
197 {
198         unsigned int pages;
199         int err;
200 
201         err = kstrtouint(buf, 10, &pages);
202         if (err || !pages)
203                 return -EINVAL;
204 
205         khugepaged_pages_to_scan = pages;
206 
207         return count;
208 }
209 static struct kobj_attribute pages_to_scan_attr =
210         __ATTR_RW(pages_to_scan);
211 
212 static ssize_t pages_collapsed_show(struct kobject *kobj,
213                                     struct kobj_attribute *attr,
214                                     char *buf)
215 {
216         return sysfs_emit(buf, "%u\n", khugepaged_pages_collapsed);
217 }
218 static struct kobj_attribute pages_collapsed_attr =
219         __ATTR_RO(pages_collapsed);
220 
221 static ssize_t full_scans_show(struct kobject *kobj,
222                                struct kobj_attribute *attr,
223                                char *buf)
224 {
225         return sysfs_emit(buf, "%u\n", khugepaged_full_scans);
226 }
227 static struct kobj_attribute full_scans_attr =
228         __ATTR_RO(full_scans);
229 
230 static ssize_t defrag_show(struct kobject *kobj,
231                            struct kobj_attribute *attr, char *buf)
232 {
233         return single_hugepage_flag_show(kobj, attr, buf,
234                                          TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
235 }
236 static ssize_t defrag_store(struct kobject *kobj,
237                             struct kobj_attribute *attr,
238                             const char *buf, size_t count)
239 {
240         return single_hugepage_flag_store(kobj, attr, buf, count,
241                                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
242 }
243 static struct kobj_attribute khugepaged_defrag_attr =
244         __ATTR_RW(defrag);
245 
246 /*
247  * max_ptes_none controls if khugepaged should collapse hugepages over
248  * any unmapped ptes in turn potentially increasing the memory
249  * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
250  * reduce the available free memory in the system as it
251  * runs. Increasing max_ptes_none will instead potentially reduce the
252  * free memory in the system during the khugepaged scan.
253  */
254 static ssize_t max_ptes_none_show(struct kobject *kobj,
255                                   struct kobj_attribute *attr,
256                                   char *buf)
257 {
258         return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_none);
259 }
260 static ssize_t max_ptes_none_store(struct kobject *kobj,
261                                    struct kobj_attribute *attr,
262                                    const char *buf, size_t count)
263 {
264         int err;
265         unsigned long max_ptes_none;
266 
267         err = kstrtoul(buf, 10, &max_ptes_none);
268         if (err || max_ptes_none > HPAGE_PMD_NR - 1)
269                 return -EINVAL;
270 
271         khugepaged_max_ptes_none = max_ptes_none;
272 
273         return count;
274 }
275 static struct kobj_attribute khugepaged_max_ptes_none_attr =
276         __ATTR_RW(max_ptes_none);
277 
278 static ssize_t max_ptes_swap_show(struct kobject *kobj,
279                                   struct kobj_attribute *attr,
280                                   char *buf)
281 {
282         return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_swap);
283 }
284 
285 static ssize_t max_ptes_swap_store(struct kobject *kobj,
286                                    struct kobj_attribute *attr,
287                                    const char *buf, size_t count)
288 {
289         int err;
290         unsigned long max_ptes_swap;
291 
292         err  = kstrtoul(buf, 10, &max_ptes_swap);
293         if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
294                 return -EINVAL;
295 
296         khugepaged_max_ptes_swap = max_ptes_swap;
297 
298         return count;
299 }
300 
301 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
302         __ATTR_RW(max_ptes_swap);
303 
304 static ssize_t max_ptes_shared_show(struct kobject *kobj,
305                                     struct kobj_attribute *attr,
306                                     char *buf)
307 {
308         return sysfs_emit(buf, "%u\n", khugepaged_max_ptes_shared);
309 }
310 
311 static ssize_t max_ptes_shared_store(struct kobject *kobj,
312                                      struct kobj_attribute *attr,
313                                      const char *buf, size_t count)
314 {
315         int err;
316         unsigned long max_ptes_shared;
317 
318         err  = kstrtoul(buf, 10, &max_ptes_shared);
319         if (err || max_ptes_shared > HPAGE_PMD_NR - 1)
320                 return -EINVAL;
321 
322         khugepaged_max_ptes_shared = max_ptes_shared;
323 
324         return count;
325 }
326 
327 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
328         __ATTR_RW(max_ptes_shared);
329 
330 static struct attribute *khugepaged_attr[] = {
331         &khugepaged_defrag_attr.attr,
332         &khugepaged_max_ptes_none_attr.attr,
333         &khugepaged_max_ptes_swap_attr.attr,
334         &khugepaged_max_ptes_shared_attr.attr,
335         &pages_to_scan_attr.attr,
336         &pages_collapsed_attr.attr,
337         &full_scans_attr.attr,
338         &scan_sleep_millisecs_attr.attr,
339         &alloc_sleep_millisecs_attr.attr,
340         NULL,
341 };
342 
343 struct attribute_group khugepaged_attr_group = {
344         .attrs = khugepaged_attr,
345         .name = "khugepaged",
346 };
347 #endif /* CONFIG_SYSFS */
348 
349 int hugepage_madvise(struct vm_area_struct *vma,
350                      unsigned long *vm_flags, int advice)
351 {
352         switch (advice) {
353         case MADV_HUGEPAGE:
354 #ifdef CONFIG_S390
355                 /*
356                  * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
357                  * can't handle this properly after s390_enable_sie, so we simply
358                  * ignore the madvise to prevent qemu from causing a SIGSEGV.
359                  */
360                 if (mm_has_pgste(vma->vm_mm))
361                         return 0;
362 #endif
363                 *vm_flags &= ~VM_NOHUGEPAGE;
364                 *vm_flags |= VM_HUGEPAGE;
365                 /*
366                  * If the vma become good for khugepaged to scan,
367                  * register it here without waiting a page fault that
368                  * may not happen any time soon.
369                  */
370                 khugepaged_enter_vma(vma, *vm_flags);
371                 break;
372         case MADV_NOHUGEPAGE:
373                 *vm_flags &= ~VM_HUGEPAGE;
374                 *vm_flags |= VM_NOHUGEPAGE;
375                 /*
376                  * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
377                  * this vma even if we leave the mm registered in khugepaged if
378                  * it got registered before VM_NOHUGEPAGE was set.
379                  */
380                 break;
381         }
382 
383         return 0;
384 }
385 
386 int __init khugepaged_init(void)
387 {
388         mm_slot_cache = KMEM_CACHE(khugepaged_mm_slot, 0);
389         if (!mm_slot_cache)
390                 return -ENOMEM;
391 
392         khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
393         khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
394         khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
395         khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
396 
397         return 0;
398 }
399 
400 void __init khugepaged_destroy(void)
401 {
402         kmem_cache_destroy(mm_slot_cache);
403 }
404 
405 static inline int hpage_collapse_test_exit(struct mm_struct *mm)
406 {
407         return atomic_read(&mm->mm_users) == 0;
408 }
409 
410 static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
411 {
412         return hpage_collapse_test_exit(mm) ||
413                test_bit(MMF_DISABLE_THP, &mm->flags);
414 }
415 
416 static bool hugepage_pmd_enabled(void)
417 {
418         /*
419          * We cover both the anon and the file-backed case here; file-backed
420          * hugepages, when configured in, are determined by the global control.
421          * Anon pmd-sized hugepages are determined by the pmd-size control.
422          */
423         if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
424             hugepage_global_enabled())
425                 return true;
426         if (test_bit(PMD_ORDER, &huge_anon_orders_always))
427                 return true;
428         if (test_bit(PMD_ORDER, &huge_anon_orders_madvise))
429                 return true;
430         if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
431             hugepage_global_enabled())
432                 return true;
433         return false;
434 }
435 
436 void __khugepaged_enter(struct mm_struct *mm)
437 {
438         struct khugepaged_mm_slot *mm_slot;
439         struct mm_slot *slot;
440         int wakeup;
441 
442         /* __khugepaged_exit() must not run from under us */
443         VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
444         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags)))
445                 return;
446 
447         mm_slot = mm_slot_alloc(mm_slot_cache);
448         if (!mm_slot)
449                 return;
450 
451         slot = &mm_slot->slot;
452 
453         spin_lock(&khugepaged_mm_lock);
454         mm_slot_insert(mm_slots_hash, mm, slot);
455         /*
456          * Insert just behind the scanning cursor, to let the area settle
457          * down a little.
458          */
459         wakeup = list_empty(&khugepaged_scan.mm_head);
460         list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
461         spin_unlock(&khugepaged_mm_lock);
462 
463         mmgrab(mm);
464         if (wakeup)
465                 wake_up_interruptible(&khugepaged_wait);
466 }
467 
468 void khugepaged_enter_vma(struct vm_area_struct *vma,
469                           unsigned long vm_flags)
470 {
471         if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
472             hugepage_pmd_enabled()) {
473                 if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
474                                             PMD_ORDER))
475                         __khugepaged_enter(vma->vm_mm);
476         }
477 }
478 
479 void __khugepaged_exit(struct mm_struct *mm)
480 {
481         struct khugepaged_mm_slot *mm_slot;
482         struct mm_slot *slot;
483         int free = 0;
484 
485         spin_lock(&khugepaged_mm_lock);
486         slot = mm_slot_lookup(mm_slots_hash, mm);
487         mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
488         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
489                 hash_del(&slot->hash);
490                 list_del(&slot->mm_node);
491                 free = 1;
492         }
493         spin_unlock(&khugepaged_mm_lock);
494 
495         if (free) {
496                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
497                 mm_slot_free(mm_slot_cache, mm_slot);
498                 mmdrop(mm);
499         } else if (mm_slot) {
500                 /*
501                  * This is required to serialize against
502                  * hpage_collapse_test_exit() (which is guaranteed to run
503                  * under mmap sem read mode). Stop here (after we return all
504                  * pagetables will be destroyed) until khugepaged has finished
505                  * working on the pagetables under the mmap_lock.
506                  */
507                 mmap_write_lock(mm);
508                 mmap_write_unlock(mm);
509         }
510 }
511 
512 static void release_pte_folio(struct folio *folio)
513 {
514         node_stat_mod_folio(folio,
515                         NR_ISOLATED_ANON + folio_is_file_lru(folio),
516                         -folio_nr_pages(folio));
517         folio_unlock(folio);
518         folio_putback_lru(folio);
519 }
520 
521 static void release_pte_pages(pte_t *pte, pte_t *_pte,
522                 struct list_head *compound_pagelist)
523 {
524         struct folio *folio, *tmp;
525 
526         while (--_pte >= pte) {
527                 pte_t pteval = ptep_get(_pte);
528                 unsigned long pfn;
529 
530                 if (pte_none(pteval))
531                         continue;
532                 pfn = pte_pfn(pteval);
533                 if (is_zero_pfn(pfn))
534                         continue;
535                 folio = pfn_folio(pfn);
536                 if (folio_test_large(folio))
537                         continue;
538                 release_pte_folio(folio);
539         }
540 
541         list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {
542                 list_del(&folio->lru);
543                 release_pte_folio(folio);
544         }
545 }
546 
547 static bool is_refcount_suitable(struct folio *folio)
548 {
549         int expected_refcount;
550 
551         expected_refcount = folio_mapcount(folio);
552         if (folio_test_swapcache(folio))
553                 expected_refcount += folio_nr_pages(folio);
554 
555         return folio_ref_count(folio) == expected_refcount;
556 }
557 
558 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
559                                         unsigned long address,
560                                         pte_t *pte,
561                                         struct collapse_control *cc,
562                                         struct list_head *compound_pagelist)
563 {
564         struct page *page = NULL;
565         struct folio *folio = NULL;
566         pte_t *_pte;
567         int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
568         bool writable = false;
569 
570         for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
571              _pte++, address += PAGE_SIZE) {
572                 pte_t pteval = ptep_get(_pte);
573                 if (pte_none(pteval) || (pte_present(pteval) &&
574                                 is_zero_pfn(pte_pfn(pteval)))) {
575                         ++none_or_zero;
576                         if (!userfaultfd_armed(vma) &&
577                             (!cc->is_khugepaged ||
578                              none_or_zero <= khugepaged_max_ptes_none)) {
579                                 continue;
580                         } else {
581                                 result = SCAN_EXCEED_NONE_PTE;
582                                 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
583                                 goto out;
584                         }
585                 }
586                 if (!pte_present(pteval)) {
587                         result = SCAN_PTE_NON_PRESENT;
588                         goto out;
589                 }
590                 if (pte_uffd_wp(pteval)) {
591                         result = SCAN_PTE_UFFD_WP;
592                         goto out;
593                 }
594                 page = vm_normal_page(vma, address, pteval);
595                 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
596                         result = SCAN_PAGE_NULL;
597                         goto out;
598                 }
599 
600                 folio = page_folio(page);
601                 VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
602 
603                 /* See hpage_collapse_scan_pmd(). */
604                 if (folio_likely_mapped_shared(folio)) {
605                         ++shared;
606                         if (cc->is_khugepaged &&
607                             shared > khugepaged_max_ptes_shared) {
608                                 result = SCAN_EXCEED_SHARED_PTE;
609                                 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
610                                 goto out;
611                         }
612                 }
613 
614                 if (folio_test_large(folio)) {
615                         struct folio *f;
616 
617                         /*
618                          * Check if we have dealt with the compound page
619                          * already
620                          */
621                         list_for_each_entry(f, compound_pagelist, lru) {
622                                 if (folio == f)
623                                         goto next;
624                         }
625                 }
626 
627                 /*
628                  * We can do it before isolate_lru_page because the
629                  * page can't be freed from under us. NOTE: PG_lock
630                  * is needed to serialize against split_huge_page
631                  * when invoked from the VM.
632                  */
633                 if (!folio_trylock(folio)) {
634                         result = SCAN_PAGE_LOCK;
635                         goto out;
636                 }
637 
638                 /*
639                  * Check if the page has any GUP (or other external) pins.
640                  *
641                  * The page table that maps the page has been already unlinked
642                  * from the page table tree and this process cannot get
643                  * an additional pin on the page.
644                  *
645                  * New pins can come later if the page is shared across fork,
646                  * but not from this process. The other process cannot write to
647                  * the page, only trigger CoW.
648                  */
649                 if (!is_refcount_suitable(folio)) {
650                         folio_unlock(folio);
651                         result = SCAN_PAGE_COUNT;
652                         goto out;
653                 }
654 
655                 /*
656                  * Isolate the page to avoid collapsing an hugepage
657                  * currently in use by the VM.
658                  */
659                 if (!folio_isolate_lru(folio)) {
660                         folio_unlock(folio);
661                         result = SCAN_DEL_PAGE_LRU;
662                         goto out;
663                 }
664                 node_stat_mod_folio(folio,
665                                 NR_ISOLATED_ANON + folio_is_file_lru(folio),
666                                 folio_nr_pages(folio));
667                 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
668                 VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
669 
670                 if (folio_test_large(folio))
671                         list_add_tail(&folio->lru, compound_pagelist);
672 next:
673                 /*
674                  * If collapse was initiated by khugepaged, check that there is
675                  * enough young pte to justify collapsing the page
676                  */
677                 if (cc->is_khugepaged &&
678                     (pte_young(pteval) || folio_test_young(folio) ||
679                      folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
680                                                                      address)))
681                         referenced++;
682 
683                 if (pte_write(pteval))
684                         writable = true;
685         }
686 
687         if (unlikely(!writable)) {
688                 result = SCAN_PAGE_RO;
689         } else if (unlikely(cc->is_khugepaged && !referenced)) {
690                 result = SCAN_LACK_REFERENCED_PAGE;
691         } else {
692                 result = SCAN_SUCCEED;
693                 trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
694                                                     referenced, writable, result);
695                 return result;
696         }
697 out:
698         release_pte_pages(pte, _pte, compound_pagelist);
699         trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
700                                             referenced, writable, result);
701         return result;
702 }
703 
704 static void __collapse_huge_page_copy_succeeded(pte_t *pte,
705                                                 struct vm_area_struct *vma,
706                                                 unsigned long address,
707                                                 spinlock_t *ptl,
708                                                 struct list_head *compound_pagelist)
709 {
710         struct folio *src, *tmp;
711         pte_t *_pte;
712         pte_t pteval;
713 
714         for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
715              _pte++, address += PAGE_SIZE) {
716                 pteval = ptep_get(_pte);
717                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
718                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
719                         if (is_zero_pfn(pte_pfn(pteval))) {
720                                 /*
721                                  * ptl mostly unnecessary.
722                                  */
723                                 spin_lock(ptl);
724                                 ptep_clear(vma->vm_mm, address, _pte);
725                                 spin_unlock(ptl);
726                                 ksm_might_unmap_zero_page(vma->vm_mm, pteval);
727                         }
728                 } else {
729                         struct page *src_page = pte_page(pteval);
730 
731                         src = page_folio(src_page);
732                         if (!folio_test_large(src))
733                                 release_pte_folio(src);
734                         /*
735                          * ptl mostly unnecessary, but preempt has to
736                          * be disabled to update the per-cpu stats
737                          * inside folio_remove_rmap_pte().
738                          */
739                         spin_lock(ptl);
740                         ptep_clear(vma->vm_mm, address, _pte);
741                         folio_remove_rmap_pte(src, src_page, vma);
742                         spin_unlock(ptl);
743                         free_page_and_swap_cache(src_page);
744                 }
745         }
746 
747         list_for_each_entry_safe(src, tmp, compound_pagelist, lru) {
748                 list_del(&src->lru);
749                 node_stat_sub_folio(src, NR_ISOLATED_ANON +
750                                 folio_is_file_lru(src));
751                 folio_unlock(src);
752                 free_swap_cache(src);
753                 folio_putback_lru(src);
754         }
755 }
756 
757 static void __collapse_huge_page_copy_failed(pte_t *pte,
758                                              pmd_t *pmd,
759                                              pmd_t orig_pmd,
760                                              struct vm_area_struct *vma,
761                                              struct list_head *compound_pagelist)
762 {
763         spinlock_t *pmd_ptl;
764 
765         /*
766          * Re-establish the PMD to point to the original page table
767          * entry. Restoring PMD needs to be done prior to releasing
768          * pages. Since pages are still isolated and locked here,
769          * acquiring anon_vma_lock_write is unnecessary.
770          */
771         pmd_ptl = pmd_lock(vma->vm_mm, pmd);
772         pmd_populate(vma->vm_mm, pmd, pmd_pgtable(orig_pmd));
773         spin_unlock(pmd_ptl);
774         /*
775          * Release both raw and compound pages isolated
776          * in __collapse_huge_page_isolate.
777          */
778         release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist);
779 }
780 
781 /*
782  * __collapse_huge_page_copy - attempts to copy memory contents from raw
783  * pages to a hugepage. Cleans up the raw pages if copying succeeds;
784  * otherwise restores the original page table and releases isolated raw pages.
785  * Returns SCAN_SUCCEED if copying succeeds, otherwise returns SCAN_COPY_MC.
786  *
787  * @pte: starting of the PTEs to copy from
788  * @folio: the new hugepage to copy contents to
789  * @pmd: pointer to the new hugepage's PMD
790  * @orig_pmd: the original raw pages' PMD
791  * @vma: the original raw pages' virtual memory area
792  * @address: starting address to copy
793  * @ptl: lock on raw pages' PTEs
794  * @compound_pagelist: list that stores compound pages
795  */
796 static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
797                 pmd_t *pmd, pmd_t orig_pmd, struct vm_area_struct *vma,
798                 unsigned long address, spinlock_t *ptl,
799                 struct list_head *compound_pagelist)
800 {
801         unsigned int i;
802         int result = SCAN_SUCCEED;
803 
804         /*
805          * Copying pages' contents is subject to memory poison at any iteration.
806          */
807         for (i = 0; i < HPAGE_PMD_NR; i++) {
808                 pte_t pteval = ptep_get(pte + i);
809                 struct page *page = folio_page(folio, i);
810                 unsigned long src_addr = address + i * PAGE_SIZE;
811                 struct page *src_page;
812 
813                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
814                         clear_user_highpage(page, src_addr);
815                         continue;
816                 }
817                 src_page = pte_page(pteval);
818                 if (copy_mc_user_highpage(page, src_page, src_addr, vma) > 0) {
819                         result = SCAN_COPY_MC;
820                         break;
821                 }
822         }
823 
824         if (likely(result == SCAN_SUCCEED))
825                 __collapse_huge_page_copy_succeeded(pte, vma, address, ptl,
826                                                     compound_pagelist);
827         else
828                 __collapse_huge_page_copy_failed(pte, pmd, orig_pmd, vma,
829                                                  compound_pagelist);
830 
831         return result;
832 }
833 
834 static void khugepaged_alloc_sleep(void)
835 {
836         DEFINE_WAIT(wait);
837 
838         add_wait_queue(&khugepaged_wait, &wait);
839         __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
840         schedule_timeout(msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
841         remove_wait_queue(&khugepaged_wait, &wait);
842 }
843 
844 struct collapse_control khugepaged_collapse_control = {
845         .is_khugepaged = true,
846 };
847 
848 static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
849 {
850         int i;
851 
852         /*
853          * If node_reclaim_mode is disabled, then no extra effort is made to
854          * allocate memory locally.
855          */
856         if (!node_reclaim_enabled())
857                 return false;
858 
859         /* If there is a count for this node already, it must be acceptable */
860         if (cc->node_load[nid])
861                 return false;
862 
863         for (i = 0; i < MAX_NUMNODES; i++) {
864                 if (!cc->node_load[i])
865                         continue;
866                 if (node_distance(nid, i) > node_reclaim_distance)
867                         return true;
868         }
869         return false;
870 }
871 
872 #define khugepaged_defrag()                                     \
873         (transparent_hugepage_flags &                           \
874          (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
875 
876 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
877 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
878 {
879         return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
880 }
881 
882 #ifdef CONFIG_NUMA
883 static int hpage_collapse_find_target_node(struct collapse_control *cc)
884 {
885         int nid, target_node = 0, max_value = 0;
886 
887         /* find first node with max normal pages hit */
888         for (nid = 0; nid < MAX_NUMNODES; nid++)
889                 if (cc->node_load[nid] > max_value) {
890                         max_value = cc->node_load[nid];
891                         target_node = nid;
892                 }
893 
894         for_each_online_node(nid) {
895                 if (max_value == cc->node_load[nid])
896                         node_set(nid, cc->alloc_nmask);
897         }
898 
899         return target_node;
900 }
901 #else
902 static int hpage_collapse_find_target_node(struct collapse_control *cc)
903 {
904         return 0;
905 }
906 #endif
907 
908 /*
909  * If mmap_lock temporarily dropped, revalidate vma
910  * before taking mmap_lock.
911  * Returns enum scan_result value.
912  */
913 
914 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
915                                    bool expect_anon,
916                                    struct vm_area_struct **vmap,
917                                    struct collapse_control *cc)
918 {
919         struct vm_area_struct *vma;
920         unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
921 
922         if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
923                 return SCAN_ANY_PROCESS;
924 
925         *vmap = vma = find_vma(mm, address);
926         if (!vma)
927                 return SCAN_VMA_NULL;
928 
929         if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
930                 return SCAN_ADDRESS_RANGE;
931         if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, PMD_ORDER))
932                 return SCAN_VMA_CHECK;
933         /*
934          * Anon VMA expected, the address may be unmapped then
935          * remapped to file after khugepaged reaquired the mmap_lock.
936          *
937          * thp_vma_allowable_order may return true for qualified file
938          * vmas.
939          */
940         if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
941                 return SCAN_PAGE_ANON;
942         return SCAN_SUCCEED;
943 }
944 
945 static int find_pmd_or_thp_or_none(struct mm_struct *mm,
946                                    unsigned long address,
947                                    pmd_t **pmd)
948 {
949         pmd_t pmde;
950 
951         *pmd = mm_find_pmd(mm, address);
952         if (!*pmd)
953                 return SCAN_PMD_NULL;
954 
955         pmde = pmdp_get_lockless(*pmd);
956         if (pmd_none(pmde))
957                 return SCAN_PMD_NONE;
958         if (!pmd_present(pmde))
959                 return SCAN_PMD_NULL;
960         if (pmd_trans_huge(pmde))
961                 return SCAN_PMD_MAPPED;
962         if (pmd_devmap(pmde))
963                 return SCAN_PMD_NULL;
964         if (pmd_bad(pmde))
965                 return SCAN_PMD_NULL;
966         return SCAN_SUCCEED;
967 }
968 
969 static int check_pmd_still_valid(struct mm_struct *mm,
970                                  unsigned long address,
971                                  pmd_t *pmd)
972 {
973         pmd_t *new_pmd;
974         int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
975 
976         if (result != SCAN_SUCCEED)
977                 return result;
978         if (new_pmd != pmd)
979                 return SCAN_FAIL;
980         return SCAN_SUCCEED;
981 }
982 
983 /*
984  * Bring missing pages in from swap, to complete THP collapse.
985  * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
986  *
987  * Called and returns without pte mapped or spinlocks held.
988  * Returns result: if not SCAN_SUCCEED, mmap_lock has been released.
989  */
990 static int __collapse_huge_page_swapin(struct mm_struct *mm,
991                                        struct vm_area_struct *vma,
992                                        unsigned long haddr, pmd_t *pmd,
993                                        int referenced)
994 {
995         int swapped_in = 0;
996         vm_fault_t ret = 0;
997         unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE);
998         int result;
999         pte_t *pte = NULL;
1000         spinlock_t *ptl;
1001 
1002         for (address = haddr; address < end; address += PAGE_SIZE) {
1003                 struct vm_fault vmf = {
1004                         .vma = vma,
1005                         .address = address,
1006                         .pgoff = linear_page_index(vma, address),
1007                         .flags = FAULT_FLAG_ALLOW_RETRY,
1008                         .pmd = pmd,
1009                 };
1010 
1011                 if (!pte++) {
1012                         pte = pte_offset_map_nolock(mm, pmd, address, &ptl);
1013                         if (!pte) {
1014                                 mmap_read_unlock(mm);
1015                                 result = SCAN_PMD_NULL;
1016                                 goto out;
1017                         }
1018                 }
1019 
1020                 vmf.orig_pte = ptep_get_lockless(pte);
1021                 if (!is_swap_pte(vmf.orig_pte))
1022                         continue;
1023 
1024                 vmf.pte = pte;
1025                 vmf.ptl = ptl;
1026                 ret = do_swap_page(&vmf);
1027                 /* Which unmaps pte (after perhaps re-checking the entry) */
1028                 pte = NULL;
1029 
1030                 /*
1031                  * do_swap_page returns VM_FAULT_RETRY with released mmap_lock.
1032                  * Note we treat VM_FAULT_RETRY as VM_FAULT_ERROR here because
1033                  * we do not retry here and swap entry will remain in pagetable
1034                  * resulting in later failure.
1035                  */
1036                 if (ret & VM_FAULT_RETRY) {
1037                         /* Likely, but not guaranteed, that page lock failed */
1038                         result = SCAN_PAGE_LOCK;
1039                         goto out;
1040                 }
1041                 if (ret & VM_FAULT_ERROR) {
1042                         mmap_read_unlock(mm);
1043                         result = SCAN_FAIL;
1044                         goto out;
1045                 }
1046                 swapped_in++;
1047         }
1048 
1049         if (pte)
1050                 pte_unmap(pte);
1051 
1052         /* Drain LRU cache to remove extra pin on the swapped in pages */
1053         if (swapped_in)
1054                 lru_add_drain();
1055 
1056         result = SCAN_SUCCEED;
1057 out:
1058         trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result);
1059         return result;
1060 }
1061 
1062 static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
1063                               struct collapse_control *cc)
1064 {
1065         gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
1066                      GFP_TRANSHUGE);
1067         int node = hpage_collapse_find_target_node(cc);
1068         struct folio *folio;
1069 
1070         folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
1071         if (!folio) {
1072                 *foliop = NULL;
1073                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1074                 return SCAN_ALLOC_HUGE_PAGE_FAIL;
1075         }
1076 
1077         count_vm_event(THP_COLLAPSE_ALLOC);
1078         if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
1079                 folio_put(folio);
1080                 *foliop = NULL;
1081                 return SCAN_CGROUP_CHARGE_FAIL;
1082         }
1083 
1084         count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
1085 
1086         *foliop = folio;
1087         return SCAN_SUCCEED;
1088 }
1089 
1090 static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
1091                               int referenced, int unmapped,
1092                               struct collapse_control *cc)
1093 {
1094         LIST_HEAD(compound_pagelist);
1095         pmd_t *pmd, _pmd;
1096         pte_t *pte;
1097         pgtable_t pgtable;
1098         struct folio *folio;
1099         spinlock_t *pmd_ptl, *pte_ptl;
1100         int result = SCAN_FAIL;
1101         struct vm_area_struct *vma;
1102         struct mmu_notifier_range range;
1103 
1104         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1105 
1106         /*
1107          * Before allocating the hugepage, release the mmap_lock read lock.
1108          * The allocation can take potentially a long time if it involves
1109          * sync compaction, and we do not need to hold the mmap_lock during
1110          * that. We will recheck the vma after taking it again in write mode.
1111          */
1112         mmap_read_unlock(mm);
1113 
1114         result = alloc_charge_folio(&folio, mm, cc);
1115         if (result != SCAN_SUCCEED)
1116                 goto out_nolock;
1117 
1118         mmap_read_lock(mm);
1119         result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1120         if (result != SCAN_SUCCEED) {
1121                 mmap_read_unlock(mm);
1122                 goto out_nolock;
1123         }
1124 
1125         result = find_pmd_or_thp_or_none(mm, address, &pmd);
1126         if (result != SCAN_SUCCEED) {
1127                 mmap_read_unlock(mm);
1128                 goto out_nolock;
1129         }
1130 
1131         if (unmapped) {
1132                 /*
1133                  * __collapse_huge_page_swapin will return with mmap_lock
1134                  * released when it fails. So we jump out_nolock directly in
1135                  * that case.  Continuing to collapse causes inconsistency.
1136                  */
1137                 result = __collapse_huge_page_swapin(mm, vma, address, pmd,
1138                                                      referenced);
1139                 if (result != SCAN_SUCCEED)
1140                         goto out_nolock;
1141         }
1142 
1143         mmap_read_unlock(mm);
1144         /*
1145          * Prevent all access to pagetables with the exception of
1146          * gup_fast later handled by the ptep_clear_flush and the VM
1147          * handled by the anon_vma lock + PG_lock.
1148          *
1149          * UFFDIO_MOVE is prevented to race as well thanks to the
1150          * mmap_lock.
1151          */
1152         mmap_write_lock(mm);
1153         result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
1154         if (result != SCAN_SUCCEED)
1155                 goto out_up_write;
1156         /* check if the pmd is still valid */
1157         result = check_pmd_still_valid(mm, address, pmd);
1158         if (result != SCAN_SUCCEED)
1159                 goto out_up_write;
1160 
1161         vma_start_write(vma);
1162         anon_vma_lock_write(vma->anon_vma);
1163 
1164         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
1165                                 address + HPAGE_PMD_SIZE);
1166         mmu_notifier_invalidate_range_start(&range);
1167 
1168         pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1169         /*
1170          * This removes any huge TLB entry from the CPU so we won't allow
1171          * huge and small TLB entries for the same virtual address to
1172          * avoid the risk of CPU bugs in that area.
1173          *
1174          * Parallel GUP-fast is fine since GUP-fast will back off when
1175          * it detects PMD is changed.
1176          */
1177         _pmd = pmdp_collapse_flush(vma, address, pmd);
1178         spin_unlock(pmd_ptl);
1179         mmu_notifier_invalidate_range_end(&range);
1180         tlb_remove_table_sync_one();
1181 
1182         pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
1183         if (pte) {
1184                 result = __collapse_huge_page_isolate(vma, address, pte, cc,
1185                                                       &compound_pagelist);
1186                 spin_unlock(pte_ptl);
1187         } else {
1188                 result = SCAN_PMD_NULL;
1189         }
1190 
1191         if (unlikely(result != SCAN_SUCCEED)) {
1192                 if (pte)
1193                         pte_unmap(pte);
1194                 spin_lock(pmd_ptl);
1195                 BUG_ON(!pmd_none(*pmd));
1196                 /*
1197                  * We can only use set_pmd_at when establishing
1198                  * hugepmds and never for establishing regular pmds that
1199                  * points to regular pagetables. Use pmd_populate for that
1200                  */
1201                 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1202                 spin_unlock(pmd_ptl);
1203                 anon_vma_unlock_write(vma->anon_vma);
1204                 goto out_up_write;
1205         }
1206 
1207         /*
1208          * All pages are isolated and locked so anon_vma rmap
1209          * can't run anymore.
1210          */
1211         anon_vma_unlock_write(vma->anon_vma);
1212 
1213         result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
1214                                            vma, address, pte_ptl,
1215                                            &compound_pagelist);
1216         pte_unmap(pte);
1217         if (unlikely(result != SCAN_SUCCEED))
1218                 goto out_up_write;
1219 
1220         /*
1221          * The smp_wmb() inside __folio_mark_uptodate() ensures the
1222          * copy_huge_page writes become visible before the set_pmd_at()
1223          * write.
1224          */
1225         __folio_mark_uptodate(folio);
1226         pgtable = pmd_pgtable(_pmd);
1227 
1228         _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
1229         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1230 
1231         spin_lock(pmd_ptl);
1232         BUG_ON(!pmd_none(*pmd));
1233         folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
1234         folio_add_lru_vma(folio, vma);
1235         pgtable_trans_huge_deposit(mm, pmd, pgtable);
1236         set_pmd_at(mm, address, pmd, _pmd);
1237         update_mmu_cache_pmd(vma, address, pmd);
1238         spin_unlock(pmd_ptl);
1239 
1240         folio = NULL;
1241 
1242         result = SCAN_SUCCEED;
1243 out_up_write:
1244         mmap_write_unlock(mm);
1245 out_nolock:
1246         if (folio)
1247                 folio_put(folio);
1248         trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
1249         return result;
1250 }
1251 
1252 static int hpage_collapse_scan_pmd(struct mm_struct *mm,
1253                                    struct vm_area_struct *vma,
1254                                    unsigned long address, bool *mmap_locked,
1255                                    struct collapse_control *cc)
1256 {
1257         pmd_t *pmd;
1258         pte_t *pte, *_pte;
1259         int result = SCAN_FAIL, referenced = 0;
1260         int none_or_zero = 0, shared = 0;
1261         struct page *page = NULL;
1262         struct folio *folio = NULL;
1263         unsigned long _address;
1264         spinlock_t *ptl;
1265         int node = NUMA_NO_NODE, unmapped = 0;
1266         bool writable = false;
1267 
1268         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1269 
1270         result = find_pmd_or_thp_or_none(mm, address, &pmd);
1271         if (result != SCAN_SUCCEED)
1272                 goto out;
1273 
1274         memset(cc->node_load, 0, sizeof(cc->node_load));
1275         nodes_clear(cc->alloc_nmask);
1276         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1277         if (!pte) {
1278                 result = SCAN_PMD_NULL;
1279                 goto out;
1280         }
1281 
1282         for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
1283              _pte++, _address += PAGE_SIZE) {
1284                 pte_t pteval = ptep_get(_pte);
1285                 if (is_swap_pte(pteval)) {
1286                         ++unmapped;
1287                         if (!cc->is_khugepaged ||
1288                             unmapped <= khugepaged_max_ptes_swap) {
1289                                 /*
1290                                  * Always be strict with uffd-wp
1291                                  * enabled swap entries.  Please see
1292                                  * comment below for pte_uffd_wp().
1293                                  */
1294                                 if (pte_swp_uffd_wp_any(pteval)) {
1295                                         result = SCAN_PTE_UFFD_WP;
1296                                         goto out_unmap;
1297                                 }
1298                                 continue;
1299                         } else {
1300                                 result = SCAN_EXCEED_SWAP_PTE;
1301                                 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
1302                                 goto out_unmap;
1303                         }
1304                 }
1305                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1306                         ++none_or_zero;
1307                         if (!userfaultfd_armed(vma) &&
1308                             (!cc->is_khugepaged ||
1309                              none_or_zero <= khugepaged_max_ptes_none)) {
1310                                 continue;
1311                         } else {
1312                                 result = SCAN_EXCEED_NONE_PTE;
1313                                 count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
1314                                 goto out_unmap;
1315                         }
1316                 }
1317                 if (pte_uffd_wp(pteval)) {
1318                         /*
1319                          * Don't collapse the page if any of the small
1320                          * PTEs are armed with uffd write protection.
1321                          * Here we can also mark the new huge pmd as
1322                          * write protected if any of the small ones is
1323                          * marked but that could bring unknown
1324                          * userfault messages that falls outside of
1325                          * the registered range.  So, just be simple.
1326                          */
1327                         result = SCAN_PTE_UFFD_WP;
1328                         goto out_unmap;
1329                 }
1330                 if (pte_write(pteval))
1331                         writable = true;
1332 
1333                 page = vm_normal_page(vma, _address, pteval);
1334                 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1335                         result = SCAN_PAGE_NULL;
1336                         goto out_unmap;
1337                 }
1338                 folio = page_folio(page);
1339 
1340                 if (!folio_test_anon(folio)) {
1341                         result = SCAN_PAGE_ANON;
1342                         goto out_unmap;
1343                 }
1344 
1345                 /*
1346                  * We treat a single page as shared if any part of the THP
1347                  * is shared. "False negatives" from
1348                  * folio_likely_mapped_shared() are not expected to matter
1349                  * much in practice.
1350                  */
1351                 if (folio_likely_mapped_shared(folio)) {
1352                         ++shared;
1353                         if (cc->is_khugepaged &&
1354                             shared > khugepaged_max_ptes_shared) {
1355                                 result = SCAN_EXCEED_SHARED_PTE;
1356                                 count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
1357                                 goto out_unmap;
1358                         }
1359                 }
1360 
1361                 /*
1362                  * Record which node the original page is from and save this
1363                  * information to cc->node_load[].
1364                  * Khugepaged will allocate hugepage from the node has the max
1365                  * hit record.
1366                  */
1367                 node = folio_nid(folio);
1368                 if (hpage_collapse_scan_abort(node, cc)) {
1369                         result = SCAN_SCAN_ABORT;
1370                         goto out_unmap;
1371                 }
1372                 cc->node_load[node]++;
1373                 if (!folio_test_lru(folio)) {
1374                         result = SCAN_PAGE_LRU;
1375                         goto out_unmap;
1376                 }
1377                 if (folio_test_locked(folio)) {
1378                         result = SCAN_PAGE_LOCK;
1379                         goto out_unmap;
1380                 }
1381 
1382                 /*
1383                  * Check if the page has any GUP (or other external) pins.
1384                  *
1385                  * Here the check may be racy:
1386                  * it may see folio_mapcount() > folio_ref_count().
1387                  * But such case is ephemeral we could always retry collapse
1388                  * later.  However it may report false positive if the page
1389                  * has excessive GUP pins (i.e. 512).  Anyway the same check
1390                  * will be done again later the risk seems low.
1391                  */
1392                 if (!is_refcount_suitable(folio)) {
1393                         result = SCAN_PAGE_COUNT;
1394                         goto out_unmap;
1395                 }
1396 
1397                 /*
1398                  * If collapse was initiated by khugepaged, check that there is
1399                  * enough young pte to justify collapsing the page
1400                  */
1401                 if (cc->is_khugepaged &&
1402                     (pte_young(pteval) || folio_test_young(folio) ||
1403                      folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
1404                                                                      address)))
1405                         referenced++;
1406         }
1407         if (!writable) {
1408                 result = SCAN_PAGE_RO;
1409         } else if (cc->is_khugepaged &&
1410                    (!referenced ||
1411                     (unmapped && referenced < HPAGE_PMD_NR / 2))) {
1412                 result = SCAN_LACK_REFERENCED_PAGE;
1413         } else {
1414                 result = SCAN_SUCCEED;
1415         }
1416 out_unmap:
1417         pte_unmap_unlock(pte, ptl);
1418         if (result == SCAN_SUCCEED) {
1419                 result = collapse_huge_page(mm, address, referenced,
1420                                             unmapped, cc);
1421                 /* collapse_huge_page will return with the mmap_lock released */
1422                 *mmap_locked = false;
1423         }
1424 out:
1425         trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
1426                                      none_or_zero, result, unmapped);
1427         return result;
1428 }
1429 
1430 static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
1431 {
1432         struct mm_slot *slot = &mm_slot->slot;
1433         struct mm_struct *mm = slot->mm;
1434 
1435         lockdep_assert_held(&khugepaged_mm_lock);
1436 
1437         if (hpage_collapse_test_exit(mm)) {
1438                 /* free mm_slot */
1439                 hash_del(&slot->hash);
1440                 list_del(&slot->mm_node);
1441 
1442                 /*
1443                  * Not strictly needed because the mm exited already.
1444                  *
1445                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1446                  */
1447 
1448                 /* khugepaged_mm_lock actually not necessary for the below */
1449                 mm_slot_free(mm_slot_cache, mm_slot);
1450                 mmdrop(mm);
1451         }
1452 }
1453 
1454 #ifdef CONFIG_SHMEM
1455 /* hpage must be locked, and mmap_lock must be held */
1456 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
1457                         pmd_t *pmdp, struct page *hpage)
1458 {
1459         struct vm_fault vmf = {
1460                 .vma = vma,
1461                 .address = addr,
1462                 .flags = 0,
1463                 .pmd = pmdp,
1464         };
1465 
1466         VM_BUG_ON(!PageTransHuge(hpage));
1467         mmap_assert_locked(vma->vm_mm);
1468 
1469         if (do_set_pmd(&vmf, hpage))
1470                 return SCAN_FAIL;
1471 
1472         get_page(hpage);
1473         return SCAN_SUCCEED;
1474 }
1475 
1476 /**
1477  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
1478  * address haddr.
1479  *
1480  * @mm: process address space where collapse happens
1481  * @addr: THP collapse address
1482  * @install_pmd: If a huge PMD should be installed
1483  *
1484  * This function checks whether all the PTEs in the PMD are pointing to the
1485  * right THP. If so, retract the page table so the THP can refault in with
1486  * as pmd-mapped. Possibly install a huge PMD mapping the THP.
1487  */
1488 int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
1489                             bool install_pmd)
1490 {
1491         struct mmu_notifier_range range;
1492         bool notified = false;
1493         unsigned long haddr = addr & HPAGE_PMD_MASK;
1494         struct vm_area_struct *vma = vma_lookup(mm, haddr);
1495         struct folio *folio;
1496         pte_t *start_pte, *pte;
1497         pmd_t *pmd, pgt_pmd;
1498         spinlock_t *pml = NULL, *ptl;
1499         int nr_ptes = 0, result = SCAN_FAIL;
1500         int i;
1501 
1502         mmap_assert_locked(mm);
1503 
1504         /* First check VMA found, in case page tables are being torn down */
1505         if (!vma || !vma->vm_file ||
1506             !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
1507                 return SCAN_VMA_CHECK;
1508 
1509         /* Fast check before locking page if already PMD-mapped */
1510         result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1511         if (result == SCAN_PMD_MAPPED)
1512                 return result;
1513 
1514         /*
1515          * If we are here, we've succeeded in replacing all the native pages
1516          * in the page cache with a single hugepage. If a mm were to fault-in
1517          * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
1518          * and map it by a PMD, regardless of sysfs THP settings. As such, let's
1519          * analogously elide sysfs THP settings here.
1520          */
1521         if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
1522                 return SCAN_VMA_CHECK;
1523 
1524         /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
1525         if (userfaultfd_wp(vma))
1526                 return SCAN_PTE_UFFD_WP;
1527 
1528         folio = filemap_lock_folio(vma->vm_file->f_mapping,
1529                                linear_page_index(vma, haddr));
1530         if (IS_ERR(folio))
1531                 return SCAN_PAGE_NULL;
1532 
1533         if (folio_order(folio) != HPAGE_PMD_ORDER) {
1534                 result = SCAN_PAGE_COMPOUND;
1535                 goto drop_folio;
1536         }
1537 
1538         result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
1539         switch (result) {
1540         case SCAN_SUCCEED:
1541                 break;
1542         case SCAN_PMD_NONE:
1543                 /*
1544                  * All pte entries have been removed and pmd cleared.
1545                  * Skip all the pte checks and just update the pmd mapping.
1546                  */
1547                 goto maybe_install_pmd;
1548         default:
1549                 goto drop_folio;
1550         }
1551 
1552         result = SCAN_FAIL;
1553         start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1554         if (!start_pte)         /* mmap_lock + page lock should prevent this */
1555                 goto drop_folio;
1556 
1557         /* step 1: check all mapped PTEs are to the right huge page */
1558         for (i = 0, addr = haddr, pte = start_pte;
1559              i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1560                 struct page *page;
1561                 pte_t ptent = ptep_get(pte);
1562 
1563                 /* empty pte, skip */
1564                 if (pte_none(ptent))
1565                         continue;
1566 
1567                 /* page swapped out, abort */
1568                 if (!pte_present(ptent)) {
1569                         result = SCAN_PTE_NON_PRESENT;
1570                         goto abort;
1571                 }
1572 
1573                 page = vm_normal_page(vma, addr, ptent);
1574                 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1575                         page = NULL;
1576                 /*
1577                  * Note that uprobe, debugger, or MAP_PRIVATE may change the
1578                  * page table, but the new page will not be a subpage of hpage.
1579                  */
1580                 if (folio_page(folio, i) != page)
1581                         goto abort;
1582         }
1583 
1584         pte_unmap_unlock(start_pte, ptl);
1585         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1586                                 haddr, haddr + HPAGE_PMD_SIZE);
1587         mmu_notifier_invalidate_range_start(&range);
1588         notified = true;
1589 
1590         /*
1591          * pmd_lock covers a wider range than ptl, and (if split from mm's
1592          * page_table_lock) ptl nests inside pml. The less time we hold pml,
1593          * the better; but userfaultfd's mfill_atomic_pte() on a private VMA
1594          * inserts a valid as-if-COWed PTE without even looking up page cache.
1595          * So page lock of folio does not protect from it, so we must not drop
1596          * ptl before pgt_pmd is removed, so uffd private needs pml taken now.
1597          */
1598         if (userfaultfd_armed(vma) && !(vma->vm_flags & VM_SHARED))
1599                 pml = pmd_lock(mm, pmd);
1600 
1601         start_pte = pte_offset_map_nolock(mm, pmd, haddr, &ptl);
1602         if (!start_pte)         /* mmap_lock + page lock should prevent this */
1603                 goto abort;
1604         if (!pml)
1605                 spin_lock(ptl);
1606         else if (ptl != pml)
1607                 spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1608 
1609         /* step 2: clear page table and adjust rmap */
1610         for (i = 0, addr = haddr, pte = start_pte;
1611              i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1612                 struct page *page;
1613                 pte_t ptent = ptep_get(pte);
1614 
1615                 if (pte_none(ptent))
1616                         continue;
1617                 /*
1618                  * We dropped ptl after the first scan, to do the mmu_notifier:
1619                  * page lock stops more PTEs of the folio being faulted in, but
1620                  * does not stop write faults COWing anon copies from existing
1621                  * PTEs; and does not stop those being swapped out or migrated.
1622                  */
1623                 if (!pte_present(ptent)) {
1624                         result = SCAN_PTE_NON_PRESENT;
1625                         goto abort;
1626                 }
1627                 page = vm_normal_page(vma, addr, ptent);
1628                 if (folio_page(folio, i) != page)
1629                         goto abort;
1630 
1631                 /*
1632                  * Must clear entry, or a racing truncate may re-remove it.
1633                  * TLB flush can be left until pmdp_collapse_flush() does it.
1634                  * PTE dirty? Shmem page is already dirty; file is read-only.
1635                  */
1636                 ptep_clear(mm, addr, pte);
1637                 folio_remove_rmap_pte(folio, page, vma);
1638                 nr_ptes++;
1639         }
1640 
1641         pte_unmap(start_pte);
1642         if (!pml)
1643                 spin_unlock(ptl);
1644 
1645         /* step 3: set proper refcount and mm_counters. */
1646         if (nr_ptes) {
1647                 folio_ref_sub(folio, nr_ptes);
1648                 add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
1649         }
1650 
1651         /* step 4: remove empty page table */
1652         if (!pml) {
1653                 pml = pmd_lock(mm, pmd);
1654                 if (ptl != pml)
1655                         spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1656         }
1657         pgt_pmd = pmdp_collapse_flush(vma, haddr, pmd);
1658         pmdp_get_lockless_sync();
1659         if (ptl != pml)
1660                 spin_unlock(ptl);
1661         spin_unlock(pml);
1662 
1663         mmu_notifier_invalidate_range_end(&range);
1664 
1665         mm_dec_nr_ptes(mm);
1666         page_table_check_pte_clear_range(mm, haddr, pgt_pmd);
1667         pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1668 
1669 maybe_install_pmd:
1670         /* step 5: install pmd entry */
1671         result = install_pmd
1672                         ? set_huge_pmd(vma, haddr, pmd, &folio->page)
1673                         : SCAN_SUCCEED;
1674         goto drop_folio;
1675 abort:
1676         if (nr_ptes) {
1677                 flush_tlb_mm(mm);
1678                 folio_ref_sub(folio, nr_ptes);
1679                 add_mm_counter(mm, mm_counter_file(folio), -nr_ptes);
1680         }
1681         if (start_pte)
1682                 pte_unmap_unlock(start_pte, ptl);
1683         if (pml && pml != ptl)
1684                 spin_unlock(pml);
1685         if (notified)
1686                 mmu_notifier_invalidate_range_end(&range);
1687 drop_folio:
1688         folio_unlock(folio);
1689         folio_put(folio);
1690         return result;
1691 }
1692 
1693 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1694 {
1695         struct vm_area_struct *vma;
1696 
1697         i_mmap_lock_read(mapping);
1698         vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1699                 struct mmu_notifier_range range;
1700                 struct mm_struct *mm;
1701                 unsigned long addr;
1702                 pmd_t *pmd, pgt_pmd;
1703                 spinlock_t *pml;
1704                 spinlock_t *ptl;
1705                 bool skipped_uffd = false;
1706 
1707                 /*
1708                  * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1709                  * got written to. These VMAs are likely not worth removing
1710                  * page tables from, as PMD-mapping is likely to be split later.
1711                  */
1712                 if (READ_ONCE(vma->anon_vma))
1713                         continue;
1714 
1715                 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1716                 if (addr & ~HPAGE_PMD_MASK ||
1717                     vma->vm_end < addr + HPAGE_PMD_SIZE)
1718                         continue;
1719 
1720                 mm = vma->vm_mm;
1721                 if (find_pmd_or_thp_or_none(mm, addr, &pmd) != SCAN_SUCCEED)
1722                         continue;
1723 
1724                 if (hpage_collapse_test_exit(mm))
1725                         continue;
1726                 /*
1727                  * When a vma is registered with uffd-wp, we cannot recycle
1728                  * the page table because there may be pte markers installed.
1729                  * Other vmas can still have the same file mapped hugely, but
1730                  * skip this one: it will always be mapped in small page size
1731                  * for uffd-wp registered ranges.
1732                  */
1733                 if (userfaultfd_wp(vma))
1734                         continue;
1735 
1736                 /* PTEs were notified when unmapped; but now for the PMD? */
1737                 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
1738                                         addr, addr + HPAGE_PMD_SIZE);
1739                 mmu_notifier_invalidate_range_start(&range);
1740 
1741                 pml = pmd_lock(mm, pmd);
1742                 ptl = pte_lockptr(mm, pmd);
1743                 if (ptl != pml)
1744                         spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
1745 
1746                 /*
1747                  * Huge page lock is still held, so normally the page table
1748                  * must remain empty; and we have already skipped anon_vma
1749                  * and userfaultfd_wp() vmas.  But since the mmap_lock is not
1750                  * held, it is still possible for a racing userfaultfd_ioctl()
1751                  * to have inserted ptes or markers.  Now that we hold ptlock,
1752                  * repeating the anon_vma check protects from one category,
1753                  * and repeating the userfaultfd_wp() check from another.
1754                  */
1755                 if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) {
1756                         skipped_uffd = true;
1757                 } else {
1758                         pgt_pmd = pmdp_collapse_flush(vma, addr, pmd);
1759                         pmdp_get_lockless_sync();
1760                 }
1761 
1762                 if (ptl != pml)
1763                         spin_unlock(ptl);
1764                 spin_unlock(pml);
1765 
1766                 mmu_notifier_invalidate_range_end(&range);
1767 
1768                 if (!skipped_uffd) {
1769                         mm_dec_nr_ptes(mm);
1770                         page_table_check_pte_clear_range(mm, addr, pgt_pmd);
1771                         pte_free_defer(mm, pmd_pgtable(pgt_pmd));
1772                 }
1773         }
1774         i_mmap_unlock_read(mapping);
1775 }
1776 
1777 /**
1778  * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
1779  *
1780  * @mm: process address space where collapse happens
1781  * @addr: virtual collapse start address
1782  * @file: file that collapse on
1783  * @start: collapse start address
1784  * @cc: collapse context and scratchpad
1785  *
1786  * Basic scheme is simple, details are more complex:
1787  *  - allocate and lock a new huge page;
1788  *  - scan page cache, locking old pages
1789  *    + swap/gup in pages if necessary;
1790  *  - copy data to new page
1791  *  - handle shmem holes
1792  *    + re-validate that holes weren't filled by someone else
1793  *    + check for userfaultfd
1794  *  - finalize updates to the page cache;
1795  *  - if replacing succeeds:
1796  *    + unlock huge page;
1797  *    + free old pages;
1798  *  - if replacing failed;
1799  *    + unlock old pages
1800  *    + unlock and free huge page;
1801  */
1802 static int collapse_file(struct mm_struct *mm, unsigned long addr,
1803                          struct file *file, pgoff_t start,
1804                          struct collapse_control *cc)
1805 {
1806         struct address_space *mapping = file->f_mapping;
1807         struct page *dst;
1808         struct folio *folio, *tmp, *new_folio;
1809         pgoff_t index = 0, end = start + HPAGE_PMD_NR;
1810         LIST_HEAD(pagelist);
1811         XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1812         int nr_none = 0, result = SCAN_SUCCEED;
1813         bool is_shmem = shmem_file(file);
1814 
1815         VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1816         VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1817 
1818         result = alloc_charge_folio(&new_folio, mm, cc);
1819         if (result != SCAN_SUCCEED)
1820                 goto out;
1821 
1822         __folio_set_locked(new_folio);
1823         if (is_shmem)
1824                 __folio_set_swapbacked(new_folio);
1825         new_folio->index = start;
1826         new_folio->mapping = mapping;
1827 
1828         /*
1829          * Ensure we have slots for all the pages in the range.  This is
1830          * almost certainly a no-op because most of the pages must be present
1831          */
1832         do {
1833                 xas_lock_irq(&xas);
1834                 xas_create_range(&xas);
1835                 if (!xas_error(&xas))
1836                         break;
1837                 xas_unlock_irq(&xas);
1838                 if (!xas_nomem(&xas, GFP_KERNEL)) {
1839                         result = SCAN_FAIL;
1840                         goto rollback;
1841                 }
1842         } while (1);
1843 
1844         for (index = start; index < end; index++) {
1845                 xas_set(&xas, index);
1846                 folio = xas_load(&xas);
1847 
1848                 VM_BUG_ON(index != xas.xa_index);
1849                 if (is_shmem) {
1850                         if (!folio) {
1851                                 /*
1852                                  * Stop if extent has been truncated or
1853                                  * hole-punched, and is now completely
1854                                  * empty.
1855                                  */
1856                                 if (index == start) {
1857                                         if (!xas_next_entry(&xas, end - 1)) {
1858                                                 result = SCAN_TRUNCATED;
1859                                                 goto xa_locked;
1860                                         }
1861                                 }
1862                                 nr_none++;
1863                                 continue;
1864                         }
1865 
1866                         if (xa_is_value(folio) || !folio_test_uptodate(folio)) {
1867                                 xas_unlock_irq(&xas);
1868                                 /* swap in or instantiate fallocated page */
1869                                 if (shmem_get_folio(mapping->host, index,
1870                                                 &folio, SGP_NOALLOC)) {
1871                                         result = SCAN_FAIL;
1872                                         goto xa_unlocked;
1873                                 }
1874                                 /* drain lru cache to help isolate_lru_page() */
1875                                 lru_add_drain();
1876                         } else if (folio_trylock(folio)) {
1877                                 folio_get(folio);
1878                                 xas_unlock_irq(&xas);
1879                         } else {
1880                                 result = SCAN_PAGE_LOCK;
1881                                 goto xa_locked;
1882                         }
1883                 } else {        /* !is_shmem */
1884                         if (!folio || xa_is_value(folio)) {
1885                                 xas_unlock_irq(&xas);
1886                                 page_cache_sync_readahead(mapping, &file->f_ra,
1887                                                           file, index,
1888                                                           end - index);
1889                                 /* drain lru cache to help isolate_lru_page() */
1890                                 lru_add_drain();
1891                                 folio = filemap_lock_folio(mapping, index);
1892                                 if (IS_ERR(folio)) {
1893                                         result = SCAN_FAIL;
1894                                         goto xa_unlocked;
1895                                 }
1896                         } else if (folio_test_dirty(folio)) {
1897                                 /*
1898                                  * khugepaged only works on read-only fd,
1899                                  * so this page is dirty because it hasn't
1900                                  * been flushed since first write. There
1901                                  * won't be new dirty pages.
1902                                  *
1903                                  * Trigger async flush here and hope the
1904                                  * writeback is done when khugepaged
1905                                  * revisits this page.
1906                                  *
1907                                  * This is a one-off situation. We are not
1908                                  * forcing writeback in loop.
1909                                  */
1910                                 xas_unlock_irq(&xas);
1911                                 filemap_flush(mapping);
1912                                 result = SCAN_FAIL;
1913                                 goto xa_unlocked;
1914                         } else if (folio_test_writeback(folio)) {
1915                                 xas_unlock_irq(&xas);
1916                                 result = SCAN_FAIL;
1917                                 goto xa_unlocked;
1918                         } else if (folio_trylock(folio)) {
1919                                 folio_get(folio);
1920                                 xas_unlock_irq(&xas);
1921                         } else {
1922                                 result = SCAN_PAGE_LOCK;
1923                                 goto xa_locked;
1924                         }
1925                 }
1926 
1927                 /*
1928                  * The folio must be locked, so we can drop the i_pages lock
1929                  * without racing with truncate.
1930                  */
1931                 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1932 
1933                 /* make sure the folio is up to date */
1934                 if (unlikely(!folio_test_uptodate(folio))) {
1935                         result = SCAN_FAIL;
1936                         goto out_unlock;
1937                 }
1938 
1939                 /*
1940                  * If file was truncated then extended, or hole-punched, before
1941                  * we locked the first folio, then a THP might be there already.
1942                  * This will be discovered on the first iteration.
1943                  */
1944                 if (folio_test_large(folio)) {
1945                         result = folio_order(folio) == HPAGE_PMD_ORDER &&
1946                                         folio->index == start
1947                                         /* Maybe PMD-mapped */
1948                                         ? SCAN_PTE_MAPPED_HUGEPAGE
1949                                         : SCAN_PAGE_COMPOUND;
1950                         goto out_unlock;
1951                 }
1952 
1953                 if (folio_mapping(folio) != mapping) {
1954                         result = SCAN_TRUNCATED;
1955                         goto out_unlock;
1956                 }
1957 
1958                 if (!is_shmem && (folio_test_dirty(folio) ||
1959                                   folio_test_writeback(folio))) {
1960                         /*
1961                          * khugepaged only works on read-only fd, so this
1962                          * folio is dirty because it hasn't been flushed
1963                          * since first write.
1964                          */
1965                         result = SCAN_FAIL;
1966                         goto out_unlock;
1967                 }
1968 
1969                 if (!folio_isolate_lru(folio)) {
1970                         result = SCAN_DEL_PAGE_LRU;
1971                         goto out_unlock;
1972                 }
1973 
1974                 if (!filemap_release_folio(folio, GFP_KERNEL)) {
1975                         result = SCAN_PAGE_HAS_PRIVATE;
1976                         folio_putback_lru(folio);
1977                         goto out_unlock;
1978                 }
1979 
1980                 if (folio_mapped(folio))
1981                         try_to_unmap(folio,
1982                                         TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
1983 
1984                 xas_lock_irq(&xas);
1985 
1986                 VM_BUG_ON_FOLIO(folio != xa_load(xas.xa, index), folio);
1987 
1988                 /*
1989                  * We control three references to the folio:
1990                  *  - we hold a pin on it;
1991                  *  - one reference from page cache;
1992                  *  - one from lru_isolate_folio;
1993                  * If those are the only references, then any new usage
1994                  * of the folio will have to fetch it from the page
1995                  * cache. That requires locking the folio to handle
1996                  * truncate, so any new usage will be blocked until we
1997                  * unlock folio after collapse/during rollback.
1998                  */
1999                 if (folio_ref_count(folio) != 3) {
2000                         result = SCAN_PAGE_COUNT;
2001                         xas_unlock_irq(&xas);
2002                         folio_putback_lru(folio);
2003                         goto out_unlock;
2004                 }
2005 
2006                 /*
2007                  * Accumulate the folios that are being collapsed.
2008                  */
2009                 list_add_tail(&folio->lru, &pagelist);
2010                 continue;
2011 out_unlock:
2012                 folio_unlock(folio);
2013                 folio_put(folio);
2014                 goto xa_unlocked;
2015         }
2016 
2017         if (!is_shmem) {
2018                 filemap_nr_thps_inc(mapping);
2019                 /*
2020                  * Paired with the fence in do_dentry_open() -> get_write_access()
2021                  * to ensure i_writecount is up to date and the update to nr_thps
2022                  * is visible. Ensures the page cache will be truncated if the
2023                  * file is opened writable.
2024                  */
2025                 smp_mb();
2026                 if (inode_is_open_for_write(mapping->host)) {
2027                         result = SCAN_FAIL;
2028                         filemap_nr_thps_dec(mapping);
2029                 }
2030         }
2031 
2032 xa_locked:
2033         xas_unlock_irq(&xas);
2034 xa_unlocked:
2035 
2036         /*
2037          * If collapse is successful, flush must be done now before copying.
2038          * If collapse is unsuccessful, does flush actually need to be done?
2039          * Do it anyway, to clear the state.
2040          */
2041         try_to_unmap_flush();
2042 
2043         if (result == SCAN_SUCCEED && nr_none &&
2044             !shmem_charge(mapping->host, nr_none))
2045                 result = SCAN_FAIL;
2046         if (result != SCAN_SUCCEED) {
2047                 nr_none = 0;
2048                 goto rollback;
2049         }
2050 
2051         /*
2052          * The old folios are locked, so they won't change anymore.
2053          */
2054         index = start;
2055         dst = folio_page(new_folio, 0);
2056         list_for_each_entry(folio, &pagelist, lru) {
2057                 while (index < folio->index) {
2058                         clear_highpage(dst);
2059                         index++;
2060                         dst++;
2061                 }
2062                 if (copy_mc_highpage(dst, folio_page(folio, 0)) > 0) {
2063                         result = SCAN_COPY_MC;
2064                         goto rollback;
2065                 }
2066                 index++;
2067                 dst++;
2068         }
2069         while (index < end) {
2070                 clear_highpage(dst);
2071                 index++;
2072                 dst++;
2073         }
2074 
2075         if (nr_none) {
2076                 struct vm_area_struct *vma;
2077                 int nr_none_check = 0;
2078 
2079                 i_mmap_lock_read(mapping);
2080                 xas_lock_irq(&xas);
2081 
2082                 xas_set(&xas, start);
2083                 for (index = start; index < end; index++) {
2084                         if (!xas_next(&xas)) {
2085                                 xas_store(&xas, XA_RETRY_ENTRY);
2086                                 if (xas_error(&xas)) {
2087                                         result = SCAN_STORE_FAILED;
2088                                         goto immap_locked;
2089                                 }
2090                                 nr_none_check++;
2091                         }
2092                 }
2093 
2094                 if (nr_none != nr_none_check) {
2095                         result = SCAN_PAGE_FILLED;
2096                         goto immap_locked;
2097                 }
2098 
2099                 /*
2100                  * If userspace observed a missing page in a VMA with
2101                  * a MODE_MISSING userfaultfd, then it might expect a
2102                  * UFFD_EVENT_PAGEFAULT for that page. If so, we need to
2103                  * roll back to avoid suppressing such an event. Since
2104                  * wp/minor userfaultfds don't give userspace any
2105                  * guarantees that the kernel doesn't fill a missing
2106                  * page with a zero page, so they don't matter here.
2107                  *
2108                  * Any userfaultfds registered after this point will
2109                  * not be able to observe any missing pages due to the
2110                  * previously inserted retry entries.
2111                  */
2112                 vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
2113                         if (userfaultfd_missing(vma)) {
2114                                 result = SCAN_EXCEED_NONE_PTE;
2115                                 goto immap_locked;
2116                         }
2117                 }
2118 
2119 immap_locked:
2120                 i_mmap_unlock_read(mapping);
2121                 if (result != SCAN_SUCCEED) {
2122                         xas_set(&xas, start);
2123                         for (index = start; index < end; index++) {
2124                                 if (xas_next(&xas) == XA_RETRY_ENTRY)
2125                                         xas_store(&xas, NULL);
2126                         }
2127 
2128                         xas_unlock_irq(&xas);
2129                         goto rollback;
2130                 }
2131         } else {
2132                 xas_lock_irq(&xas);
2133         }
2134 
2135         if (is_shmem)
2136                 __lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
2137         else
2138                 __lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
2139 
2140         if (nr_none) {
2141                 __lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
2142                 /* nr_none is always 0 for non-shmem. */
2143                 __lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
2144         }
2145 
2146         /*
2147          * Mark new_folio as uptodate before inserting it into the
2148          * page cache so that it isn't mistaken for an fallocated but
2149          * unwritten page.
2150          */
2151         folio_mark_uptodate(new_folio);
2152         folio_ref_add(new_folio, HPAGE_PMD_NR - 1);
2153 
2154         if (is_shmem)
2155                 folio_mark_dirty(new_folio);
2156         folio_add_lru(new_folio);
2157 
2158         /* Join all the small entries into a single multi-index entry. */
2159         xas_set_order(&xas, start, HPAGE_PMD_ORDER);
2160         xas_store(&xas, new_folio);
2161         WARN_ON_ONCE(xas_error(&xas));
2162         xas_unlock_irq(&xas);
2163 
2164         /*
2165          * Remove pte page tables, so we can re-fault the page as huge.
2166          * If MADV_COLLAPSE, adjust result to call collapse_pte_mapped_thp().
2167          */
2168         retract_page_tables(mapping, start);
2169         if (cc && !cc->is_khugepaged)
2170                 result = SCAN_PTE_MAPPED_HUGEPAGE;
2171         folio_unlock(new_folio);
2172 
2173         /*
2174          * The collapse has succeeded, so free the old folios.
2175          */
2176         list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
2177                 list_del(&folio->lru);
2178                 folio->mapping = NULL;
2179                 folio_clear_active(folio);
2180                 folio_clear_unevictable(folio);
2181                 folio_unlock(folio);
2182                 folio_put_refs(folio, 3);
2183         }
2184 
2185         goto out;
2186 
2187 rollback:
2188         /* Something went wrong: roll back page cache changes */
2189         if (nr_none) {
2190                 xas_lock_irq(&xas);
2191                 mapping->nrpages -= nr_none;
2192                 xas_unlock_irq(&xas);
2193                 shmem_uncharge(mapping->host, nr_none);
2194         }
2195 
2196         list_for_each_entry_safe(folio, tmp, &pagelist, lru) {
2197                 list_del(&folio->lru);
2198                 folio_unlock(folio);
2199                 folio_putback_lru(folio);
2200                 folio_put(folio);
2201         }
2202         /*
2203          * Undo the updates of filemap_nr_thps_inc for non-SHMEM
2204          * file only. This undo is not needed unless failure is
2205          * due to SCAN_COPY_MC.
2206          */
2207         if (!is_shmem && result == SCAN_COPY_MC) {
2208                 filemap_nr_thps_dec(mapping);
2209                 /*
2210                  * Paired with the fence in do_dentry_open() -> get_write_access()
2211                  * to ensure the update to nr_thps is visible.
2212                  */
2213                 smp_mb();
2214         }
2215 
2216         new_folio->mapping = NULL;
2217 
2218         folio_unlock(new_folio);
2219         folio_put(new_folio);
2220 out:
2221         VM_BUG_ON(!list_empty(&pagelist));
2222         trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, HPAGE_PMD_NR, result);
2223         return result;
2224 }
2225 
2226 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2227                                     struct file *file, pgoff_t start,
2228                                     struct collapse_control *cc)
2229 {
2230         struct folio *folio = NULL;
2231         struct address_space *mapping = file->f_mapping;
2232         XA_STATE(xas, &mapping->i_pages, start);
2233         int present, swap;
2234         int node = NUMA_NO_NODE;
2235         int result = SCAN_SUCCEED;
2236 
2237         present = 0;
2238         swap = 0;
2239         memset(cc->node_load, 0, sizeof(cc->node_load));
2240         nodes_clear(cc->alloc_nmask);
2241         rcu_read_lock();
2242         xas_for_each(&xas, folio, start + HPAGE_PMD_NR - 1) {
2243                 if (xas_retry(&xas, folio))
2244                         continue;
2245 
2246                 if (xa_is_value(folio)) {
2247                         ++swap;
2248                         if (cc->is_khugepaged &&
2249                             swap > khugepaged_max_ptes_swap) {
2250                                 result = SCAN_EXCEED_SWAP_PTE;
2251                                 count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
2252                                 break;
2253                         }
2254                         continue;
2255                 }
2256 
2257                 /*
2258                  * TODO: khugepaged should compact smaller compound pages
2259                  * into a PMD sized page
2260                  */
2261                 if (folio_test_large(folio)) {
2262                         result = folio_order(folio) == HPAGE_PMD_ORDER &&
2263                                         folio->index == start
2264                                         /* Maybe PMD-mapped */
2265                                         ? SCAN_PTE_MAPPED_HUGEPAGE
2266                                         : SCAN_PAGE_COMPOUND;
2267                         /*
2268                          * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
2269                          * by the caller won't touch the page cache, and so
2270                          * it's safe to skip LRU and refcount checks before
2271                          * returning.
2272                          */
2273                         break;
2274                 }
2275 
2276                 node = folio_nid(folio);
2277                 if (hpage_collapse_scan_abort(node, cc)) {
2278                         result = SCAN_SCAN_ABORT;
2279                         break;
2280                 }
2281                 cc->node_load[node]++;
2282 
2283                 if (!folio_test_lru(folio)) {
2284                         result = SCAN_PAGE_LRU;
2285                         break;
2286                 }
2287 
2288                 if (folio_ref_count(folio) !=
2289                     1 + folio_mapcount(folio) + folio_test_private(folio)) {
2290                         result = SCAN_PAGE_COUNT;
2291                         break;
2292                 }
2293 
2294                 /*
2295                  * We probably should check if the folio is referenced
2296                  * here, but nobody would transfer pte_young() to
2297                  * folio_test_referenced() for us.  And rmap walk here
2298                  * is just too costly...
2299                  */
2300 
2301                 present++;
2302 
2303                 if (need_resched()) {
2304                         xas_pause(&xas);
2305                         cond_resched_rcu();
2306                 }
2307         }
2308         rcu_read_unlock();
2309 
2310         if (result == SCAN_SUCCEED) {
2311                 if (cc->is_khugepaged &&
2312                     present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2313                         result = SCAN_EXCEED_NONE_PTE;
2314                         count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
2315                 } else {
2316                         result = collapse_file(mm, addr, file, start, cc);
2317                 }
2318         }
2319 
2320         trace_mm_khugepaged_scan_file(mm, folio, file, present, swap, result);
2321         return result;
2322 }
2323 #else
2324 static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
2325                                     struct file *file, pgoff_t start,
2326                                     struct collapse_control *cc)
2327 {
2328         BUILD_BUG();
2329 }
2330 #endif
2331 
2332 static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
2333                                             struct collapse_control *cc)
2334         __releases(&khugepaged_mm_lock)
2335         __acquires(&khugepaged_mm_lock)
2336 {
2337         struct vma_iterator vmi;
2338         struct khugepaged_mm_slot *mm_slot;
2339         struct mm_slot *slot;
2340         struct mm_struct *mm;
2341         struct vm_area_struct *vma;
2342         int progress = 0;
2343 
2344         VM_BUG_ON(!pages);
2345         lockdep_assert_held(&khugepaged_mm_lock);
2346         *result = SCAN_FAIL;
2347 
2348         if (khugepaged_scan.mm_slot) {
2349                 mm_slot = khugepaged_scan.mm_slot;
2350                 slot = &mm_slot->slot;
2351         } else {
2352                 slot = list_entry(khugepaged_scan.mm_head.next,
2353                                      struct mm_slot, mm_node);
2354                 mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2355                 khugepaged_scan.address = 0;
2356                 khugepaged_scan.mm_slot = mm_slot;
2357         }
2358         spin_unlock(&khugepaged_mm_lock);
2359 
2360         mm = slot->mm;
2361         /*
2362          * Don't wait for semaphore (to avoid long wait times).  Just move to
2363          * the next mm on the list.
2364          */
2365         vma = NULL;
2366         if (unlikely(!mmap_read_trylock(mm)))
2367                 goto breakouterloop_mmap_lock;
2368 
2369         progress++;
2370         if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
2371                 goto breakouterloop;
2372 
2373         vma_iter_init(&vmi, mm, khugepaged_scan.address);
2374         for_each_vma(vmi, vma) {
2375                 unsigned long hstart, hend;
2376 
2377                 cond_resched();
2378                 if (unlikely(hpage_collapse_test_exit_or_disable(mm))) {
2379                         progress++;
2380                         break;
2381                 }
2382                 if (!thp_vma_allowable_order(vma, vma->vm_flags,
2383                                         TVA_ENFORCE_SYSFS, PMD_ORDER)) {
2384 skip:
2385                         progress++;
2386                         continue;
2387                 }
2388                 hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
2389                 hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
2390                 if (khugepaged_scan.address > hend)
2391                         goto skip;
2392                 if (khugepaged_scan.address < hstart)
2393                         khugepaged_scan.address = hstart;
2394                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2395 
2396                 while (khugepaged_scan.address < hend) {
2397                         bool mmap_locked = true;
2398 
2399                         cond_resched();
2400                         if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
2401                                 goto breakouterloop;
2402 
2403                         VM_BUG_ON(khugepaged_scan.address < hstart ||
2404                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
2405                                   hend);
2406                         if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2407                                 struct file *file = get_file(vma->vm_file);
2408                                 pgoff_t pgoff = linear_page_index(vma,
2409                                                 khugepaged_scan.address);
2410 
2411                                 mmap_read_unlock(mm);
2412                                 mmap_locked = false;
2413                                 *result = hpage_collapse_scan_file(mm,
2414                                         khugepaged_scan.address, file, pgoff, cc);
2415                                 fput(file);
2416                                 if (*result == SCAN_PTE_MAPPED_HUGEPAGE) {
2417                                         mmap_read_lock(mm);
2418                                         if (hpage_collapse_test_exit_or_disable(mm))
2419                                                 goto breakouterloop;
2420                                         *result = collapse_pte_mapped_thp(mm,
2421                                                 khugepaged_scan.address, false);
2422                                         if (*result == SCAN_PMD_MAPPED)
2423                                                 *result = SCAN_SUCCEED;
2424                                         mmap_read_unlock(mm);
2425                                 }
2426                         } else {
2427                                 *result = hpage_collapse_scan_pmd(mm, vma,
2428                                         khugepaged_scan.address, &mmap_locked, cc);
2429                         }
2430 
2431                         if (*result == SCAN_SUCCEED)
2432                                 ++khugepaged_pages_collapsed;
2433 
2434                         /* move to next address */
2435                         khugepaged_scan.address += HPAGE_PMD_SIZE;
2436                         progress += HPAGE_PMD_NR;
2437                         if (!mmap_locked)
2438                                 /*
2439                                  * We released mmap_lock so break loop.  Note
2440                                  * that we drop mmap_lock before all hugepage
2441                                  * allocations, so if allocation fails, we are
2442                                  * guaranteed to break here and report the
2443                                  * correct result back to caller.
2444                                  */
2445                                 goto breakouterloop_mmap_lock;
2446                         if (progress >= pages)
2447                                 goto breakouterloop;
2448                 }
2449         }
2450 breakouterloop:
2451         mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2452 breakouterloop_mmap_lock:
2453 
2454         spin_lock(&khugepaged_mm_lock);
2455         VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2456         /*
2457          * Release the current mm_slot if this mm is about to die, or
2458          * if we scanned all vmas of this mm.
2459          */
2460         if (hpage_collapse_test_exit(mm) || !vma) {
2461                 /*
2462                  * Make sure that if mm_users is reaching zero while
2463                  * khugepaged runs here, khugepaged_exit will find
2464                  * mm_slot not pointing to the exiting mm.
2465                  */
2466                 if (slot->mm_node.next != &khugepaged_scan.mm_head) {
2467                         slot = list_entry(slot->mm_node.next,
2468                                           struct mm_slot, mm_node);
2469                         khugepaged_scan.mm_slot =
2470                                 mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
2471                         khugepaged_scan.address = 0;
2472                 } else {
2473                         khugepaged_scan.mm_slot = NULL;
2474                         khugepaged_full_scans++;
2475                 }
2476 
2477                 collect_mm_slot(mm_slot);
2478         }
2479 
2480         return progress;
2481 }
2482 
2483 static int khugepaged_has_work(void)
2484 {
2485         return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled();
2486 }
2487 
2488 static int khugepaged_wait_event(void)
2489 {
2490         return !list_empty(&khugepaged_scan.mm_head) ||
2491                 kthread_should_stop();
2492 }
2493 
2494 static void khugepaged_do_scan(struct collapse_control *cc)
2495 {
2496         unsigned int progress = 0, pass_through_head = 0;
2497         unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
2498         bool wait = true;
2499         int result = SCAN_SUCCEED;
2500 
2501         lru_add_drain_all();
2502 
2503         while (true) {
2504                 cond_resched();
2505 
2506                 if (unlikely(kthread_should_stop()))
2507                         break;
2508 
2509                 spin_lock(&khugepaged_mm_lock);
2510                 if (!khugepaged_scan.mm_slot)
2511                         pass_through_head++;
2512                 if (khugepaged_has_work() &&
2513                     pass_through_head < 2)
2514                         progress += khugepaged_scan_mm_slot(pages - progress,
2515                                                             &result, cc);
2516                 else
2517                         progress = pages;
2518                 spin_unlock(&khugepaged_mm_lock);
2519 
2520                 if (progress >= pages)
2521                         break;
2522 
2523                 if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
2524                         /*
2525                          * If fail to allocate the first time, try to sleep for
2526                          * a while.  When hit again, cancel the scan.
2527                          */
2528                         if (!wait)
2529                                 break;
2530                         wait = false;
2531                         khugepaged_alloc_sleep();
2532                 }
2533         }
2534 }
2535 
2536 static bool khugepaged_should_wakeup(void)
2537 {
2538         return kthread_should_stop() ||
2539                time_after_eq(jiffies, khugepaged_sleep_expire);
2540 }
2541 
2542 static void khugepaged_wait_work(void)
2543 {
2544         if (khugepaged_has_work()) {
2545                 const unsigned long scan_sleep_jiffies =
2546                         msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2547 
2548                 if (!scan_sleep_jiffies)
2549                         return;
2550 
2551                 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2552                 wait_event_freezable_timeout(khugepaged_wait,
2553                                              khugepaged_should_wakeup(),
2554                                              scan_sleep_jiffies);
2555                 return;
2556         }
2557 
2558         if (hugepage_pmd_enabled())
2559                 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2560 }
2561 
2562 static int khugepaged(void *none)
2563 {
2564         struct khugepaged_mm_slot *mm_slot;
2565 
2566         set_freezable();
2567         set_user_nice(current, MAX_NICE);
2568 
2569         while (!kthread_should_stop()) {
2570                 khugepaged_do_scan(&khugepaged_collapse_control);
2571                 khugepaged_wait_work();
2572         }
2573 
2574         spin_lock(&khugepaged_mm_lock);
2575         mm_slot = khugepaged_scan.mm_slot;
2576         khugepaged_scan.mm_slot = NULL;
2577         if (mm_slot)
2578                 collect_mm_slot(mm_slot);
2579         spin_unlock(&khugepaged_mm_lock);
2580         return 0;
2581 }
2582 
2583 static void set_recommended_min_free_kbytes(void)
2584 {
2585         struct zone *zone;
2586         int nr_zones = 0;
2587         unsigned long recommended_min;
2588 
2589         if (!hugepage_pmd_enabled()) {
2590                 calculate_min_free_kbytes();
2591                 goto update_wmarks;
2592         }
2593 
2594         for_each_populated_zone(zone) {
2595                 /*
2596                  * We don't need to worry about fragmentation of
2597                  * ZONE_MOVABLE since it only has movable pages.
2598                  */
2599                 if (zone_idx(zone) > gfp_zone(GFP_USER))
2600                         continue;
2601 
2602                 nr_zones++;
2603         }
2604 
2605         /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2606         recommended_min = pageblock_nr_pages * nr_zones * 2;
2607 
2608         /*
2609          * Make sure that on average at least two pageblocks are almost free
2610          * of another type, one for a migratetype to fall back to and a
2611          * second to avoid subsequent fallbacks of other types There are 3
2612          * MIGRATE_TYPES we care about.
2613          */
2614         recommended_min += pageblock_nr_pages * nr_zones *
2615                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2616 
2617         /* don't ever allow to reserve more than 5% of the lowmem */
2618         recommended_min = min(recommended_min,
2619                               (unsigned long) nr_free_buffer_pages() / 20);
2620         recommended_min <<= (PAGE_SHIFT-10);
2621 
2622         if (recommended_min > min_free_kbytes) {
2623                 if (user_min_free_kbytes >= 0)
2624                         pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2625                                 min_free_kbytes, recommended_min);
2626 
2627                 min_free_kbytes = recommended_min;
2628         }
2629 
2630 update_wmarks:
2631         setup_per_zone_wmarks();
2632 }
2633 
2634 int start_stop_khugepaged(void)
2635 {
2636         int err = 0;
2637 
2638         mutex_lock(&khugepaged_mutex);
2639         if (hugepage_pmd_enabled()) {
2640                 if (!khugepaged_thread)
2641                         khugepaged_thread = kthread_run(khugepaged, NULL,
2642                                                         "khugepaged");
2643                 if (IS_ERR(khugepaged_thread)) {
2644                         pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2645                         err = PTR_ERR(khugepaged_thread);
2646                         khugepaged_thread = NULL;
2647                         goto fail;
2648                 }
2649 
2650                 if (!list_empty(&khugepaged_scan.mm_head))
2651                         wake_up_interruptible(&khugepaged_wait);
2652         } else if (khugepaged_thread) {
2653                 kthread_stop(khugepaged_thread);
2654                 khugepaged_thread = NULL;
2655         }
2656         set_recommended_min_free_kbytes();
2657 fail:
2658         mutex_unlock(&khugepaged_mutex);
2659         return err;
2660 }
2661 
2662 void khugepaged_min_free_kbytes_update(void)
2663 {
2664         mutex_lock(&khugepaged_mutex);
2665         if (hugepage_pmd_enabled() && khugepaged_thread)
2666                 set_recommended_min_free_kbytes();
2667         mutex_unlock(&khugepaged_mutex);
2668 }
2669 
2670 bool current_is_khugepaged(void)
2671 {
2672         return kthread_func(current) == khugepaged;
2673 }
2674 
2675 static int madvise_collapse_errno(enum scan_result r)
2676 {
2677         /*
2678          * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
2679          * actionable feedback to caller, so they may take an appropriate
2680          * fallback measure depending on the nature of the failure.
2681          */
2682         switch (r) {
2683         case SCAN_ALLOC_HUGE_PAGE_FAIL:
2684                 return -ENOMEM;
2685         case SCAN_CGROUP_CHARGE_FAIL:
2686         case SCAN_EXCEED_NONE_PTE:
2687                 return -EBUSY;
2688         /* Resource temporary unavailable - trying again might succeed */
2689         case SCAN_PAGE_COUNT:
2690         case SCAN_PAGE_LOCK:
2691         case SCAN_PAGE_LRU:
2692         case SCAN_DEL_PAGE_LRU:
2693         case SCAN_PAGE_FILLED:
2694                 return -EAGAIN;
2695         /*
2696          * Other: Trying again likely not to succeed / error intrinsic to
2697          * specified memory range. khugepaged likely won't be able to collapse
2698          * either.
2699          */
2700         default:
2701                 return -EINVAL;
2702         }
2703 }
2704 
2705 int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
2706                      unsigned long start, unsigned long end)
2707 {
2708         struct collapse_control *cc;
2709         struct mm_struct *mm = vma->vm_mm;
2710         unsigned long hstart, hend, addr;
2711         int thps = 0, last_fail = SCAN_FAIL;
2712         bool mmap_locked = true;
2713 
2714         BUG_ON(vma->vm_start > start);
2715         BUG_ON(vma->vm_end < end);
2716 
2717         *prev = vma;
2718 
2719         if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
2720                 return -EINVAL;
2721 
2722         cc = kmalloc(sizeof(*cc), GFP_KERNEL);
2723         if (!cc)
2724                 return -ENOMEM;
2725         cc->is_khugepaged = false;
2726 
2727         mmgrab(mm);
2728         lru_add_drain_all();
2729 
2730         hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2731         hend = end & HPAGE_PMD_MASK;
2732 
2733         for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
2734                 int result = SCAN_FAIL;
2735 
2736                 if (!mmap_locked) {
2737                         cond_resched();
2738                         mmap_read_lock(mm);
2739                         mmap_locked = true;
2740                         result = hugepage_vma_revalidate(mm, addr, false, &vma,
2741                                                          cc);
2742                         if (result  != SCAN_SUCCEED) {
2743                                 last_fail = result;
2744                                 goto out_nolock;
2745                         }
2746 
2747                         hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
2748                 }
2749                 mmap_assert_locked(mm);
2750                 memset(cc->node_load, 0, sizeof(cc->node_load));
2751                 nodes_clear(cc->alloc_nmask);
2752                 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2753                         struct file *file = get_file(vma->vm_file);
2754                         pgoff_t pgoff = linear_page_index(vma, addr);
2755 
2756                         mmap_read_unlock(mm);
2757                         mmap_locked = false;
2758                         result = hpage_collapse_scan_file(mm, addr, file, pgoff,
2759                                                           cc);
2760                         fput(file);
2761                 } else {
2762                         result = hpage_collapse_scan_pmd(mm, vma, addr,
2763                                                          &mmap_locked, cc);
2764                 }
2765                 if (!mmap_locked)
2766                         *prev = NULL;  /* Tell caller we dropped mmap_lock */
2767 
2768 handle_result:
2769                 switch (result) {
2770                 case SCAN_SUCCEED:
2771                 case SCAN_PMD_MAPPED:
2772                         ++thps;
2773                         break;
2774                 case SCAN_PTE_MAPPED_HUGEPAGE:
2775                         BUG_ON(mmap_locked);
2776                         BUG_ON(*prev);
2777                         mmap_read_lock(mm);
2778                         result = collapse_pte_mapped_thp(mm, addr, true);
2779                         mmap_read_unlock(mm);
2780                         goto handle_result;
2781                 /* Whitelisted set of results where continuing OK */
2782                 case SCAN_PMD_NULL:
2783                 case SCAN_PTE_NON_PRESENT:
2784                 case SCAN_PTE_UFFD_WP:
2785                 case SCAN_PAGE_RO:
2786                 case SCAN_LACK_REFERENCED_PAGE:
2787                 case SCAN_PAGE_NULL:
2788                 case SCAN_PAGE_COUNT:
2789                 case SCAN_PAGE_LOCK:
2790                 case SCAN_PAGE_COMPOUND:
2791                 case SCAN_PAGE_LRU:
2792                 case SCAN_DEL_PAGE_LRU:
2793                         last_fail = result;
2794                         break;
2795                 default:
2796                         last_fail = result;
2797                         /* Other error, exit */
2798                         goto out_maybelock;
2799                 }
2800         }
2801 
2802 out_maybelock:
2803         /* Caller expects us to hold mmap_lock on return */
2804         if (!mmap_locked)
2805                 mmap_read_lock(mm);
2806 out_nolock:
2807         mmap_assert_locked(mm);
2808         mmdrop(mm);
2809         kfree(cc);
2810 
2811         return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
2812                         : madvise_collapse_errno(last_fail);
2813 }
2814 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php