TOMOYO Linux Cross Reference
Linux/mm/page_alloc.c

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  *  linux/mm/page_alloc.c
  4  *
  5  *  Manages the free list; the system allocates free pages here.
  6  *  Note that kmalloc() lives in slab.c
  7  *
  8  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  9  *  Swap reorganised 29.12.95, Stephen Tweedie
 10  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 11  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 12  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 14  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 15  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 16  */
 17 
 18 #include <linux/stddef.h>
 19 #include <linux/mm.h>
 20 #include <linux/highmem.h>
 21 #include <linux/interrupt.h>
 22 #include <linux/jiffies.h>
 23 #include <linux/compiler.h>
 24 #include <linux/kernel.h>
 25 #include <linux/kasan.h>
 26 #include <linux/kmsan.h>
 27 #include <linux/module.h>
 28 #include <linux/suspend.h>
 29 #include <linux/ratelimit.h>
 30 #include <linux/oom.h>
 31 #include <linux/topology.h>
 32 #include <linux/sysctl.h>
 33 #include <linux/cpu.h>
 34 #include <linux/cpuset.h>
 35 #include <linux/pagevec.h>
 36 #include <linux/memory_hotplug.h>
 37 #include <linux/nodemask.h>
 38 #include <linux/vmstat.h>
 39 #include <linux/fault-inject.h>
 40 #include <linux/compaction.h>
 41 #include <trace/events/kmem.h>
 42 #include <trace/events/oom.h>
 43 #include <linux/prefetch.h>
 44 #include <linux/mm_inline.h>
 45 #include <linux/mmu_notifier.h>
 46 #include <linux/migrate.h>
 47 #include <linux/sched/mm.h>
 48 #include <linux/page_owner.h>
 49 #include <linux/page_table_check.h>
 50 #include <linux/memcontrol.h>
 51 #include <linux/ftrace.h>
 52 #include <linux/lockdep.h>
 53 #include <linux/psi.h>
 54 #include <linux/khugepaged.h>
 55 #include <linux/delayacct.h>
 56 #include <linux/cacheinfo.h>
 57 #include <linux/pgalloc_tag.h>
 58 #include <asm/div64.h>
 59 #include "internal.h"
 60 #include "shuffle.h"
 61 #include "page_reporting.h"
 62 
 63 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
 64 typedef int __bitwise fpi_t;
 65 
 66 /* No special request */
 67 #define FPI_NONE                ((__force fpi_t)0)
 68 
 69 /*
 70  * Skip free page reporting notification for the (possibly merged) page.
 71  * This does not hinder free page reporting from grabbing the page,
 72  * reporting it and marking it "reported" -  it only skips notifying
 73  * the free page reporting infrastructure about a newly freed page. For
 74  * example, used when temporarily pulling a page from a freelist and
 75  * putting it back unmodified.
 76  */
 77 #define FPI_SKIP_REPORT_NOTIFY  ((__force fpi_t)BIT(0))
 78 
 79 /*
 80  * Place the (possibly merged) page at the tail of the freelist. Will ignore
 81  * page shuffling (relevant code - e.g., memory onlining - is expected to
 82  * shuffle the whole zone).
 83  *
 84  * Note: No code should rely on this flag for correctness - it's purely
 85  *       to allow for optimizations when handing back either fresh pages
 86  *       (memory onlining) or untouched pages (page isolation, free page
 87  *       reporting).
 88  */
 89 #define FPI_TO_TAIL             ((__force fpi_t)BIT(1))
 90 
 91 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 92 static DEFINE_MUTEX(pcp_batch_high_lock);
 93 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)
 94 
 95 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 96 /*
 97  * On SMP, spin_trylock is sufficient protection.
 98  * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 99  */
100 #define pcp_trylock_prepare(flags)      do { } while (0)
101 #define pcp_trylock_finish(flag)        do { } while (0)
102 #else
103 
104 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
105 #define pcp_trylock_prepare(flags)      local_irq_save(flags)
106 #define pcp_trylock_finish(flags)       local_irq_restore(flags)
107 #endif
108 
109 /*
110  * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
111  * a migration causing the wrong PCP to be locked and remote memory being
112  * potentially allocated, pin the task to the CPU for the lookup+lock.
113  * preempt_disable is used on !RT because it is faster than migrate_disable.
114  * migrate_disable is used on RT because otherwise RT spinlock usage is
115  * interfered with and a high priority task cannot preempt the allocator.
116  */
117 #ifndef CONFIG_PREEMPT_RT
118 #define pcpu_task_pin()         preempt_disable()
119 #define pcpu_task_unpin()       preempt_enable()
120 #else
121 #define pcpu_task_pin()         migrate_disable()
122 #define pcpu_task_unpin()       migrate_enable()
123 #endif
124 
125 /*
126  * Generic helper to look up and lock a per-cpu variable with an embedded
127  * spinlock. The return value should be passed to the matching unlock helper.
128  */
129 #define pcpu_spin_lock(type, member, ptr)                               \
130 ({                                                                      \
131         type *_ret;                                                     \
132         pcpu_task_pin();                                                \
133         _ret = this_cpu_ptr(ptr);                                       \
134         spin_lock(&_ret->member);                                       \
135         _ret;                                                           \
136 })
137 
138 #define pcpu_spin_trylock(type, member, ptr)                            \
139 ({                                                                      \
140         type *_ret;                                                     \
141         pcpu_task_pin();                                                \
142         _ret = this_cpu_ptr(ptr);                                       \
143         if (!spin_trylock(&_ret->member)) {                             \
144                 pcpu_task_unpin();                                      \
145                 _ret = NULL;                                            \
146         }                                                               \
147         _ret;                                                           \
148 })
149 
150 #define pcpu_spin_unlock(member, ptr)                                   \
151 ({                                                                      \
152         spin_unlock(&ptr->member);                                      \
153         pcpu_task_unpin();                                              \
154 })
155 
156 /* struct per_cpu_pages specific helpers. */
157 #define pcp_spin_lock(ptr)                                              \
158         pcpu_spin_lock(struct per_cpu_pages, lock, ptr)
159 
160 #define pcp_spin_trylock(ptr)                                           \
161         pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)
162 
163 #define pcp_spin_unlock(ptr)                                            \
164         pcpu_spin_unlock(lock, ptr)
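
/*
 * Illustrative usage sketch (editor's example, not part of the kernel
 * source; the function name is hypothetical). The trylock variant either
 * returns a task-pinned, locked per_cpu_pages or NULL, and UP builds
 * additionally need the pcp_trylock_prepare()/finish() IRQ guards:
 */
#if 0	/* example only */
static void example_pcp_op(struct zone *zone)
{
	unsigned long __maybe_unused UP_flags;
	struct per_cpu_pages *pcp;

	pcp_trylock_prepare(UP_flags);
	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
	if (!pcp) {
		/* Contended: callers fall back to the zone-lock path. */
		pcp_trylock_finish(UP_flags);
		return;
	}
	/* ... manipulate pcp->lists and pcp->count under the lock ... */
	pcp_spin_unlock(pcp);
	pcp_trylock_finish(UP_flags);
}
#endif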
165 
166 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
167 DEFINE_PER_CPU(int, numa_node);
168 EXPORT_PER_CPU_SYMBOL(numa_node);
169 #endif
170 
171 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
172 
173 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
174 /*
175  * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
176  * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
177  * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
178  * defined in <linux/topology.h>.
179  */
180 DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
181 EXPORT_PER_CPU_SYMBOL(_numa_mem_);
182 #endif
183 
184 static DEFINE_MUTEX(pcpu_drain_mutex);
185 
186 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
187 volatile unsigned long latent_entropy __latent_entropy;
188 EXPORT_SYMBOL(latent_entropy);
189 #endif
190 
191 /*
192  * Array of node states.
193  */
194 nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
195         [N_POSSIBLE] = NODE_MASK_ALL,
196         [N_ONLINE] = { { [0] = 1UL } },
197 #ifndef CONFIG_NUMA
198         [N_NORMAL_MEMORY] = { { [0] = 1UL } },
199 #ifdef CONFIG_HIGHMEM
200         [N_HIGH_MEMORY] = { { [0] = 1UL } },
201 #endif
202         [N_MEMORY] = { { [0] = 1UL } },
203         [N_CPU] = { { [0] = 1UL } },
204 #endif  /* NUMA */
205 };
206 EXPORT_SYMBOL(node_states);
207 
208 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
209 
210 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
211 unsigned int pageblock_order __read_mostly;
212 #endif
213 
214 static void __free_pages_ok(struct page *page, unsigned int order,
215                             fpi_t fpi_flags);
216 
217 /*
218  * results with 256, 32 in the lowmem_reserve sysctl:
219  *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
220  *      1G machine -> (16M dma, 784M normal, 224M high)
221  *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
222  *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
223  *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
224  *
225  * TBD: should special case ZONE_DMA32 machines here - in those we normally
226  * don't need any ZONE_NORMAL reservation
227  */
228 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
229 #ifdef CONFIG_ZONE_DMA
230         [ZONE_DMA] = 256,
231 #endif
232 #ifdef CONFIG_ZONE_DMA32
233         [ZONE_DMA32] = 256,
234 #endif
235         [ZONE_NORMAL] = 32,
236 #ifdef CONFIG_HIGHMEM
237         [ZONE_HIGHMEM] = 0,
238 #endif
239         [ZONE_MOVABLE] = 0,
240 };
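
/*
 * Worked example (editor's note): with the default ratios above, on the
 * 1G split quoted in the comment (16M dma / 784M normal / 224M high), a
 * NORMAL allocation leaves 784M/256 ~= 3M of ZONE_DMA reserved, while a
 * HIGHMEM allocation leaves 224M/32 = 7M of ZONE_NORMAL plus
 * (224M+784M)/256 ~= 4M of ZONE_DMA reserved.
 */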
241 
242 char * const zone_names[MAX_NR_ZONES] = {
243 #ifdef CONFIG_ZONE_DMA
244          "DMA",
245 #endif
246 #ifdef CONFIG_ZONE_DMA32
247          "DMA32",
248 #endif
249          "Normal",
250 #ifdef CONFIG_HIGHMEM
251          "HighMem",
252 #endif
253          "Movable",
254 #ifdef CONFIG_ZONE_DEVICE
255          "Device",
256 #endif
257 };
258 
259 const char * const migratetype_names[MIGRATE_TYPES] = {
260         "Unmovable",
261         "Movable",
262         "Reclaimable",
263         "HighAtomic",
264 #ifdef CONFIG_CMA
265         "CMA",
266 #endif
267 #ifdef CONFIG_MEMORY_ISOLATION
268         "Isolate",
269 #endif
270 };
271 
272 int min_free_kbytes = 1024;
273 int user_min_free_kbytes = -1;
274 static int watermark_boost_factor __read_mostly = 15000;
275 static int watermark_scale_factor = 10;
276 
277 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
278 int movable_zone;
279 EXPORT_SYMBOL(movable_zone);
280 
281 #if MAX_NUMNODES > 1
282 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
283 unsigned int nr_online_nodes __read_mostly = 1;
284 EXPORT_SYMBOL(nr_node_ids);
285 EXPORT_SYMBOL(nr_online_nodes);
286 #endif
287 
288 static bool page_contains_unaccepted(struct page *page, unsigned int order);
289 static void accept_page(struct page *page, unsigned int order);
290 static bool try_to_accept_memory(struct zone *zone, unsigned int order);
291 static inline bool has_unaccepted_memory(void);
292 static bool __free_unaccepted(struct page *page);
293 
294 int page_group_by_mobility_disabled __read_mostly;
295 
296 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
297 /*
298  * During boot we initialize deferred pages on-demand, as needed, but once
299  * page_alloc_init_late() has finished, the deferred pages are all initialized,
300  * and we can permanently disable that path.
301  */
302 DEFINE_STATIC_KEY_TRUE(deferred_pages);
303 
304 static inline bool deferred_pages_enabled(void)
305 {
306         return static_branch_unlikely(&deferred_pages);
307 }
308 
309 /*
310  * deferred_grow_zone() is __init, but it is called from
311  * get_page_from_freelist() during early boot until deferred_pages permanently
312  * disables this call. This is why we have a __ref wrapper: to avoid the
313  * section-mismatch warning and to ensure that the function body gets unloaded.
314  */
315 static bool __ref
316 _deferred_grow_zone(struct zone *zone, unsigned int order)
317 {
318         return deferred_grow_zone(zone, order);
319 }
320 #else
321 static inline bool deferred_pages_enabled(void)
322 {
323         return false;
324 }
325 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
326 
327 /* Return a pointer to the bitmap storing bits affecting a block of pages */
328 static inline unsigned long *get_pageblock_bitmap(const struct page *page,
329                                                         unsigned long pfn)
330 {
331 #ifdef CONFIG_SPARSEMEM
332         return section_to_usemap(__pfn_to_section(pfn));
333 #else
334         return page_zone(page)->pageblock_flags;
335 #endif /* CONFIG_SPARSEMEM */
336 }
337 
338 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
339 {
340 #ifdef CONFIG_SPARSEMEM
341         pfn &= (PAGES_PER_SECTION-1);
342 #else
343         pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
344 #endif /* CONFIG_SPARSEMEM */
345         return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
346 }
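
/*
 * Worked example (editor's note; assumes common x86-64 values, i.e.
 * SPARSEMEM with PAGES_PER_SECTION == 32768, pageblock_order == 9 and
 * NR_PAGEBLOCK_BITS == 4): for pfn 0x12345 the section offset is
 * 0x12345 & 0x7fff = 0x2345, the pageblock index within the section is
 * 0x2345 >> 9 = 17, so pfn_to_bitidx() returns 17 * 4 = 68. Callers
 * below split that into word_bitidx = 68 / 64 = 1 and bit offset
 * 68 % 64 = 4, i.e. bits 4..7 of the second bitmap word.
 */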
347 
348 /**
349  * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
350  * @page: The page within the block of interest
351  * @pfn: The target page frame number
352  * @mask: mask of bits that the caller is interested in
353  *
354  * Return: pageblock_bits flags
355  */
356 unsigned long get_pfnblock_flags_mask(const struct page *page,
357                                         unsigned long pfn, unsigned long mask)
358 {
359         unsigned long *bitmap;
360         unsigned long bitidx, word_bitidx;
361         unsigned long word;
362 
363         bitmap = get_pageblock_bitmap(page, pfn);
364         bitidx = pfn_to_bitidx(page, pfn);
365         word_bitidx = bitidx / BITS_PER_LONG;
366         bitidx &= (BITS_PER_LONG-1);
367         /*
368          * This races, without locks, with set_pfnblock_flags_mask(). Ensure
369          * a consistent read of the memory array, so that results, even though
370          * racy, are not corrupted.
371          */
372         word = READ_ONCE(bitmap[word_bitidx]);
373         return (word >> bitidx) & mask;
374 }
375 
376 static __always_inline int get_pfnblock_migratetype(const struct page *page,
377                                         unsigned long pfn)
378 {
379         return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
380 }
381 
382 /**
383  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
384  * @page: The page within the block of interest
385  * @flags: The flags to set
386  * @pfn: The target page frame number
387  * @mask: mask of bits that the caller is interested in
388  */
389 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
390                                         unsigned long pfn,
391                                         unsigned long mask)
392 {
393         unsigned long *bitmap;
394         unsigned long bitidx, word_bitidx;
395         unsigned long word;
396 
397         BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
398         BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
399 
400         bitmap = get_pageblock_bitmap(page, pfn);
401         bitidx = pfn_to_bitidx(page, pfn);
402         word_bitidx = bitidx / BITS_PER_LONG;
403         bitidx &= (BITS_PER_LONG-1);
404 
405         VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
406 
407         mask <<= bitidx;
408         flags <<= bitidx;
409 
410         word = READ_ONCE(bitmap[word_bitidx]);
411         do {
412         } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
413 }
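
/*
 * Editor's note on the loop above: this is the usual lock-free
 * read-modify-write pattern. try_cmpxchg() stores the new value only if
 * the word still equals the snapshot in "word"; on failure it refreshes
 * "word" with the current contents, so every retry recomputes
 * (word & ~mask) | flags against fresh data. E.g. for bit offset 0,
 * mask 0x7 and flags 0x2, a low nibble of 0b0101 becomes 0b0010 while
 * all other pageblocks' bits in the word are preserved.
 */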
414 
415 void set_pageblock_migratetype(struct page *page, int migratetype)
416 {
417         if (unlikely(page_group_by_mobility_disabled &&
418                      migratetype < MIGRATE_PCPTYPES))
419                 migratetype = MIGRATE_UNMOVABLE;
420 
421         set_pfnblock_flags_mask(page, (unsigned long)migratetype,
422                                 page_to_pfn(page), MIGRATETYPE_MASK);
423 }
424 
425 #ifdef CONFIG_DEBUG_VM
426 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
427 {
428         int ret;
429         unsigned seq;
430         unsigned long pfn = page_to_pfn(page);
431         unsigned long sp, start_pfn;
432 
433         do {
434                 seq = zone_span_seqbegin(zone);
435                 start_pfn = zone->zone_start_pfn;
436                 sp = zone->spanned_pages;
437                 ret = !zone_spans_pfn(zone, pfn);
438         } while (zone_span_seqretry(zone, seq));
439 
440         if (ret)
441                 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
442                         pfn, zone_to_nid(zone), zone->name,
443                         start_pfn, start_pfn + sp);
444 
445         return ret;
446 }
447 
448 /*
449  * Temporary debugging check for pages not lying within a given zone.
450  */
451 static bool __maybe_unused bad_range(struct zone *zone, struct page *page)
452 {
453         if (page_outside_zone_boundaries(zone, page))
454                 return true;
455         if (zone != page_zone(page))
456                 return true;
457 
458         return false;
459 }
460 #else
461 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page)
462 {
463         return false;
464 }
465 #endif
466 
467 static void bad_page(struct page *page, const char *reason)
468 {
469         static unsigned long resume;
470         static unsigned long nr_shown;
471         static unsigned long nr_unshown;
472 
473         /*
474          * Allow a burst of 60 reports, then keep quiet for that minute;
475          * or allow a steady drip of one report per second.
476          */
477         if (nr_shown == 60) {
478                 if (time_before(jiffies, resume)) {
479                         nr_unshown++;
480                         goto out;
481                 }
482                 if (nr_unshown) {
483                         pr_alert(
484                               "BUG: Bad page state: %lu messages suppressed\n",
485                                 nr_unshown);
486                         nr_unshown = 0;
487                 }
488                 nr_shown = 0;
489         }
490         if (nr_shown++ == 0)
491                 resume = jiffies + 60 * HZ;
492 
493         pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
494                 current->comm, page_to_pfn(page));
495         dump_page(page, reason);
496 
497         print_modules();
498         dump_stack();
499 out:
500         /* Leave bad fields for debug, except PageBuddy could make trouble */
501         if (PageBuddy(page))
502                 __ClearPageBuddy(page);
503         add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
504 }
505 
506 static inline unsigned int order_to_pindex(int migratetype, int order)
507 {
508         bool __maybe_unused movable;
509 
510 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
511         if (order > PAGE_ALLOC_COSTLY_ORDER) {
512                 VM_BUG_ON(order != HPAGE_PMD_ORDER);
513 
514                 movable = migratetype == MIGRATE_MOVABLE;
515 
516                 return NR_LOWORDER_PCP_LISTS + movable;
517         }
518 #else
519         VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
520 #endif
521 
522         return (MIGRATE_PCPTYPES * order) + migratetype;
523 }
524 
525 static inline int pindex_to_order(unsigned int pindex)
526 {
527         int order = pindex / MIGRATE_PCPTYPES;
528 
529 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
530         if (pindex >= NR_LOWORDER_PCP_LISTS)
531                 order = HPAGE_PMD_ORDER;
532 #else
533         VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
534 #endif
535 
536         return order;
537 }
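
/*
 * Worked example (editor's note): MIGRATE_PCPTYPES is 3, so
 * order_to_pindex(MIGRATE_MOVABLE, 2) yields 3 * 2 + 1 = 7 and
 * pindex_to_order(7) recovers 7 / 3 = 2. With THP and the usual
 * PAGE_ALLOC_COSTLY_ORDER of 3, the two dedicated THP lists sit at
 * pindex 12 (unmovable) and 13 (movable), which is why both helpers
 * special-case indices at or above NR_LOWORDER_PCP_LISTS.
 */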
538 
539 static inline bool pcp_allowed_order(unsigned int order)
540 {
541         if (order <= PAGE_ALLOC_COSTLY_ORDER)
542                 return true;
543 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
544         if (order == HPAGE_PMD_ORDER)
545                 return true;
546 #endif
547         return false;
548 }
549 
550 /*
551  * Higher-order pages are called "compound pages".  They are structured thusly:
552  *
553  * The first PAGE_SIZE page is called the "head page" and has PG_head set.
554  *
555  * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
556  * in bit 0 of page->compound_head; the rest of the bits point to the head page.
557  *
558  * The first tail page's ->compound_order holds the order of allocation.
559  * This usage means that zero-order pages may not be compound.
560  */
561 
562 void prep_compound_page(struct page *page, unsigned int order)
563 {
564         int i;
565         int nr_pages = 1 << order;
566 
567         __SetPageHead(page);
568         for (i = 1; i < nr_pages; i++)
569                 prep_compound_tail(page, i);
570 
571         prep_compound_head(page, order);
572 }
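
/*
 * Layout sketch (editor's note): after prep_compound_page(page, 2) the
 * four struct pages look roughly like
 *
 *	page[0]: PG_head set ("head page")
 *	page[1]: compound_head == (unsigned long)&page[0] | 1, and also
 *	         carries the order (set by prep_compound_head())
 *	page[2]: compound_head == (unsigned long)&page[0] | 1
 *	page[3]: compound_head == (unsigned long)&page[0] | 1
 *
 * PageTail() tests bit 0 of ->compound_head, and compound_head() masks
 * it off to find the head page.
 */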
573 
574 static inline void set_buddy_order(struct page *page, unsigned int order)
575 {
576         set_page_private(page, order);
577         __SetPageBuddy(page);
578 }
579 
580 #ifdef CONFIG_COMPACTION
581 static inline struct capture_control *task_capc(struct zone *zone)
582 {
583         struct capture_control *capc = current->capture_control;
584 
585         return unlikely(capc) &&
586                 !(current->flags & PF_KTHREAD) &&
587                 !capc->page &&
588                 capc->cc->zone == zone ? capc : NULL;
589 }
590 
591 static inline bool
592 compaction_capture(struct capture_control *capc, struct page *page,
593                    int order, int migratetype)
594 {
595         if (!capc || order != capc->cc->order)
596                 return false;
597 
598         /* Do not accidentally pollute CMA or isolated regions */
599         if (is_migrate_cma(migratetype) ||
600             is_migrate_isolate(migratetype))
601                 return false;
602 
603         /*
604          * Do not let lower order allocations pollute a movable pageblock
605          * unless compaction is also requesting movable pages.
606          * This might let an unmovable request use a reclaimable pageblock
607          * and vice-versa but no more than normal fallback logic which can
608          * have trouble finding a high-order free page.
609          */
610         if (order < pageblock_order && migratetype == MIGRATE_MOVABLE &&
611             capc->cc->migratetype != MIGRATE_MOVABLE)
612                 return false;
613 
614         capc->page = page;
615         return true;
616 }
617 
618 #else
619 static inline struct capture_control *task_capc(struct zone *zone)
620 {
621         return NULL;
622 }
623 
624 static inline bool
625 compaction_capture(struct capture_control *capc, struct page *page,
626                    int order, int migratetype)
627 {
628         return false;
629 }
630 #endif /* CONFIG_COMPACTION */
631 
632 static inline void account_freepages(struct zone *zone, int nr_pages,
633                                      int migratetype)
634 {
635         if (is_migrate_isolate(migratetype))
636                 return;
637 
638         __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
639 
640         if (is_migrate_cma(migratetype))
641                 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
642 }
643 
644 /* Used for pages not on another list */
645 static inline void __add_to_free_list(struct page *page, struct zone *zone,
646                                       unsigned int order, int migratetype,
647                                       bool tail)
648 {
649         struct free_area *area = &zone->free_area[order];
650 
651         VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
652                      "page type is %lu, passed migratetype is %d (nr=%d)\n",
653                      get_pageblock_migratetype(page), migratetype, 1 << order);
654 
655         if (tail)
656                 list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
657         else
658                 list_add(&page->buddy_list, &area->free_list[migratetype]);
659         area->nr_free++;
660 }
661 
662 /*
663  * Used for pages which are on another list. Move the pages to the tail
664  * of the list - so the moved pages won't immediately be considered for
665  * allocation again (e.g., optimization for memory onlining).
666  */
667 static inline void move_to_free_list(struct page *page, struct zone *zone,
668                                      unsigned int order, int old_mt, int new_mt)
669 {
670         struct free_area *area = &zone->free_area[order];
671 
672         /* Free page moving can fail, so it happens before the type update */
673         VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt,
674                      "page type is %lu, passed migratetype is %d (nr=%d)\n",
675                      get_pageblock_migratetype(page), old_mt, 1 << order);
676 
677         list_move_tail(&page->buddy_list, &area->free_list[new_mt]);
678 
679         account_freepages(zone, -(1 << order), old_mt);
680         account_freepages(zone, 1 << order, new_mt);
681 }
682 
683 static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
684                                              unsigned int order, int migratetype)
685 {
686         VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
687                      "page type is %lu, passed migratetype is %d (nr=%d)\n",
688                      get_pageblock_migratetype(page), migratetype, 1 << order);
689 
690         /* clear reported state and update reported page count */
691         if (page_reported(page))
692                 __ClearPageReported(page);
693 
694         list_del(&page->buddy_list);
695         __ClearPageBuddy(page);
696         set_page_private(page, 0);
697         zone->free_area[order].nr_free--;
698 }
699 
700 static inline void del_page_from_free_list(struct page *page, struct zone *zone,
701                                            unsigned int order, int migratetype)
702 {
703         __del_page_from_free_list(page, zone, order, migratetype);
704         account_freepages(zone, -(1 << order), migratetype);
705 }
706 
707 static inline struct page *get_page_from_free_area(struct free_area *area,
708                                             int migratetype)
709 {
710         return list_first_entry_or_null(&area->free_list[migratetype],
711                                         struct page, buddy_list);
712 }
713 
714 /*
715  * If this is less than the 2nd largest possible page, check if the buddy
716  * of the next-higher order is free. If it is, it's possible
717  * that pages are being freed that will coalesce soon. In case
718  * that is happening, add the free page to the tail of the list
719  * so it's less likely to be used soon and more likely to be merged
720  * into a higher-order page two levels up.
721  */
722 static inline bool
723 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
724                    struct page *page, unsigned int order)
725 {
726         unsigned long higher_page_pfn;
727         struct page *higher_page;
728 
729         if (order >= MAX_PAGE_ORDER - 1)
730                 return false;
731 
732         higher_page_pfn = buddy_pfn & pfn;
733         higher_page = page + (higher_page_pfn - pfn);
734 
735         return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
736                         NULL) != NULL;
737 }
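
/*
 * Worked example (editor's note): buddy pfns differ only in the order
 * bit, i.e. buddy_pfn == pfn ^ (1 << order). For pfn 12 at order 2 the
 * buddy is 12 ^ 4 = 8, and buddy_pfn & pfn == 8 clears that bit, giving
 * the start of the would-be order-3 block. buddy_merge_likely() applies
 * the same arithmetic one level up: it asks whether the order + 1 block
 * starting there already has a free order + 1 buddy.
 */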
738 
739 /*
740  * Freeing function for a buddy system allocator.
741  *
742  * The concept of a buddy system is to maintain a direct-mapped table
743  * (containing bit values) for memory blocks of various "orders".
744  * The bottom level table contains the map for the smallest allocatable
745  * units of memory (here, pages), and each level above it describes
746  * pairs of units from the levels below, hence, "buddies".
747  * At a high level, all that happens here is marking the table entry
748  * at the bottom level available, and propagating the changes upward
749  * as necessary, plus some accounting needed to play nicely with other
750  * parts of the VM system.
751  * At each level, we keep a list of pages, which are heads of contiguous
752  * runs of free pages of length (1 << order), marked with PageBuddy.
753  * A page's order is recorded in the page_private(page) field.
754  * So when we are allocating or freeing one, we can derive the state of the
755  * other.  That is, if we allocate a small block, and both were
756  * free, the remainder of the region must be split into blocks.
757  * If a block is freed, and its buddy is also free, then this
758  * triggers coalescing into a block of larger size.
759  *
760  * -- nyc
761  */
762 
763 static inline void __free_one_page(struct page *page,
764                 unsigned long pfn,
765                 struct zone *zone, unsigned int order,
766                 int migratetype, fpi_t fpi_flags)
767 {
768         struct capture_control *capc = task_capc(zone);
769         unsigned long buddy_pfn = 0;
770         unsigned long combined_pfn;
771         struct page *buddy;
772         bool to_tail;
773 
774         VM_BUG_ON(!zone_is_initialized(zone));
775         VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
776 
777         VM_BUG_ON(migratetype == -1);
778         VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
779         VM_BUG_ON_PAGE(bad_range(zone, page), page);
780 
781         account_freepages(zone, 1 << order, migratetype);
782 
783         while (order < MAX_PAGE_ORDER) {
784                 int buddy_mt = migratetype;
785 
786                 if (compaction_capture(capc, page, order, migratetype)) {
787                         account_freepages(zone, -(1 << order), migratetype);
788                         return;
789                 }
790 
791                 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
792                 if (!buddy)
793                         goto done_merging;
794 
795                 if (unlikely(order >= pageblock_order)) {
796                         /*
797                          * We want to prevent merge between freepages on pageblock
798                          * without fallbacks and normal pageblock. Without this,
799                          * pageblock isolation could cause incorrect freepage or CMA
800                          * accounting or HIGHATOMIC accounting.
801                          */
802                         buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);
803 
804                         if (migratetype != buddy_mt &&
805                             (!migratetype_is_mergeable(migratetype) ||
806                              !migratetype_is_mergeable(buddy_mt)))
807                                 goto done_merging;
808                 }
809 
810                 /*
811                  * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
812                  * merge with it and move up one order.
813                  */
814                 if (page_is_guard(buddy))
815                         clear_page_guard(zone, buddy, order);
816                 else
817                         __del_page_from_free_list(buddy, zone, order, buddy_mt);
818 
819                 if (unlikely(buddy_mt != migratetype)) {
820                         /*
821                          * Match buddy type. This ensures that an
822                          * expand() down the line puts the sub-blocks
823                          * on the right freelists.
824                          */
825                         set_pageblock_migratetype(buddy, migratetype);
826                 }
827 
828                 combined_pfn = buddy_pfn & pfn;
829                 page = page + (combined_pfn - pfn);
830                 pfn = combined_pfn;
831                 order++;
832         }
833 
834 done_merging:
835         set_buddy_order(page, order);
836 
837         if (fpi_flags & FPI_TO_TAIL)
838                 to_tail = true;
839         else if (is_shuffle_order(order))
840                 to_tail = shuffle_pick_tail();
841         else
842                 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
843 
844         __add_to_free_list(page, zone, order, migratetype, to_tail);
845 
846         /* Notify page reporting subsystem of freed page */
847         if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
848                 page_reporting_notify_free(order);
849 }
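
/*
 * Merge walk example (editor's note): freeing the order-0 page at pfn 5
 * while pfns 4, 6 and 7 are already free proceeds as follows. Order 0:
 * buddy 5 ^ 1 = 4 is free, delete it, combined_pfn = 4, order becomes 1.
 * Order 1: buddy 4 ^ 2 = 6 heads a free order-1 block, delete it,
 * combined_pfn = 4, order becomes 2. If the order-2 buddy at pfn 0 is
 * not free, the loop exits to done_merging and pfns 4-7 go onto the
 * order-2 freelist.
 */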
850 
851 /*
852  * A bad page could be flagged by any of a number of fields. Instead of
853  * multiple branches, try to check multiple fields with one check. The
854  * caller must do a detailed check if necessary.
855  */
856 static inline bool page_expected_state(struct page *page,
857                                         unsigned long check_flags)
858 {
859         if (unlikely(atomic_read(&page->_mapcount) != -1))
860                 return false;
861 
862         if (unlikely((unsigned long)page->mapping |
863                         page_ref_count(page) |
864 #ifdef CONFIG_MEMCG
865                         page->memcg_data |
866 #endif
867 #ifdef CONFIG_PAGE_POOL
868                         ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) |
869 #endif
870                         (page->flags & check_flags)))
871                 return false;
872 
873         return true;
874 }
875 
876 static const char *page_bad_reason(struct page *page, unsigned long flags)
877 {
878         const char *bad_reason = NULL;
879 
880         if (unlikely(atomic_read(&page->_mapcount) != -1))
881                 bad_reason = "nonzero mapcount";
882         if (unlikely(page->mapping != NULL))
883                 bad_reason = "non-NULL mapping";
884         if (unlikely(page_ref_count(page) != 0))
885                 bad_reason = "nonzero _refcount";
886         if (unlikely(page->flags & flags)) {
887                 if (flags == PAGE_FLAGS_CHECK_AT_PREP)
888                         bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
889                 else
890                         bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
891         }
892 #ifdef CONFIG_MEMCG
893         if (unlikely(page->memcg_data))
894                 bad_reason = "page still charged to cgroup";
895 #endif
896 #ifdef CONFIG_PAGE_POOL
897         if (unlikely((page->pp_magic & ~0x3UL) == PP_SIGNATURE))
898                 bad_reason = "page_pool leak";
899 #endif
900         return bad_reason;
901 }
902 
903 static void free_page_is_bad_report(struct page *page)
904 {
905         bad_page(page,
906                  page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
907 }
908 
909 static inline bool free_page_is_bad(struct page *page)
910 {
911         if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
912                 return false;
913 
914         /* Something has gone sideways, find it */
915         free_page_is_bad_report(page);
916         return true;
917 }
918 
919 static inline bool is_check_pages_enabled(void)
920 {
921         return static_branch_unlikely(&check_pages_enabled);
922 }
923 
924 static int free_tail_page_prepare(struct page *head_page, struct page *page)
925 {
926         struct folio *folio = (struct folio *)head_page;
927         int ret = 1;
928 
929         /*
930          * We rely on page->lru.next never having bit 0 set, unless the page
931          * is PageTail(). Let's make sure that's true even for poisoned ->lru.
932          */
933         BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
934 
935         if (!is_check_pages_enabled()) {
936                 ret = 0;
937                 goto out;
938         }
939         switch (page - head_page) {
940         case 1:
941                 /* the first tail page: these may be in place of ->mapping */
942                 if (unlikely(folio_entire_mapcount(folio))) {
943                         bad_page(page, "nonzero entire_mapcount");
944                         goto out;
945                 }
946                 if (unlikely(folio_large_mapcount(folio))) {
947                         bad_page(page, "nonzero large_mapcount");
948                         goto out;
949                 }
950                 if (unlikely(atomic_read(&folio->_nr_pages_mapped))) {
951                         bad_page(page, "nonzero nr_pages_mapped");
952                         goto out;
953                 }
954                 if (unlikely(atomic_read(&folio->_pincount))) {
955                         bad_page(page, "nonzero pincount");
956                         goto out;
957                 }
958                 break;
959         case 2:
960                 /* the second tail page: deferred_list overlaps ->mapping */
961                 if (unlikely(!list_empty(&folio->_deferred_list))) {
962                         bad_page(page, "on deferred list");
963                         goto out;
964                 }
965                 break;
966         default:
967                 if (page->mapping != TAIL_MAPPING) {
968                         bad_page(page, "corrupted mapping in tail page");
969                         goto out;
970                 }
971                 break;
972         }
973         if (unlikely(!PageTail(page))) {
974                 bad_page(page, "PageTail not set");
975                 goto out;
976         }
977         if (unlikely(compound_head(page) != head_page)) {
978                 bad_page(page, "compound_head not consistent");
979                 goto out;
980         }
981         ret = 0;
982 out:
983         page->mapping = NULL;
984         clear_compound_head(page);
985         return ret;
986 }
987 
988 /*
989  * Skip KASAN memory poisoning when either:
990  *
991  * 1. For generic KASAN: deferred memory initialization has not yet completed.
992  *    Tag-based KASAN modes skip pages freed via deferred memory initialization
993  *    using page tags instead (see below).
994  * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
995  *    that error detection is disabled for accesses via the page address.
996  *
997  * Pages will have match-all tags in the following circumstances:
998  *
999  * 1. Pages are being initialized for the first time, including during deferred
1000  *    memory init; see the call to page_kasan_tag_reset in __init_single_page.
1001  * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
1002  *    exception of pages unpoisoned by kasan_unpoison_vmalloc.
1003  * 3. The allocation was excluded from being checked due to sampling,
1004  *    see the call to kasan_unpoison_pages.
1005  *
1006  * Poisoning pages during deferred memory init will greatly lengthen the
1007  * process and cause problems in large memory systems, as deferred page
1008  * initialization is done with interrupts disabled.
1009  *
1010  * Assuming that there will be no reference to those newly initialized
1011  * pages before they are ever allocated, this should have no effect on
1012  * KASAN memory tracking as the poison will be properly inserted at page
1013  * allocation time. The only corner case is when pages are allocated by
1014  * on-demand allocation and then freed again before the deferred pages
1015  * initialization is done, but this is not likely to happen.
1016  */
1017 static inline bool should_skip_kasan_poison(struct page *page)
1018 {
1019         if (IS_ENABLED(CONFIG_KASAN_GENERIC))
1020                 return deferred_pages_enabled();
1021 
1022         return page_kasan_tag(page) == KASAN_TAG_KERNEL;
1023 }
1024 
1025 static void kernel_init_pages(struct page *page, int numpages)
1026 {
1027         int i;
1028 
1029         /* s390's use of memset() could override KASAN redzones. */
1030         kasan_disable_current();
1031         for (i = 0; i < numpages; i++)
1032                 clear_highpage_kasan_tagged(page + i);
1033         kasan_enable_current();
1034 }
1035 
1036 __always_inline bool free_pages_prepare(struct page *page,
1037                         unsigned int order)
1038 {
1039         int bad = 0;
1040         bool skip_kasan_poison = should_skip_kasan_poison(page);
1041         bool init = want_init_on_free();
1042         bool compound = PageCompound(page);
1043 
1044         VM_BUG_ON_PAGE(PageTail(page), page);
1045 
1046         trace_mm_page_free(page, order);
1047         kmsan_free_page(page, order);
1048 
1049         if (memcg_kmem_online() && PageMemcgKmem(page))
1050                 __memcg_kmem_uncharge_page(page, order);
1051 
1052         if (unlikely(PageHWPoison(page)) && !order) {
1053                 /* Do not let hwpoison pages hit pcplists/buddy */
1054                 reset_page_owner(page, order);
1055                 page_table_check_free(page, order);
1056                 pgalloc_tag_sub(page, 1 << order);
1057                 return false;
1058         }
1059 
1060         VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1061 
1062         /*
1063          * Check tail pages before head page information is cleared to
1064          * avoid checking PageCompound for order-0 pages.
1065          */
1066         if (unlikely(order)) {
1067                 int i;
1068 
1069                 if (compound)
1070                         page[1].flags &= ~PAGE_FLAGS_SECOND;
1071                 for (i = 1; i < (1 << order); i++) {
1072                         if (compound)
1073                                 bad += free_tail_page_prepare(page, page + i);
1074                         if (is_check_pages_enabled()) {
1075                                 if (free_page_is_bad(page + i)) {
1076                                         bad++;
1077                                         continue;
1078                                 }
1079                         }
1080                         (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1081                 }
1082         }
1083         if (PageMappingFlags(page))
1084                 page->mapping = NULL;
1085         if (is_check_pages_enabled()) {
1086                 if (free_page_is_bad(page))
1087                         bad++;
1088                 if (bad)
1089                         return false;
1090         }
1091 
1092         page_cpupid_reset_last(page);
1093         page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1094         reset_page_owner(page, order);
1095         page_table_check_free(page, order);
1096         pgalloc_tag_sub(page, 1 << order);
1097 
1098         if (!PageHighMem(page)) {
1099                 debug_check_no_locks_freed(page_address(page),
1100                                            PAGE_SIZE << order);
1101                 debug_check_no_obj_freed(page_address(page),
1102                                            PAGE_SIZE << order);
1103         }
1104 
1105         kernel_poison_pages(page, 1 << order);
1106 
1107         /*
1108          * As memory initialization might be integrated into KASAN,
1109          * KASAN poisoning and memory initialization code must be
1110          * kept together to avoid discrepancies in behavior.
1111          *
1112          * With hardware tag-based KASAN, memory tags must be set before the
1113          * page becomes unavailable via debug_pagealloc or arch_free_page.
1114          */
1115         if (!skip_kasan_poison) {
1116                 kasan_poison_pages(page, order, init);
1117 
1118                 /* Memory is already initialized if KASAN did it internally. */
1119                 if (kasan_has_integrated_init())
1120                         init = false;
1121         }
1122         if (init)
1123                 kernel_init_pages(page, 1 << order);
1124 
1125         /*
1126          * arch_free_page() can make the page's contents inaccessible.  s390
1127          * does this.  So nothing which can access the page's contents should
1128          * happen after this.
1129          */
1130         arch_free_page(page, order);
1131 
1132         debug_pagealloc_unmap_pages(page, 1 << order);
1133 
1134         return true;
1135 }
1136 
1137 /*
1138  * Frees a number of pages from the PCP lists
1139  * Assumes all pages on list are in same zone.
1140  * count is the number of pages to free.
1141  */
1142 static void free_pcppages_bulk(struct zone *zone, int count,
1143                                         struct per_cpu_pages *pcp,
1144                                         int pindex)
1145 {
1146         unsigned long flags;
1147         unsigned int order;
1148         struct page *page;
1149 
1150         /*
1151          * Ensure a proper count is passed; otherwise we would get stuck in
1152          * the while (list_empty(list)) loop below.
1153          */
1154         count = min(pcp->count, count);
1155 
1156         /* Ensure requested pindex is drained first. */
1157         pindex = pindex - 1;
1158 
1159         spin_lock_irqsave(&zone->lock, flags);
1160 
1161         while (count > 0) {
1162                 struct list_head *list;
1163                 int nr_pages;
1164 
1165                 /* Remove pages from lists in a round-robin fashion. */
1166                 do {
1167                         if (++pindex > NR_PCP_LISTS - 1)
1168                                 pindex = 0;
1169                         list = &pcp->lists[pindex];
1170                 } while (list_empty(list));
1171 
1172                 order = pindex_to_order(pindex);
1173                 nr_pages = 1 << order;
1174                 do {
1175                         unsigned long pfn;
1176                         int mt;
1177 
1178                         page = list_last_entry(list, struct page, pcp_list);
1179                         pfn = page_to_pfn(page);
1180                         mt = get_pfnblock_migratetype(page, pfn);
1181 
1182                         /* must delete to avoid corrupting pcp list */
1183                         list_del(&page->pcp_list);
1184                         count -= nr_pages;
1185                         pcp->count -= nr_pages;
1186 
1187                         __free_one_page(page, pfn, zone, order, mt, FPI_NONE);
1188                         trace_mm_page_pcpu_drain(page, order, mt);
1189                 } while (count > 0 && !list_empty(list));
1190         }
1191 
1192         spin_unlock_irqrestore(&zone->lock, flags);
1193 }
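
/*
 * Editor's note on the round-robin above: "pindex = pindex - 1" followed
 * by the pre-increment means draining starts at the requested pindex and
 * then wraps through all NR_PCP_LISTS lists. E.g. with THP enabled
 * (NR_PCP_LISTS == 14) and a request starting at pindex 12, non-empty
 * lists are visited in the order 12, 13, 0, 1, ... until "count" pages
 * have been freed.
 */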
1194 
1195 static void free_one_page(struct zone *zone, struct page *page,
1196                           unsigned long pfn, unsigned int order,
1197                           fpi_t fpi_flags)
1198 {
1199         unsigned long flags;
1200         int migratetype;
1201 
1202         spin_lock_irqsave(&zone->lock, flags);
1203         migratetype = get_pfnblock_migratetype(page, pfn);
1204         __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1205         spin_unlock_irqrestore(&zone->lock, flags);
1206 }
1207 
1208 static void __free_pages_ok(struct page *page, unsigned int order,
1209                             fpi_t fpi_flags)
1210 {
1211         unsigned long pfn = page_to_pfn(page);
1212         struct zone *zone = page_zone(page);
1213 
1214         if (!free_pages_prepare(page, order))
1215                 return;
1216 
1217         free_one_page(zone, page, pfn, order, fpi_flags);
1218 
1219         __count_vm_events(PGFREE, 1 << order);
1220 }
1221 
1222 void __meminit __free_pages_core(struct page *page, unsigned int order,
1223                 enum meminit_context context)
1224 {
1225         unsigned int nr_pages = 1 << order;
1226         struct page *p = page;
1227         unsigned int loop;
1228 
1229         /*
1230          * When initializing the memmap, __init_single_page() sets the refcount
1231          * of all pages to 1 ("allocated"/"not free"). We have to set the
1232          * refcount of all involved pages to 0.
1233          *
1234          * Note that hotplugged memory pages are initialized to PageOffline().
1235          * Pages freed from memblock might be marked as reserved.
1236          */
1237         if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
1238             unlikely(context == MEMINIT_HOTPLUG)) {
1239                 for (loop = 0; loop < nr_pages; loop++, p++) {
1240                         VM_WARN_ON_ONCE(PageReserved(p));
1241                         __ClearPageOffline(p);
1242                         set_page_count(p, 0);
1243                 }
1244 
1245                 /*
1246                  * Freeing the page with debug_pagealloc enabled will try to
1247                  * unmap it; some archs don't like double-unmappings, so
1248                  * map it first.
1249                  */
1250                 debug_pagealloc_map_pages(page, nr_pages);
1251                 adjust_managed_page_count(page, nr_pages);
1252         } else {
1253                 for (loop = 0; loop < nr_pages; loop++, p++) {
1254                         __ClearPageReserved(p);
1255                         set_page_count(p, 0);
1256                 }
1257 
1258                 /* memblock adjusts totalram_pages() manually. */
1259                 atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1260         }
1261 
1262         if (page_contains_unaccepted(page, order)) {
1263                 if (order == MAX_PAGE_ORDER && __free_unaccepted(page))
1264                         return;
1265 
1266                 accept_page(page, order);
1267         }
1268 
1269         /*
1270          * Bypass PCP and place fresh pages right to the tail, primarily
1271          * relevant for memory onlining.
1272          */
1273         __free_pages_ok(page, order, FPI_TO_TAIL);
1274 }
1275 
1276 /*
1277  * Check that the whole (or subset of) a pageblock given by the interval of
1278  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1279  * with the migration or free compaction scanner.
1280  *
1281  * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1282  *
1283  * It's possible on some configurations to have a setup like node0 node1 node0
1284  * i.e. it's possible that all pages within a zones range of pages do not
1285  * belong to a single zone. We assume that a border between node0 and node1
1286  * can occur within a single pageblock, but not a node0 node1 node0
1287  * interleaving within a single pageblock. It is therefore sufficient to check
1288  * the first and last page of a pageblock and avoid checking each individual
1289  * page in a pageblock.
1290  *
1291  * Note: the function may return non-NULL struct page even for a page block
1292  * which contains a memory hole (i.e. there is no physical memory for a subset
1293  * of the pfn range). For example, a pageblock of order MAX_PAGE_ORDER spans
1294  * 2 sub-sections, so the end pfn of the pageblock may be in a hole even
1295  * though the start pfn is online and valid. This should be safe most of
1296  * the time because struct pages are still initialized via init_unavailable_range()
1297  * and pfn walkers shouldn't touch any physical memory range for which they do
1298  * not recognize any specific metadata in struct pages.
1299  */
1300 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1301                                      unsigned long end_pfn, struct zone *zone)
1302 {
1303         struct page *start_page;
1304         struct page *end_page;
1305 
1306         /* end_pfn is one past the range we are checking */
1307         end_pfn--;
1308 
1309         if (!pfn_valid(end_pfn))
1310                 return NULL;
1311 
1312         start_page = pfn_to_online_page(start_pfn);
1313         if (!start_page)
1314                 return NULL;
1315 
1316         if (page_zone(start_page) != zone)
1317                 return NULL;
1318 
1319         end_page = pfn_to_page(end_pfn);
1320 
1321         /* This gives a shorter code than deriving page_zone(end_page) */
1322         if (page_zone_id(start_page) != page_zone_id(end_page))
1323                 return NULL;
1324 
1325         return start_page;
1326 }
1327 
1328 /*
1329  * The order of subdivision here is critical for the IO subsystem.
1330  * Please do not alter this order without good reasons and regression
1331  * testing. Specifically, as large blocks of memory are subdivided,
1332  * the order in which smaller blocks are delivered depends on the order
1333  * they're subdivided in this function. This is the primary factor
1334  * influencing the order in which pages are delivered to the IO
1335  * subsystem according to empirical testing, and this is also justified
1336  * by considering the behavior of a buddy system containing a single
1337  * large block of memory acted on by a series of small allocations.
1338  * This behavior is a critical factor in sglist merging's success.
1339  *
1340  * -- nyc
1341  */
1342 static inline void expand(struct zone *zone, struct page *page,
1343         int low, int high, int migratetype)
1344 {
1345         unsigned long size = 1 << high;
1346         unsigned long nr_added = 0;
1347 
1348         while (high > low) {
1349                 high--;
1350                 size >>= 1;
1351                 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
1352 
1353                 /*
1354                  * Mark as guard pages (or a guard page), which allows
1355                  * merging back into the allocator when the buddy is freed.
1356                  * The corresponding page table entries will not be touched;
1357                  * the pages will stay not-present in the virtual address space.
1358                  */
1359                 if (set_page_guard(zone, &page[size], high))
1360                         continue;
1361 
1362                 __add_to_free_list(&page[size], zone, high, migratetype, false);
1363                 set_buddy_order(&page[size], high);
1364                 nr_added += size;
1365         }
1366         account_freepages(zone, nr_added, migratetype);
1367 }
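
/*
 * Split example (editor's note): expand(zone, page, low = 0, high = 3,
 * mt) carves an order-3 block of 8 pages down to the order-0 page being
 * allocated: successive iterations put page[4] on the order-2 freelist,
 * page[2] on the order-1 freelist and page[1] on the order-0 freelist,
 * leaving page[0] for the caller. nr_added accounts for the
 * 4 + 2 + 1 = 7 pages handed back (guard pages excepted).
 */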
1368 
1369 static void check_new_page_bad(struct page *page)
1370 {
1371         if (unlikely(page->flags & __PG_HWPOISON)) {
1372                 /* Don't complain about hwpoisoned pages */
1373                 if (PageBuddy(page))
1374                         __ClearPageBuddy(page);
1375                 return;
1376         }
1377 
1378         bad_page(page,
1379                  page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
1380 }
1381 
1382 /*
1383  * This page is about to be returned from the page allocator
1384  */
1385 static bool check_new_page(struct page *page)
1386 {
1387         if (likely(page_expected_state(page,
1388                                 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
1389                 return false;
1390 
1391         check_new_page_bad(page);
1392         return true;
1393 }
1394 
1395 static inline bool check_new_pages(struct page *page, unsigned int order)
1396 {
1397         if (is_check_pages_enabled()) {
1398                 for (int i = 0; i < (1 << order); i++) {
1399                         struct page *p = page + i;
1400 
1401                         if (check_new_page(p))
1402                                 return true;
1403                 }
1404         }
1405 
1406         return false;
1407 }
1408 
1409 static inline bool should_skip_kasan_unpoison(gfp_t flags)
1410 {
1411         /* Don't skip if a software KASAN mode is enabled. */
1412         if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
1413             IS_ENABLED(CONFIG_KASAN_SW_TAGS))
1414                 return false;
1415 
1416         /* Skip if hardware tag-based KASAN is not enabled. */
1417         if (!kasan_hw_tags_enabled())
1418                 return true;
1419 
1420         /*
1421          * With hardware tag-based KASAN enabled, skip if this has been
1422          * requested via __GFP_SKIP_KASAN.
1423          */
1424         return flags & __GFP_SKIP_KASAN;
1425 }
1426 
1427 static inline bool should_skip_init(gfp_t flags)
1428 {
1429         /* Don't skip if hardware tag-based KASAN is not enabled. */
1430         if (!kasan_hw_tags_enabled())
1431                 return false;
1432 
1433         /* For hardware tag-based KASAN, skip if requested. */
1434         return (flags & __GFP_SKIP_ZERO);
1435 }
1436 
1437 inline void post_alloc_hook(struct page *page, unsigned int order,
1438                                 gfp_t gfp_flags)
1439 {
1440         bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
1441                         !should_skip_init(gfp_flags);
1442         bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
1443         int i;
1444 
1445         set_page_private(page, 0);
1446         set_page_refcounted(page);
1447 
1448         arch_alloc_page(page, order);
1449         debug_pagealloc_map_pages(page, 1 << order);
1450 
1451         /*
1452          * Page unpoisoning must happen before memory initialization.
1453          * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
1454          * allocations and the page unpoisoning code will complain.
1455          */
1456         kernel_unpoison_pages(page, 1 << order);
1457 
1458         /*
1459          * As memory initialization might be integrated into KASAN,
1460          * KASAN unpoisoning and memory initialization code must be
1461          * kept together to avoid discrepancies in behavior.
1462          */
1463 
1464         /*
1465          * Zero the memory tags if requested (this happens only when
1466          * the memory should be initialized as well).
1467          */
1468         if (zero_tags) {
1469                 /* Initialize both memory and memory tags. */
1470                 for (i = 0; i != 1 << order; ++i)
1471                         tag_clear_highpage(page + i);
1472 
1473                 /* Take note that memory was initialized by the loop above. */
1474                 init = false;
1475         }
1476         if (!should_skip_kasan_unpoison(gfp_flags) &&
1477             kasan_unpoison_pages(page, order, init)) {
1478                 /* Take note that memory was initialized by KASAN. */
1479                 if (kasan_has_integrated_init())
1480                         init = false;
1481         } else {
1482                 /*
1483                  * If memory tags have not been set by KASAN, reset the page
1484                  * tags to ensure page_address() dereferencing does not fault.
1485                  */
1486                 for (i = 0; i != 1 << order; ++i)
1487                         page_kasan_tag_reset(page + i);
1488         }
1489         /* If memory is still not initialized, initialize it now. */
1490         if (init)
1491                 kernel_init_pages(page, 1 << order);
1492 
1493         set_page_owner(page, order, gfp_flags);
1494         page_table_check_alloc(page, order);
1495         pgalloc_tag_add(page, current, 1 << order);
1496 }
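/*
 * Editor's summary (illustrative, not from the source): when the page
 * contents end up zeroed by the paths above:
 *   - want_init_on_free(): zeroing already happened at free time, so
 *     init starts out false here;
 *   - init && __GFP_ZEROTAGS: tag_clear_highpage() zeroes data and tags;
 *   - integrated KASAN init: kasan_unpoison_pages() zeroes while
 *     unpoisoning;
 *   - otherwise, if init is still true: kernel_init_pages() zeroes.
 */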
1497 
1498 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
1499                                                         unsigned int alloc_flags)
1500 {
1501         post_alloc_hook(page, order, gfp_flags);
1502 
1503         if (order && (gfp_flags & __GFP_COMP))
1504                 prep_compound_page(page, order);
1505 
1506         /*
1507          * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
1508          * allocate the page. The expectation is that the caller is taking
1509          * steps that will free more memory. The caller should avoid the page
1510          * being used for !PFMEMALLOC purposes.
1511          */
1512         if (alloc_flags & ALLOC_NO_WATERMARKS)
1513                 set_page_pfmemalloc(page);
1514         else
1515                 clear_page_pfmemalloc(page);
1516 }
1517 
1518 /*
1519  * Go through the free lists for the given migratetype and remove
1520  * the smallest available page from the freelists
1521  */
1522 static __always_inline
1523 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1524                                                 int migratetype)
1525 {
1526         unsigned int current_order;
1527         struct free_area *area;
1528         struct page *page;
1529 
1530         /* Find a page of the appropriate size in the preferred list */
1531         for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) {
1532                 area = &(zone->free_area[current_order]);
1533                 page = get_page_from_free_area(area, migratetype);
1534                 if (!page)
1535                         continue;
1536                 del_page_from_free_list(page, zone, current_order, migratetype);
1537                 expand(zone, page, order, current_order, migratetype);
1538                 trace_mm_page_alloc_zone_locked(page, order, migratetype,
1539                                 pcp_allowed_order(order) &&
1540                                 migratetype < MIGRATE_PCPTYPES);
1541                 return page;
1542         }
1543 
1544         return NULL;
1545 }
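/*
 * Worked example (editor's illustration): for an order-2 request with the
 * order-2 and order-3 freelists empty and a page available at order 4,
 * the order-4 page is removed, and expand() above returns one order-3
 * and one order-2 buddy to the freelists; the caller keeps the low
 * 1 << 2 = 4 pages.
 */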
1546 
1547 
1548 /*
1549  * This array describes the order in which free lists are fallen back
1550  * on when the lists for the desired migratetype are depleted.
1551  *
1552  * The other migratetypes do not have fallbacks.
1553  */
1554 static int fallbacks[MIGRATE_PCPTYPES][MIGRATE_PCPTYPES - 1] = {
1555         [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE   },
1556         [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
1557         [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE   },
1558 };
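/*
 * Editor's sketch (not in the kernel): how a caller probes the table
 * above. For a MIGRATE_UNMOVABLE request the probe order is
 * MIGRATE_RECLAIMABLE, then MIGRATE_MOVABLE. find_suitable_fallback()
 * below performs this walk with additional stealing heuristics.
 */
static inline int first_nonempty_fallback_sketch(struct free_area *area,
						 int migratetype)
{
	int i;

	for (i = 0; i < MIGRATE_PCPTYPES - 1; i++) {
		int fallback_mt = fallbacks[migratetype][i];

		/* Return the first fallback type with free pages. */
		if (!free_area_empty(area, fallback_mt))
			return fallback_mt;
	}
	return -1;
}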
1559 
1560 #ifdef CONFIG_CMA
1561 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1562                                         unsigned int order)
1563 {
1564         return __rmqueue_smallest(zone, order, MIGRATE_CMA);
1565 }
1566 #else
1567 static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
1568                                         unsigned int order) { return NULL; }
1569 #endif
1570 
1571 /*
1572  * Change the type of a block and move all its free pages to that
1573  * type's freelist.
1574  */
1575 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
1576                                   int old_mt, int new_mt)
1577 {
1578         struct page *page;
1579         unsigned long pfn, end_pfn;
1580         unsigned int order;
1581         int pages_moved = 0;
1582 
1583         VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
1584         end_pfn = pageblock_end_pfn(start_pfn);
1585 
1586         for (pfn = start_pfn; pfn < end_pfn;) {
1587                 page = pfn_to_page(pfn);
1588                 if (!PageBuddy(page)) {
1589                         pfn++;
1590                         continue;
1591                 }
1592 
1593                 /* Make sure we are not inadvertently changing nodes */
1594                 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1595                 VM_BUG_ON_PAGE(page_zone(page) != zone, page);
1596 
1597                 order = buddy_order(page);
1598 
1599                 move_to_free_list(page, zone, order, old_mt, new_mt);
1600 
1601                 pfn += 1 << order;
1602                 pages_moved += 1 << order;
1603         }
1604 
1605         set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
1606 
1607         return pages_moved;
1608 }
1609 
1610 static bool prep_move_freepages_block(struct zone *zone, struct page *page,
1611                                       unsigned long *start_pfn,
1612                                       int *num_free, int *num_movable)
1613 {
1614         unsigned long pfn, start, end;
1615 
1616         pfn = page_to_pfn(page);
1617         start = pageblock_start_pfn(pfn);
1618         end = pageblock_end_pfn(pfn);
1619 
1620         /*
1621          * The caller only has the lock for @zone, don't touch ranges
1622          * that straddle into other zones. While we could move part of
1623          * the range that's inside the zone, this call is usually
1624          * accompanied by other operations such as migratetype updates
1625          * which also should be locked.
1626          */
1627         if (!zone_spans_pfn(zone, start))
1628                 return false;
1629         if (!zone_spans_pfn(zone, end - 1))
1630                 return false;
1631 
1632         *start_pfn = start;
1633 
1634         if (num_free) {
1635                 *num_free = 0;
1636                 *num_movable = 0;
1637                 for (pfn = start; pfn < end;) {
1638                         page = pfn_to_page(pfn);
1639                         if (PageBuddy(page)) {
1640                                 int nr = 1 << buddy_order(page);
1641 
1642                                 *num_free += nr;
1643                                 pfn += nr;
1644                                 continue;
1645                         }
1646                         /*
1647                          * We assume that pages that could be isolated for
1648                          * migration are movable. But we don't actually try
1649                          * isolating, as that would be expensive.
1650                          */
1651                         if (PageLRU(page) || __PageMovable(page))
1652                                 (*num_movable)++;
1653                         pfn++;
1654                 }
1655         }
1656 
1657         return true;
1658 }
1659 
1660 static int move_freepages_block(struct zone *zone, struct page *page,
1661                                 int old_mt, int new_mt)
1662 {
1663         unsigned long start_pfn;
1664 
1665         if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
1666                 return -1;
1667 
1668         return __move_freepages_block(zone, start_pfn, old_mt, new_mt);
1669 }
1670 
1671 #ifdef CONFIG_MEMORY_ISOLATION
1672 /* Look for a buddy that straddles start_pfn */
1673 static unsigned long find_large_buddy(unsigned long start_pfn)
1674 {
1675         int order = 0;
1676         struct page *page;
1677         unsigned long pfn = start_pfn;
1678 
1679         while (!PageBuddy(page = pfn_to_page(pfn))) {
1680                 /* Nothing found */
1681                 if (++order > MAX_PAGE_ORDER)
1682                         return start_pfn;
1683                 pfn &= ~0UL << order;
1684         }
1685 
1686         /*
1687          * Found a preceding buddy, but does it straddle?
1688          */
1689         if (pfn + (1 << buddy_order(page)) > start_pfn)
1690                 return pfn;
1691 
1692         /* Nothing found */
1693         return start_pfn;
1694 }
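/*
 * Worked example (editor's illustration) of the masking walk above, for
 * start_pfn == 0x12345:
 *   order 1: pfn &= ~0UL << 1 -> 0x12344
 *   order 2: pfn &= ~0UL << 2 -> 0x12344
 *   order 3: pfn &= ~0UL << 3 -> 0x12340
 * Each step tests PageBuddy() at the next aligned candidate head until
 * a buddy is found or order exceeds MAX_PAGE_ORDER.
 */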
1695 
1696 /* Split a multi-block free page into its individual pageblocks */
1697 static void split_large_buddy(struct zone *zone, struct page *page,
1698                               unsigned long pfn, int order)
1699 {
1700         unsigned long end_pfn = pfn + (1 << order);
1701 
1702         VM_WARN_ON_ONCE(order <= pageblock_order);
1703         VM_WARN_ON_ONCE(pfn & (pageblock_nr_pages - 1));
1704 
1705         /* Caller removed page from freelist, buddy info cleared! */
1706         VM_WARN_ON_ONCE(PageBuddy(page));
1707 
1708         while (pfn != end_pfn) {
1709                 int mt = get_pfnblock_migratetype(page, pfn);
1710 
1711                 __free_one_page(page, pfn, zone, pageblock_order, mt, FPI_NONE);
1712                 pfn += pageblock_nr_pages;
1713                 page = pfn_to_page(pfn);
1714         }
1715 }
1716 
1717 /**
1718  * move_freepages_block_isolate - move free pages in block for page isolation
1719  * @zone: the zone
1720  * @page: the pageblock page
1721  * @migratetype: migratetype to set on the pageblock
1722  *
1723  * This is similar to move_freepages_block(), but handles the special
1724  * case encountered in page isolation, where the block of interest
1725  * might be part of a larger buddy spanning multiple pageblocks.
1726  *
1727  * Unlike the regular page allocator path, which moves pages while
1728  * stealing buddies off the freelist, page isolation is interested in
1729  * arbitrary pfn ranges that may have overlapping buddies on both ends.
1730  *
1731  * This function handles that. Straddling buddies are split into
1732  * individual pageblocks. Only the block of interest is moved.
1733  *
1734  * Returns %true if pages could be moved, %false otherwise.
1735  */
1736 bool move_freepages_block_isolate(struct zone *zone, struct page *page,
1737                                   int migratetype)
1738 {
1739         unsigned long start_pfn, pfn;
1740 
1741         if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
1742                 return false;
1743 
1744         /* No splits needed if buddies can't span multiple blocks */
1745         if (pageblock_order == MAX_PAGE_ORDER)
1746                 goto move;
1747 
1748         /* We're a tail block in a larger buddy */
1749         pfn = find_large_buddy(start_pfn);
1750         if (pfn != start_pfn) {
1751                 struct page *buddy = pfn_to_page(pfn);
1752                 int order = buddy_order(buddy);
1753 
1754                 del_page_from_free_list(buddy, zone, order,
1755                                         get_pfnblock_migratetype(buddy, pfn));
1756                 set_pageblock_migratetype(page, migratetype);
1757                 split_large_buddy(zone, buddy, pfn, order);
1758                 return true;
1759         }
1760 
1761         /* We're the starting block of a larger buddy */
1762         if (PageBuddy(page) && buddy_order(page) > pageblock_order) {
1763                 int order = buddy_order(page);
1764 
1765                 del_page_from_free_list(page, zone, order,
1766                                         get_pfnblock_migratetype(page, pfn));
1767                 set_pageblock_migratetype(page, migratetype);
1768                 split_large_buddy(zone, page, pfn, order);
1769                 return true;
1770         }
1771 move:
1772         __move_freepages_block(zone, start_pfn,
1773                                get_pfnblock_migratetype(page, start_pfn),
1774                                migratetype);
1775         return true;
1776 }
1777 #endif /* CONFIG_MEMORY_ISOLATION */
1778 
1779 static void change_pageblock_range(struct page *pageblock_page,
1780                                         int start_order, int migratetype)
1781 {
1782         int nr_pageblocks = 1 << (start_order - pageblock_order);
1783 
1784         while (nr_pageblocks--) {
1785                 set_pageblock_migratetype(pageblock_page, migratetype);
1786                 pageblock_page += pageblock_nr_pages;
1787         }
1788 }
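/*
 * Worked example (editor's illustration): with pageblock_order == 9, an
 * order-11 page spans 1 << (11 - 9) == 4 pageblocks, so the loop above
 * stamps the new migratetype on four successive 512-page blocks.
 */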
1789 
1790 /*
1791  * When we are falling back to another migratetype during allocation, try to
1792  * steal extra free pages from the same pageblocks to satisfy further
1793  * allocations, instead of polluting multiple pageblocks.
1794  *
1795  * If we are stealing a relatively large buddy page, it is likely there will
1796  * be more free pages in the pageblock, so try to steal them all. For
1797  * reclaimable and unmovable allocations, we steal regardless of page size,
1798  * as fragmentation caused by those allocations polluting movable pageblocks
1799  * is worse than movable allocations stealing from unmovable and reclaimable
1800  * pageblocks.
1801  */
1802 static bool can_steal_fallback(unsigned int order, int start_mt)
1803 {
1804         /*
1805          * Keeping this order check is intentional, even though a more
1806          * relaxed check follows below. If this condition is met, we can
1807          * steal the whole pageblock for certain, whereas the check below
1808          * does not guarantee that and is merely a heuristic which could
1809          * change at any time.
1810          */
1811         if (order >= pageblock_order)
1812                 return true;
1813 
1814         if (order >= pageblock_order / 2 ||
1815                 start_mt == MIGRATE_RECLAIMABLE ||
1816                 start_mt == MIGRATE_UNMOVABLE ||
1817                 page_group_by_mobility_disabled)
1818                 return true;
1819 
1820         return false;
1821 }
1822 
1823 static inline bool boost_watermark(struct zone *zone)
1824 {
1825         unsigned long max_boost;
1826 
1827         if (!watermark_boost_factor)
1828                 return false;
1829         /*
1830          * Don't bother in zones that are unlikely to produce results.
1831          * On small machines, including kdump capture kernels running
1832          * in a small area, boosting the watermark can cause an out of
1833          * memory situation immediately.
1834          */
1835         if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
1836                 return false;
1837 
1838         max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
1839                         watermark_boost_factor, 10000);
1840 
1841         /*
1842          * The high watermark may be uninitialised if fragmentation
1843          * occurs very early in boot, so do not boost in that case.
1844          * Nor do we fall through and boost by pageblock_nr_pages:
1845          * allocations failing that early mean reclaim is not going
1846          * to help, and it may even be impossible to reclaim up to a
1847          * boosted watermark, resulting in a hang.
1848          */
1849         if (!max_boost)
1850                 return false;
1851 
1852         max_boost = max(pageblock_nr_pages, max_boost);
1853 
1854         zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
1855                 max_boost);
1856 
1857         return true;
1858 }
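/*
 * Worked example (editor's illustration, assuming the default
 * watermark_boost_factor of 15000): max_boost becomes
 * high_wmark * 15000 / 10000, i.e. 150% of the high watermark, and each
 * call raises watermark_boost by pageblock_nr_pages up to that ceiling.
 */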
1859 
1860 /*
1861  * This function implements actual steal behaviour. If order is large enough, we
1862  * can claim the whole pageblock for the requested migratetype. If not, we check
1863  * the pageblock for constituent pages; if at least half of the pages are free
1864  * or compatible, we can still claim the whole block, so pages freed in the
1865  * future will be put on the correct free list. Otherwise, we isolate exactly
1866  * the order we need from the fallback block and leave its migratetype alone.
1867  */
1868 static struct page *
1869 steal_suitable_fallback(struct zone *zone, struct page *page,
1870                         int current_order, int order, int start_type,
1871                         unsigned int alloc_flags, bool whole_block)
1872 {
1873         int free_pages, movable_pages, alike_pages;
1874         unsigned long start_pfn;
1875         int block_type;
1876 
1877         block_type = get_pageblock_migratetype(page);
1878 
1879         /*
1880          * This can happen due to races and we want to prevent broken
1881          * highatomic accounting.
1882          */
1883         if (is_migrate_highatomic(block_type))
1884                 goto single_page;
1885 
1886         /* Take ownership for orders >= pageblock_order */
1887         if (current_order >= pageblock_order) {
1888                 del_page_from_free_list(page, zone, current_order, block_type);
1889                 change_pageblock_range(page, current_order, start_type);
1890                 expand(zone, page, order, current_order, start_type);
1891                 return page;
1892         }
1893 
1894         /*
1895          * Boost watermarks to increase reclaim pressure to reduce the
1896          * likelihood of future fallbacks. Wake kswapd now as the node
1897          * may be balanced overall and kswapd will not wake naturally.
1898          */
1899         if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
1900                 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
1901 
1902         /* We are not allowed to try stealing from the whole block */
1903         if (!whole_block)
1904                 goto single_page;
1905 
1906         /* moving whole block can fail due to zone boundary conditions */
1907         if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
1908                                        &movable_pages))
1909                 goto single_page;
1910 
1911         /*
1912          * Determine how many pages are compatible with our allocation.
1913          * For movable allocation, it's the number of movable pages which
1914          * we just obtained. For other types it's a bit more tricky.
1915          */
1916         if (start_type == MIGRATE_MOVABLE) {
1917                 alike_pages = movable_pages;
1918         } else {
1919                 /*
1920                  * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
1921                  * to MOVABLE pageblock, consider all non-movable pages as
1922                  * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
1923                  * vice versa, be conservative since we can't distinguish the
1924                  * exact migratetype of non-movable pages.
1925                  */
1926                 if (block_type == MIGRATE_MOVABLE)
1927                         alike_pages = pageblock_nr_pages
1928                                                 - (free_pages + movable_pages);
1929                 else
1930                         alike_pages = 0;
1931         }
1932         /*
1933          * If a sufficient number of pages in the block are either free or
1934          * compatible in migratability with our allocation, claim the whole block.
1935          */
1936         if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
1937                         page_group_by_mobility_disabled) {
1938                 __move_freepages_block(zone, start_pfn, block_type, start_type);
1939                 return __rmqueue_smallest(zone, order, start_type);
1940         }
1941 
1942 single_page:
1943         del_page_from_free_list(page, zone, current_order, block_type);
1944         expand(zone, page, order, current_order, block_type);
1945         return page;
1946 }
1947 
1948 /*
1949  * Check whether there is a suitable fallback freepage with the requested
1950  * order. If only_stealable is true, this function returns fallback_mt only
1951  * if we can steal the block's other free pages along with it, which helps
1952  * reduce fragmentation due to mixed migratetype pages in one pageblock.
1953  */
1954 int find_suitable_fallback(struct free_area *area, unsigned int order,
1955                         int migratetype, bool only_stealable, bool *can_steal)
1956 {
1957         int i;
1958         int fallback_mt;
1959 
1960         if (area->nr_free == 0)
1961                 return -1;
1962 
1963         *can_steal = false;
1964         for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) {
1965                 fallback_mt = fallbacks[migratetype][i];
1966                 if (free_area_empty(area, fallback_mt))
1967                         continue;
1968 
1969                 if (can_steal_fallback(order, migratetype))
1970                         *can_steal = true;
1971 
1972                 if (!only_stealable)
1973                         return fallback_mt;
1974 
1975                 if (*can_steal)
1976                         return fallback_mt;
1977         }
1978 
1979         return -1;
1980 }
1981 
1982 /*
1983  * Reserve the pageblock(s) surrounding an allocation request for
1984  * exclusive use of high-order atomic allocations if there are no
1985  * empty page blocks that contain a page with a suitable order
1986  */
1987 static void reserve_highatomic_pageblock(struct page *page, int order,
1988                                          struct zone *zone)
1989 {
1990         int mt;
1991         unsigned long max_managed, flags;
1992 
1993         /*
1994          * The amount reserved: the minimum is one pageblock, the maximum
1995          * is roughly 1% of the zone. If 1% of the zone is smaller than
1996          * a pageblock, don't reserve any pageblocks.
1997          * The check is race-prone but harmless.
1998          */
1999         if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
2000                 return;
2001         max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
2002         if (zone->nr_reserved_highatomic >= max_managed)
2003                 return;
2004 
2005         spin_lock_irqsave(&zone->lock, flags);
2006 
2007         /* Recheck the nr_reserved_highatomic limit under the lock */
2008         if (zone->nr_reserved_highatomic >= max_managed)
2009                 goto out_unlock;
2010 
2011         /* Yoink! */
2012         mt = get_pageblock_migratetype(page);
2013         /* Only reserve normal pageblocks (i.e., they can merge with others) */
2014         if (!migratetype_is_mergeable(mt))
2015                 goto out_unlock;
2016 
2017         if (order < pageblock_order) {
2018                 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
2019                         goto out_unlock;
2020                 zone->nr_reserved_highatomic += pageblock_nr_pages;
2021         } else {
2022                 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
2023                 zone->nr_reserved_highatomic += 1 << order;
2024         }
2025 
2026 out_unlock:
2027         spin_unlock_irqrestore(&zone->lock, flags);
2028 }
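/*
 * Worked example (editor's illustration, assuming 4K pages and
 * pageblock_order == 9): a zone managing 4 GiB has 1048576 pages, so 1%
 * is 10485 pages and max_managed = ALIGN(10485, 512) = 10752 pages,
 * i.e. at most 21 highatomic pageblocks are reserved.
 */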
2029 
2030 /*
2031  * Used when an allocation is about to fail under memory pressure. This
2032  * potentially hurts the reliability of high-order allocations when under
2033  * intense memory pressure but failed atomic allocations should be easier
2034  * to recover from than an OOM.
2035  *
2036  * If @force is true, try to unreserve pageblocks even when the
2037  * highatomic reserve is down to its last pageblock.
2038  */
2039 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2040                                                 bool force)
2041 {
2042         struct zonelist *zonelist = ac->zonelist;
2043         unsigned long flags;
2044         struct zoneref *z;
2045         struct zone *zone;
2046         struct page *page;
2047         int order;
2048         int ret;
2049 
2050         for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2051                                                                 ac->nodemask) {
2052                 /*
2053                  * Preserve at least one pageblock unless memory pressure
2054                  * is really high.
2055                  */
2056                 if (!force && zone->nr_reserved_highatomic <=
2057                                         pageblock_nr_pages)
2058                         continue;
2059 
2060                 spin_lock_irqsave(&zone->lock, flags);
2061                 for (order = 0; order < NR_PAGE_ORDERS; order++) {
2062                         struct free_area *area = &(zone->free_area[order]);
2063                         int mt;
2064 
2065                         page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2066                         if (!page)
2067                                 continue;
2068 
2069                         mt = get_pageblock_migratetype(page);
2070                         /*
2071                          * In the page freeing path, the migratetype change is
2072                          * racy, so this loop can encounter several free pages
2073                          * in a pageblock even after we have changed the block
2074                          * type from highatomic to ac->migratetype. So adjust
2075                          * the count only once.
2076                          */
2077                         if (is_migrate_highatomic(mt)) {
2078                                 unsigned long size;
2079                                 /*
2080                                  * It should never happen but changes to
2081                                  * locking could inadvertently allow a per-cpu
2082                                  * drain to add pages to MIGRATE_HIGHATOMIC
2083                                  * while unreserving so be safe and watch for
2084                                  * underflows.
2085                                  */
2086                                 size = max(pageblock_nr_pages, 1UL << order);
2087                                 size = min(size, zone->nr_reserved_highatomic);
2088                                 zone->nr_reserved_highatomic -= size;
2089                         }
2090 
2091                         /*
2092                          * Convert to ac->migratetype and avoid the normal
2093                          * pageblock stealing heuristics. Minimally, the caller
2094                          * is doing the work and needs the pages. More
2095                          * importantly, if the block was always converted to
2096                          * MIGRATE_UNMOVABLE or another type then the number
2097                          * of pageblocks that cannot be completely freed
2098                          * may increase.
2099                          */
2100                         if (order < pageblock_order)
2101                                 ret = move_freepages_block(zone, page, mt,
2102                                                            ac->migratetype);
2103                         else {
2104                                 move_to_free_list(page, zone, order, mt,
2105                                                   ac->migratetype);
2106                                 change_pageblock_range(page, order,
2107                                                        ac->migratetype);
2108                                 ret = 1;
2109                         }
2110                         /*
2111                          * Reserving the block(s) already succeeded,
2112                          * so this should not fail on zone boundaries.
2113                          */
2114                         WARN_ON_ONCE(ret == -1);
2115                         if (ret > 0) {
2116                                 spin_unlock_irqrestore(&zone->lock, flags);
2117                                 return ret;
2118                         }
2119                 }
2120                 spin_unlock_irqrestore(&zone->lock, flags);
2121         }
2122 
2123         return false;
2124 }
2125 
2126 /*
2127  * Try finding a free buddy page on the fallback list and put it on the free
2128  * list of requested migratetype, possibly along with other pages from the same
2129  * block, depending on fragmentation avoidance heuristics. Returns the
2130  * page taken from the fallback list, or NULL if no fallback was found.
2131  *
2132  * The use of signed ints for order and current_order is a deliberate
2133  * deviation from the rest of this file, to make the for loop
2134  * condition simpler.
2135  */
2136 static __always_inline struct page *
2137 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2138                                                 unsigned int alloc_flags)
2139 {
2140         struct free_area *area;
2141         int current_order;
2142         int min_order = order;
2143         struct page *page;
2144         int fallback_mt;
2145         bool can_steal;
2146 
2147         /*
2148          * Do not steal pages from freelists belonging to other pageblocks
2149          * i.e. orders < pageblock_order. If there are no local zones free,
2150          * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2151          */
2152         if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
2153                 min_order = pageblock_order;
2154 
2155         /*
2156          * Find the largest available free page in the other list. This roughly
2157          * approximates finding the pageblock with the most free pages, which
2158          * would be too costly to do exactly.
2159          */
2160         for (current_order = MAX_PAGE_ORDER; current_order >= min_order;
2161                                 --current_order) {
2162                 area = &(zone->free_area[current_order]);
2163                 fallback_mt = find_suitable_fallback(area, current_order,
2164                                 start_migratetype, false, &can_steal);
2165                 if (fallback_mt == -1)
2166                         continue;
2167 
2168                 /*
2169                  * We cannot steal all free pages from the pageblock and the
2170                  * requested migratetype is movable. In that case it's better to
2171                  * steal and split the smallest available page instead of the
2172                  * largest available page, because even if the next movable
2173                  * allocation falls back into a different pageblock than this
2174                  * one, it won't cause permanent fragmentation.
2175                  */
2176                 if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2177                                         && current_order > order)
2178                         goto find_smallest;
2179 
2180                 goto do_steal;
2181         }
2182 
2183         return NULL;
2184 
2185 find_smallest:
2186         for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
2187                 area = &(zone->free_area[current_order]);
2188                 fallback_mt = find_suitable_fallback(area, current_order,
2189                                 start_migratetype, false, &can_steal);
2190                 if (fallback_mt != -1)
2191                         break;
2192         }
2193 
2194         /*
2195          * This should not happen - we already found a suitable fallback
2196          * when looking for the largest page.
2197          */
2198         VM_BUG_ON(current_order > MAX_PAGE_ORDER);
2199 
2200 do_steal:
2201         page = get_page_from_free_area(area, fallback_mt);
2202 
2203         /* take off list, maybe claim block, expand remainder */
2204         page = steal_suitable_fallback(zone, page, current_order, order,
2205                                        start_migratetype, alloc_flags, can_steal);
2206 
2207         trace_mm_page_alloc_extfrag(page, order, current_order,
2208                 start_migratetype, fallback_mt);
2209 
2210         return page;
2211 }
2212 
2213 /*
2214  * Do the hard work of removing an element from the buddy allocator.
2215  * Call me with the zone->lock already held.
2216  */
2217 static __always_inline struct page *
2218 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
2219                                                 unsigned int alloc_flags)
2220 {
2221         struct page *page;
2222 
2223         if (IS_ENABLED(CONFIG_CMA)) {
2224                 /*
2225                  * Balance movable allocations between regular and CMA areas by
2226                  * allocating from CMA when over half of the zone's free memory
2227                  * is in the CMA area.
2228                  */
2229                 if (alloc_flags & ALLOC_CMA &&
2230                     zone_page_state(zone, NR_FREE_CMA_PAGES) >
2231                     zone_page_state(zone, NR_FREE_PAGES) / 2) {
2232                         page = __rmqueue_cma_fallback(zone, order);
2233                         if (page)
2234                                 return page;
2235                 }
2236         }
2237 
2238         page = __rmqueue_smallest(zone, order, migratetype);
2239         if (unlikely(!page)) {
2240                 if (alloc_flags & ALLOC_CMA)
2241                         page = __rmqueue_cma_fallback(zone, order);
2242 
2243                 if (!page)
2244                         page = __rmqueue_fallback(zone, order, migratetype,
2245                                                   alloc_flags);
2246         }
2247         return page;
2248 }
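/*
 * Worked example (editor's illustration): with 1000 free pages in the
 * zone of which 600 sit in CMA, 600 > 1000 / 2, so a request with
 * ALLOC_CMA set is steered to __rmqueue_cma_fallback() first; otherwise
 * CMA is tried only after the preferred migratetype's lists are empty,
 * before falling back to other migratetypes.
 */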
2249 
2250 /*
2251  * Obtain a specified number of elements from the buddy allocator, all under
2252  * a single hold of the lock, for efficiency.  Add them to the supplied list.
2253  * Returns the number of new pages which were placed at *list.
2254  */
2255 static int rmqueue_bulk(struct zone *zone, unsigned int order,
2256                         unsigned long count, struct list_head *list,
2257                         int migratetype, unsigned int alloc_flags)
2258 {
2259         unsigned long flags;
2260         int i;
2261 
2262         spin_lock_irqsave(&zone->lock, flags);
2263         for (i = 0; i < count; ++i) {
2264                 struct page *page = __rmqueue(zone, order, migratetype,
2265                                                                 alloc_flags);
2266                 if (unlikely(page == NULL))
2267                         break;
2268 
2269                 /*
2270                  * Split buddy pages returned by expand() are received here in
2271                  * physical page order. The page is added to the tail of the
2272                  * caller's list, so from the caller's perspective the linked
2273                  * list is ordered by page number under some conditions. This
2274                  * is useful for IO devices that traverse forward from the
2275                  * head, and thus in physical page order, and for IO devices
2276                  * that can merge IO requests when the physical pages are
2277                  * ordered properly.
2278                  */
2279                 list_add_tail(&page->pcp_list, list);
2280         }
2281         spin_unlock_irqrestore(&zone->lock, flags);
2282 
2283         return i;
2284 }
2285 
2286 /*
2287  * Called from the vmstat counter updater to decay the PCP high.
2288  * Return whether there is additional work to do.
2289  */
2290 int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
2291 {
2292         int high_min, to_drain, batch;
2293         int todo = 0;
2294 
2295         high_min = READ_ONCE(pcp->high_min);
2296         batch = READ_ONCE(pcp->batch);
2297         /*
2298          * Decrease pcp->high periodically to try to free possibly
2299          * idle PCP pages.  To control latency, avoid freeing too many
2300          * pages at once; this also caps the pcp->high decrement.
2301          */
2302         if (pcp->high > high_min) {
2303                 pcp->high = max3(pcp->count - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2304                                  pcp->high - (pcp->high >> 3), high_min);
2305                 if (pcp->high > high_min)
2306                         todo++;
2307         }
2308 
2309         to_drain = pcp->count - pcp->high;
2310         if (to_drain > 0) {
2311                 spin_lock(&pcp->lock);
2312                 free_pcppages_bulk(zone, to_drain, pcp, 0);
2313                 spin_unlock(&pcp->lock);
2314                 todo++;
2315         }
2316 
2317         return todo;
2318 }
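/*
 * Worked example (editor's illustration, assuming the default
 * CONFIG_PCP_BATCH_SCALE_MAX of 5): with high_min == 64, pcp->high ==
 * 1024, pcp->count == 900 and batch == 63, the new high is
 * max3(900 - (63 << 5), 1024 - 1024 / 8, 64) = max3(-1116, 896, 64) =
 * 896, and 900 - 896 = 4 pages are drained.
 */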
2319 
2320 #ifdef CONFIG_NUMA
2321 /*
2322  * Called from the vmstat counter updater to drain pagesets of this
2323  * currently executing processor on remote nodes after they have
2324  * expired.
2325  */
2326 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
2327 {
2328         int to_drain, batch;
2329 
2330         batch = READ_ONCE(pcp->batch);
2331         to_drain = min(pcp->count, batch);
2332         if (to_drain > 0) {
2333                 spin_lock(&pcp->lock);
2334                 free_pcppages_bulk(zone, to_drain, pcp, 0);
2335                 spin_unlock(&pcp->lock);
2336         }
2337 }
2338 #endif
2339 
2340 /*
2341  * Drain pcplists of the indicated processor and zone.
2342  */
2343 static void drain_pages_zone(unsigned int cpu, struct zone *zone)
2344 {
2345         struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2346         int count;
2347 
2348         do {
2349                 spin_lock(&pcp->lock);
2350                 count = pcp->count;
2351                 if (count) {
2352                         int to_drain = min(count,
2353                                 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
2354 
2355                         free_pcppages_bulk(zone, to_drain, pcp, 0);
2356                         count -= to_drain;
2357                 }
2358                 spin_unlock(&pcp->lock);
2359         } while (count);
2360 }
2361 
2362 /*
2363  * Drain pcplists of all zones on the indicated processor.
2364  */
2365 static void drain_pages(unsigned int cpu)
2366 {
2367         struct zone *zone;
2368 
2369         for_each_populated_zone(zone) {
2370                 drain_pages_zone(cpu, zone);
2371         }
2372 }
2373 
2374 /*
2375  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2376  */
2377 void drain_local_pages(struct zone *zone)
2378 {
2379         int cpu = smp_processor_id();
2380 
2381         if (zone)
2382                 drain_pages_zone(cpu, zone);
2383         else
2384                 drain_pages(cpu);
2385 }
2386 
2387 /*
2388  * The implementation of drain_all_pages(), exposing an extra parameter to
2389  * drain on all cpus.
2390  *
2391  * drain_all_pages() is optimized to only execute on cpus where pcplists are
2392  * not empty. The check for non-emptiness can however race with a free to
2393  * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
2394  * that need the guarantee that every CPU has drained can disable the
2395  * optimizing racy check.
2396  */
2397 static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
2398 {
2399         int cpu;
2400 
2401         /*
2402          * Allocate in the BSS so we won't require allocation in
2403          * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
2404          */
2405         static cpumask_t cpus_with_pcps;
2406 
2407         /*
2408          * Do not drain if one is already in progress unless it's specific to
2409          * a zone. Such callers are primarily CMA and memory hotplug and need
2410          * the drain to be complete when the call returns.
2411          */
2412         if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
2413                 if (!zone)
2414                         return;
2415                 mutex_lock(&pcpu_drain_mutex);
2416         }
2417 
2418         /*
2419          * We don't care about racing with a CPU hotplug event:
2420          * the offline notification will cause the notified cpu
2421          * to drain its pcps, and on_each_cpu_mask disables
2422          * preemption as part of its processing.
2423          */
2424         for_each_online_cpu(cpu) {
2425                 struct per_cpu_pages *pcp;
2426                 struct zone *z;
2427                 bool has_pcps = false;
2428 
2429                 if (force_all_cpus) {
2430                         /*
2431                          * The pcp.count check is racy, some callers need a
2432                          * guarantee that no cpu is missed.
2433                          */
2434                         has_pcps = true;
2435                 } else if (zone) {
2436                         pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
2437                         if (pcp->count)
2438                                 has_pcps = true;
2439                 } else {
2440                         for_each_populated_zone(z) {
2441                                 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu);
2442                                 if (pcp->count) {
2443                                         has_pcps = true;
2444                                         break;
2445                                 }
2446                         }
2447                 }
2448 
2449                 if (has_pcps)
2450                         cpumask_set_cpu(cpu, &cpus_with_pcps);
2451                 else
2452                         cpumask_clear_cpu(cpu, &cpus_with_pcps);
2453         }
2454 
2455         for_each_cpu(cpu, &cpus_with_pcps) {
2456                 if (zone)
2457                         drain_pages_zone(cpu, zone);
2458                 else
2459                         drain_pages(cpu);
2460         }
2461 
2462         mutex_unlock(&pcpu_drain_mutex);
2463 }
2464 
2465 /*
2466  * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2467  *
2468  * When zone parameter is non-NULL, spill just the single zone's pages.
2469  */
2470 void drain_all_pages(struct zone *zone)
2471 {
2472         __drain_all_pages(zone, false);
2473 }
2474 
2475 static int nr_pcp_free(struct per_cpu_pages *pcp, int batch, int high, bool free_high)
2476 {
2477         int min_nr_free, max_nr_free;
2478 
2479         /* Free as much as possible if batch freeing high-order pages. */
2480         if (unlikely(free_high))
2481                 return min(pcp->count, batch << CONFIG_PCP_BATCH_SCALE_MAX);
2482 
2483         /* Check for PCP disabled or boot pageset */
2484         if (unlikely(high < batch))
2485                 return 1;
2486 
2487         /* Leave at least pcp->batch pages on the list */
2488         min_nr_free = batch;
2489         max_nr_free = high - batch;
2490 
2491         /*
2492          * Increase the batch number to the number of the consecutive
2493          * freed pages to reduce zone lock contention.
2494          */
2495         batch = clamp_t(int, pcp->free_count, min_nr_free, max_nr_free);
2496 
2497         return batch;
2498 }
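/*
 * Worked example (editor's illustration): with batch == 63 and high ==
 * 1024, the clamp window is [63, 961]; a free_count of 300 therefore
 * raises the bulk-free batch to 300 pages, amortising zone->lock
 * acquisitions across consecutive frees.
 */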
2499 
2500 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
2501                        int batch, bool free_high)
2502 {
2503         int high, high_min, high_max;
2504 
2505         high_min = READ_ONCE(pcp->high_min);
2506         high_max = READ_ONCE(pcp->high_max);
2507         high = pcp->high = clamp(pcp->high, high_min, high_max);
2508 
2509         if (unlikely(!high))
2510                 return 0;
2511 
2512         if (unlikely(free_high)) {
2513                 pcp->high = max(high - (batch << CONFIG_PCP_BATCH_SCALE_MAX),
2514                                 high_min);
2515                 return 0;
2516         }
2517 
2518         /*
2519          * If reclaim is active, limit the number of pages that can be
2520          * stored on pcp lists
2521          */
2522         if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) {
2523                 int free_count = max_t(int, pcp->free_count, batch);
2524 
2525                 pcp->high = max(high - free_count, high_min);
2526                 return min(batch << 2, pcp->high);
2527         }
2528 
2529         if (high_min == high_max)
2530                 return high;
2531 
2532         if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) {
2533                 int free_count = max_t(int, pcp->free_count, batch);
2534 
2535                 pcp->high = max(high - free_count, high_min);
2536                 high = max(pcp->count, high_min);
2537         } else if (pcp->count >= high) {
2538                 int need_high = pcp->free_count + batch;
2539 
2540                 /* pcp->high should be large enough to hold batch freed pages */
2541                 if (pcp->high < need_high)
2542                         pcp->high = clamp(need_high, high_min, high_max);
2543         }
2544 
2545         return high;
2546 }
2547 
2548 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
2549                                    struct page *page, int migratetype,
2550                                    unsigned int order)
2551 {
2552         int high, batch;
2553         int pindex;
2554         bool free_high = false;
2555 
2556         /*
2557          * On freeing, reduce the number of pages that are batch allocated.
2558          * See nr_pcp_alloc() where alloc_factor is increased for subsequent
2559          * allocations.
2560          */
2561         pcp->alloc_factor >>= 1;
2562         __count_vm_events(PGFREE, 1 << order);
2563         pindex = order_to_pindex(migratetype, order);
2564         list_add(&page->pcp_list, &pcp->lists[pindex]);
2565         pcp->count += 1 << order;
2566 
2567         batch = READ_ONCE(pcp->batch);
2568         /*
2569          * As high-order pages other than THP's stored on PCP can contribute
2570          * to fragmentation, limit the number stored when PCP is heavily
2571          * freeing without allocation. The remainder after bulk freeing
2572          * stops will be drained from vmstat refresh context.
2573          */
2574         if (order && order <= PAGE_ALLOC_COSTLY_ORDER) {
2575                 free_high = (pcp->free_count >= batch &&
2576                              (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) &&
2577                              (!(pcp->flags & PCPF_FREE_HIGH_BATCH) ||
2578                               pcp->count >= READ_ONCE(batch)));
2579                 pcp->flags |= PCPF_PREV_FREE_HIGH_ORDER;
2580         } else if (pcp->flags & PCPF_PREV_FREE_HIGH_ORDER) {
2581                 pcp->flags &= ~PCPF_PREV_FREE_HIGH_ORDER;
2582         }
2583         if (pcp->free_count < (batch << CONFIG_PCP_BATCH_SCALE_MAX))
2584                 pcp->free_count += (1 << order);
2585         high = nr_pcp_high(pcp, zone, batch, free_high);
2586         if (pcp->count >= high) {
2587                 free_pcppages_bulk(zone, nr_pcp_free(pcp, batch, high, free_high),
2588                                    pcp, pindex);
2589                 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) &&
2590                     zone_watermark_ok(zone, 0, high_wmark_pages(zone),
2591                                       ZONE_MOVABLE, 0))
2592                         clear_bit(ZONE_BELOW_HIGH, &zone->flags);
2593         }
2594 }
2595 
2596 /*
2597  * Free a pcp page
2598  */
2599 void free_unref_page(struct page *page, unsigned int order)
2600 {
2601         unsigned long __maybe_unused UP_flags;
2602         struct per_cpu_pages *pcp;
2603         struct zone *zone;
2604         unsigned long pfn = page_to_pfn(page);
2605         int migratetype;
2606 
2607         if (!pcp_allowed_order(order)) {
2608                 __free_pages_ok(page, order, FPI_NONE);
2609                 return;
2610         }
2611 
2612         if (!free_pages_prepare(page, order))
2613                 return;
2614 
2615         /*
2616          * We only track unmovable, reclaimable and movable on pcp lists.
2617          * Free ISOLATE pages back to the allocator because they are being
2618          * offlined, but treat HIGHATOMIC and CMA as movable pages so we
2619          * can get those areas back if necessary. Otherwise, we may have
2620          * to free pages excessively into the page allocator.
2621          */
2622         migratetype = get_pfnblock_migratetype(page, pfn);
2623         if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
2624                 if (unlikely(is_migrate_isolate(migratetype))) {
2625                         free_one_page(page_zone(page), page, pfn, order, FPI_NONE);
2626                         return;
2627                 }
2628                 migratetype = MIGRATE_MOVABLE;
2629         }
2630 
2631         zone = page_zone(page);
2632         pcp_trylock_prepare(UP_flags);
2633         pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2634         if (pcp) {
2635                 free_unref_page_commit(zone, pcp, page, migratetype, order);
2636                 pcp_spin_unlock(pcp);
2637         } else {
2638                 free_one_page(zone, page, pfn, order, FPI_NONE);
2639         }
2640         pcp_trylock_finish(UP_flags);
2641 }
2642 
2643 /*
2644  * Free a batch of folios
2645  */
2646 void free_unref_folios(struct folio_batch *folios)
2647 {
2648         unsigned long __maybe_unused UP_flags;
2649         struct per_cpu_pages *pcp = NULL;
2650         struct zone *locked_zone = NULL;
2651         int i, j;
2652 
2653         /* Prepare folios for freeing */
2654         for (i = 0, j = 0; i < folios->nr; i++) {
2655                 struct folio *folio = folios->folios[i];
2656                 unsigned long pfn = folio_pfn(folio);
2657                 unsigned int order = folio_order(folio);
2658 
2659                 folio_undo_large_rmappable(folio);
2660                 if (!free_pages_prepare(&folio->page, order))
2661                         continue;
2662                 /*
2663                  * Free orders not handled on the PCP directly to the
2664                  * allocator.
2665                  */
2666                 if (!pcp_allowed_order(order)) {
2667                         free_one_page(folio_zone(folio), &folio->page,
2668                                       pfn, order, FPI_NONE);
2669                         continue;
2670                 }
2671                 folio->private = (void *)(unsigned long)order;
2672                 if (j != i)
2673                         folios->folios[j] = folio;
2674                 j++;
2675         }
2676         folios->nr = j;
2677 
2678         for (i = 0; i < folios->nr; i++) {
2679                 struct folio *folio = folios->folios[i];
2680                 struct zone *zone = folio_zone(folio);
2681                 unsigned long pfn = folio_pfn(folio);
2682                 unsigned int order = (unsigned long)folio->private;
2683                 int migratetype;
2684 
2685                 folio->private = NULL;
2686                 migratetype = get_pfnblock_migratetype(&folio->page, pfn);
2687 
2688                 /* Different zone requires a different pcp lock */
2689                 if (zone != locked_zone ||
2690                     is_migrate_isolate(migratetype)) {
2691                         if (pcp) {
2692                                 pcp_spin_unlock(pcp);
2693                                 pcp_trylock_finish(UP_flags);
2694                                 locked_zone = NULL;
2695                                 pcp = NULL;
2696                         }
2697 
2698                         /*
2699                          * Free isolated pages directly to the
2700                          * allocator, see comment in free_unref_page.
2701                          */
2702                         if (is_migrate_isolate(migratetype)) {
2703                                 free_one_page(zone, &folio->page, pfn,
2704                                               order, FPI_NONE);
2705                                 continue;
2706                         }
2707 
2708                         /*
2709                          * trylock is necessary as folios may be getting freed
2710                          * from IRQ or SoftIRQ context after an IO completion.
2711                          */
2712                         pcp_trylock_prepare(UP_flags);
2713                         pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2714                         if (unlikely(!pcp)) {
2715                                 pcp_trylock_finish(UP_flags);
2716                                 free_one_page(zone, &folio->page, pfn,
2717                                               order, FPI_NONE);
2718                                 continue;
2719                         }
2720                         locked_zone = zone;
2721                 }
2722 
2723                 /*
2724                  * Non-isolated types over MIGRATE_PCPTYPES get added
2725                  * to the MIGRATE_MOVABLE pcp list.
2726                  */
2727                 if (unlikely(migratetype >= MIGRATE_PCPTYPES))
2728                         migratetype = MIGRATE_MOVABLE;
2729 
2730                 trace_mm_page_free_batched(&folio->page);
2731                 free_unref_page_commit(zone, pcp, &folio->page, migratetype,
2732                                 order);
2733         }
2734 
2735         if (pcp) {
2736                 pcp_spin_unlock(pcp);
2737                 pcp_trylock_finish(UP_flags);
2738         }
2739         folio_batch_reinit(folios);
2740 }
2741 
2742 /*
2743  * split_page takes a non-compound higher-order page, and splits it into
2744  * n (1 << order) sub-pages: page[0..n-1].
2745  * Each sub-page must be freed individually.
2746  *
2747  * Note: this is probably too low level an operation for use in drivers.
2748  * Please consult with lkml before using this in your driver.
2749  */
2750 void split_page(struct page *page, unsigned int order)
2751 {
2752         int i;
2753 
2754         VM_BUG_ON_PAGE(PageCompound(page), page);
2755         VM_BUG_ON_PAGE(!page_count(page), page);
2756 
2757         for (i = 1; i < (1 << order); i++)
2758                 set_page_refcounted(page + i);
2759         split_page_owner(page, order, 0);
2760         pgalloc_tag_split(page, 1 << order);
2761         split_page_memcg(page, order, 0);
2762 }
2763 EXPORT_SYMBOL_GPL(split_page);
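
/*
 * Illustrative sketch (not part of the kernel source): one way a caller
 * might use split_page() to hand out sub-pages individually. The name
 * demo_split_order2() is hypothetical.
 */
#if 0
static struct page *demo_split_order2(void)
{
        unsigned int i;
        /* No __GFP_COMP: split_page() requires a non-compound page. */
        struct page *page = alloc_pages(GFP_KERNEL, 2);

        if (!page)
                return NULL;

        /* Give each of the 1 << 2 sub-pages its own reference count. */
        split_page(page, 2);

        /* All but the first sub-page can now be freed individually. */
        for (i = 1; i < (1 << 2); i++)
                __free_page(page + i);

        return page;    /* later freed with __free_page(page) */
}
#endif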
2764 
2765 int __isolate_free_page(struct page *page, unsigned int order)
2766 {
2767         struct zone *zone = page_zone(page);
2768         int mt = get_pageblock_migratetype(page);
2769 
2770         if (!is_migrate_isolate(mt)) {
2771                 unsigned long watermark;
2772                 /*
2773                  * Obey watermarks as if the page was being allocated. We can
2774                  * emulate a high-order watermark check with a raised order-0
2775                  * watermark, because we already know our high-order page
2776                  * exists.
2777                  */
2778                 watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
2779                 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
2780                         return 0;
2781         }
2782 
2783         del_page_from_free_list(page, zone, order, mt);
2784 
2785         /*
2786          * Set the pageblock's migratetype if the isolated page covers at
2787          * least half of a pageblock
2788          */
2789         if (order >= pageblock_order - 1) {
2790                 struct page *endpage = page + (1 << order) - 1;
2791                 for (; page < endpage; page += pageblock_nr_pages) {
2792                         int mt = get_pageblock_migratetype(page);
2793                         /*
2794                          * Only change normal pageblocks (i.e., they can merge
2795                          * with others)
2796                          */
2797                         if (migratetype_is_mergeable(mt))
2798                                 move_freepages_block(zone, page, mt,
2799                                                      MIGRATE_MOVABLE);
2800                 }
2801         }
2802 
2803         return 1UL << order;
2804 }
2805 
2806 /**
2807  * __putback_isolated_page - Return a now-isolated page back where we got it
2808  * @page: Page that was isolated
2809  * @order: Order of the isolated page
2810  * @mt: The page's pageblock's migratetype
2811  *
2812  * This function is meant to return a page pulled from the free lists via
2813  * __isolate_free_page back to the free list it was pulled from.
2814  */
2815 void __putback_isolated_page(struct page *page, unsigned int order, int mt)
2816 {
2817         struct zone *zone = page_zone(page);
2818 
2819         /* zone lock should be held when this function is called */
2820         lockdep_assert_held(&zone->lock);
2821 
2822         /* Return isolated page to tail of freelist. */
2823         __free_one_page(page, page_to_pfn(page), zone, order, mt,
2824                         FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
2825 }
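
/*
 * Illustrative sketch (not part of the kernel source): the pairing the
 * two helpers above expect. A caller such as compaction pulls a free
 * page out under zone->lock and, if it ends up unused, puts it back the
 * same way. demo_isolate_then_putback() is a hypothetical name.
 */
#if 0
static void demo_isolate_then_putback(struct page *page, unsigned int order)
{
        struct zone *zone = page_zone(page);
        int mt = get_pageblock_migratetype(page);
        unsigned long flags;

        spin_lock_irqsave(&zone->lock, flags);
        if (__isolate_free_page(page, order)) {
                /* ... the page temporarily belongs to the caller ... */
                __putback_isolated_page(page, order, mt);
        }
        spin_unlock_irqrestore(&zone->lock, flags);
}
#endif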
2826 
2827 /*
2828  * Update NUMA hit/miss statistics
2829  */
2830 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
2831                                    long nr_account)
2832 {
2833 #ifdef CONFIG_NUMA
2834         enum numa_stat_item local_stat = NUMA_LOCAL;
2835 
2836         /* skip NUMA counter updates if NUMA stats are disabled */
2837         if (!static_branch_likely(&vm_numa_stat_key))
2838                 return;
2839 
2840         if (zone_to_nid(z) != numa_node_id())
2841                 local_stat = NUMA_OTHER;
2842 
2843         if (zone_to_nid(z) == zone_to_nid(preferred_zone))
2844                 __count_numa_events(z, NUMA_HIT, nr_account);
2845         else {
2846                 __count_numa_events(z, NUMA_MISS, nr_account);
2847                 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account);
2848         }
2849         __count_numa_events(z, local_stat, nr_account);
2850 #endif
2851 }
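
/*
 * Worked example (illustrative): a task running on node 0 whose
 * preferred zone is on node 0 but whose page comes from node 1 accounts
 * NUMA_MISS and NUMA_OTHER on node 1 plus NUMA_FOREIGN on node 0. Had
 * node 0 satisfied the request, node 0 would get NUMA_HIT and
 * NUMA_LOCAL instead.
 */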
2852 
2853 static __always_inline
2854 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
2855                            unsigned int order, unsigned int alloc_flags,
2856                            int migratetype)
2857 {
2858         struct page *page;
2859         unsigned long flags;
2860 
2861         do {
2862                 page = NULL;
2863                 spin_lock_irqsave(&zone->lock, flags);
2864                 if (alloc_flags & ALLOC_HIGHATOMIC)
2865                         page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2866                 if (!page) {
2867                         page = __rmqueue(zone, order, migratetype, alloc_flags);
2868 
2869                         /*
2870                          * If the allocation fails, allow OOM handling access
2871                          * to HIGHATOMIC reserves as failing now is worse than
2872                          * failing a high-order atomic allocation in the
2873                          * future.
2874                          */
2875                         if (!page && (alloc_flags & ALLOC_OOM))
2876                                 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
2877 
2878                         if (!page) {
2879                                 spin_unlock_irqrestore(&zone->lock, flags);
2880                                 return NULL;
2881                         }
2882                 }
2883                 spin_unlock_irqrestore(&zone->lock, flags);
2884         } while (check_new_pages(page, order));
2885 
2886         __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
2887         zone_statistics(preferred_zone, zone, 1);
2888 
2889         return page;
2890 }
2891 
2892 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order)
2893 {
2894         int high, base_batch, batch, max_nr_alloc;
2895         int high_max, high_min;
2896 
2897         base_batch = READ_ONCE(pcp->batch);
2898         high_min = READ_ONCE(pcp->high_min);
2899         high_max = READ_ONCE(pcp->high_max);
2900         high = pcp->high = clamp(pcp->high, high_min, high_max);
2901 
2902         /* Check for PCP disabled or boot pageset */
2903         if (unlikely(high < base_batch))
2904                 return 1;
2905 
2906         if (order)
2907                 batch = base_batch;
2908         else
2909                 batch = (base_batch << pcp->alloc_factor);
2910 
2911         /*
2912          * If we had a larger pcp->high, we could avoid allocating from the
2913          * zone.
2914          */
2915         if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags))
2916                 high = pcp->high = min(high + batch, high_max);
2917 
2918         if (!order) {
2919                 max_nr_alloc = max(high - pcp->count - base_batch, base_batch);
2920                 /*
2921                  * Double the number of pages allocated on each subsequent
2922                  * order-0 allocation that arrives without any intervening freeing.
2923                  */
2924                 if (batch <= max_nr_alloc &&
2925                     pcp->alloc_factor < CONFIG_PCP_BATCH_SCALE_MAX)
2926                         pcp->alloc_factor++;
2927                 batch = min(batch, max_nr_alloc);
2928         }
2929 
2930         /*
2931          * Scale batch relative to order if batch implies free pages
2932          * can be stored on the PCP. Batch can be 1 for small zones or
2933          * for boot pagesets which should never store free pages as
2934          * the pages may belong to arbitrary zones.
2935          */
2936         if (batch > 1)
2937                 batch = max(batch >> order, 2);
2938 
2939         return batch;
2940 }
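
/*
 * Worked example (illustrative numbers): with base_batch = 64 and
 * alloc_factor = 0, the first order-0 refill requests 64 pages. Each
 * subsequent refill without intervening frees doubles the request
 * (128, 256, ...) until alloc_factor hits CONFIG_PCP_BATCH_SCALE_MAX
 * or max_nr_alloc caps it, amortising zone->lock acquisitions across
 * allocation bursts.
 */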
2941 
2942 /* Remove page from the per-cpu list, caller must protect the list */
2943 static inline
2944 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
2945                         int migratetype,
2946                         unsigned int alloc_flags,
2947                         struct per_cpu_pages *pcp,
2948                         struct list_head *list)
2949 {
2950         struct page *page;
2951 
2952         do {
2953                 if (list_empty(list)) {
2954                         int batch = nr_pcp_alloc(pcp, zone, order);
2955                         int alloced;
2956 
2957                         alloced = rmqueue_bulk(zone, order,
2958                                         batch, list,
2959                                         migratetype, alloc_flags);
2960 
2961                         pcp->count += alloced << order;
2962                         if (unlikely(list_empty(list)))
2963                                 return NULL;
2964                 }
2965 
2966                 page = list_first_entry(list, struct page, pcp_list);
2967                 list_del(&page->pcp_list);
2968                 pcp->count -= 1 << order;
2969         } while (check_new_pages(page, order));
2970 
2971         return page;
2972 }
2973 
2974 /* Lock and remove page from the per-cpu list */
2975 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
2976                         struct zone *zone, unsigned int order,
2977                         int migratetype, unsigned int alloc_flags)
2978 {
2979         struct per_cpu_pages *pcp;
2980         struct list_head *list;
2981         struct page *page;
2982         unsigned long __maybe_unused UP_flags;
2983 
2984         /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
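        /*
         * On UP builds spin_trylock() always succeeds, so
         * pcp_trylock_prepare() disables IRQs there to provide the
         * exclusion instead (sketch of the intent; see the
         * pcp_trylock_* helpers).
         */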
2985         pcp_trylock_prepare(UP_flags);
2986         pcp = pcp_spin_trylock(zone->per_cpu_pageset);
2987         if (!pcp) {
2988                 pcp_trylock_finish(UP_flags);
2989                 return NULL;
2990         }
2991 
2992         /*
2993          * On allocation, reduce the number of pages that are batch freed.
2994          * See nr_pcp_free() where free_count is increased for subsequent
2995          * frees.
2996          */
2997         pcp->free_count >>= 1;
2998         list = &pcp->lists[order_to_pindex(migratetype, order)];
2999         page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
3000         pcp_spin_unlock(pcp);
3001         pcp_trylock_finish(UP_flags);
3002         if (page) {
3003                 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3004                 zone_statistics(preferred_zone, zone, 1);
3005         }
3006         return page;
3007 }
3008 
3009 /*
3010  * Allocate a page from the given zone.
3011  * Use pcplists for THP or "cheap" high-order allocations.
3012  */
3013 
3014 /*
3015  * Do not instrument rmqueue() with KMSAN. This function may call
3016  * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
3017  * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
3018  * may call rmqueue() again, which will result in a deadlock.
3019  */
3020 __no_sanitize_memory
3021 static inline
3022 struct page *rmqueue(struct zone *preferred_zone,
3023                         struct zone *zone, unsigned int order,
3024                         gfp_t gfp_flags, unsigned int alloc_flags,
3025                         int migratetype)
3026 {
3027         struct page *page;
3028 
3029         /*
3030          * We most definitely don't want callers attempting to
3031          * allocate greater than order-1 page units with __GFP_NOFAIL.
3032          */
3033         WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3034 
3035         if (likely(pcp_allowed_order(order))) {
3036                 page = rmqueue_pcplist(preferred_zone, zone, order,
3037                                        migratetype, alloc_flags);
3038                 if (likely(page))
3039                         goto out;
3040         }
3041 
3042         page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
3043                                                         migratetype);
3044 
3045 out:
3046         /* Separate test+clear to avoid unnecessary atomics */
3047         if ((alloc_flags & ALLOC_KSWAPD) &&
3048             unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
3049                 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3050                 wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3051         }
3052 
3053         VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3054         return page;
3055 }
3056 
3057 static inline long __zone_watermark_unusable_free(struct zone *z,
3058                                 unsigned int order, unsigned int alloc_flags)
3059 {
3060         long unusable_free = (1 << order) - 1;
3061 
3062         /*
3063          * If the caller does not have rights to reserves below the min
3064          * watermark then subtract the high-atomic reserves. This will
3065          * over-estimate the size of the atomic reserve but it avoids a search.
3066          */
3067         if (likely(!(alloc_flags & ALLOC_RESERVES)))
3068                 unusable_free += z->nr_reserved_highatomic;
3069 
3070 #ifdef CONFIG_CMA
3071         /* If allocation can't use CMA areas don't use free CMA pages */
3072         if (!(alloc_flags & ALLOC_CMA))
3073                 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3074 #endif
3075 #ifdef CONFIG_UNACCEPTED_MEMORY
3076         unusable_free += zone_page_state(z, NR_UNACCEPTED);
3077 #endif
3078 
3079         return unusable_free;
3080 }
3081 
3082 /*
3083  * Return true if free base pages are above 'mark'. For high-order checks it
3084  * will return true if the order-0 watermark is reached and there is at least
3085  * one free page of a suitable size. Checking now avoids taking the zone lock
3086  * to check in the allocation paths if no pages are free.
3087  */
3088 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3089                          int highest_zoneidx, unsigned int alloc_flags,
3090                          long free_pages)
3091 {
3092         long min = mark;
3093         int o;
3094 
3095         /* free_pages may go negative - that's OK */
3096         free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3097 
3098         if (unlikely(alloc_flags & ALLOC_RESERVES)) {
3099                 /*
3100                  * __GFP_HIGH allows access to 50% of the min reserve as well
3101                  * as OOM.
3102                  */
3103                 if (alloc_flags & ALLOC_MIN_RESERVE) {
3104                         min -= min / 2;
3105 
3106                         /*
3107                          * Non-blocking allocations (e.g. GFP_ATOMIC) can
3108                          * access more reserves than just __GFP_HIGH. Other
3109                          * non-blocking allocation requests such as GFP_NOWAIT
3110                          * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get
3111                          * access to the min reserve.
3112                          */
3113                         if (alloc_flags & ALLOC_NON_BLOCK)
3114                                 min -= min / 4;
3115                 }
3116 
3117                 /*
3118                  * OOM victims can try even harder than the normal reserve
3119                  * users on the grounds that the task is definitely going to be
3120                  * in the exit path shortly and free memory. Any allocation it
3121                  * makes during the free path will be small and short-lived.
3122                  */
3123                 if (alloc_flags & ALLOC_OOM)
3124                         min -= min / 2;
3125         }
3126 
3127         /*
3128          * Check watermarks for an order-0 allocation request. If these
3129          * are not met, then a high-order request also cannot go ahead
3130          * even if a suitable page happened to be free.
3131          */
3132         if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3133                 return false;
3134 
3135         /* If this is an order-0 request then the watermark is fine */
3136         if (!order)
3137                 return true;
3138 
3139         /* For a high-order request, check that at least one suitable page is free */
3140         for (o = order; o < NR_PAGE_ORDERS; o++) {
3141                 struct free_area *area = &z->free_area[o];
3142                 int mt;
3143 
3144                 if (!area->nr_free)
3145                         continue;
3146 
3147                 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3148                         if (!free_area_empty(area, mt))
3149                                 return true;
3150                 }
3151 
3152 #ifdef CONFIG_CMA
3153                 if ((alloc_flags & ALLOC_CMA) &&
3154                     !free_area_empty(area, MIGRATE_CMA)) {
3155                         return true;
3156                 }
3157 #endif
3158                 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
3159                     !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
3160                         return true;
3161                 }
3162         }
3163         return false;
3164 }
3165 
3166 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3167                       int highest_zoneidx, unsigned int alloc_flags)
3168 {
3169         return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3170                                         zone_page_state(z, NR_FREE_PAGES));
3171 }
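
/*
 * Worked example (illustrative): with mark = 1024 pages, a GFP_ATOMIC
 * request (ALLOC_MIN_RESERVE | ALLOC_NON_BLOCK) ends up with
 * min = 1024 - 512 - 128 = 384, while a request with only ALLOC_OOM
 * set gets min = 512. The deeper the discount, the more of the min
 * reserve the request is allowed to consume.
 */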
3172 
3173 static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3174                                 unsigned long mark, int highest_zoneidx,
3175                                 unsigned int alloc_flags, gfp_t gfp_mask)
3176 {
3177         long free_pages;
3178 
3179         free_pages = zone_page_state(z, NR_FREE_PAGES);
3180 
3181         /*
3182          * Fast check for order-0 only. If this fails then the reserves
3183          * need to be calculated.
3184          */
3185         if (!order) {
3186                 long usable_free;
3187                 long reserved;
3188 
3189                 usable_free = free_pages;
3190                 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags);
3191 
3192                 /* reserved may overestimate high-atomic reserves. */
3193                 usable_free -= min(usable_free, reserved);
3194                 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx])
3195                         return true;
3196         }
3197 
3198         if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3199                                         free_pages))
3200                 return true;
3201 
3202         /*
3203          * Ignore watermark boosting for __GFP_HIGH order-0 allocations
3204          * when checking the min watermark. The min watermark is the
3205          * point where boosting is ignored so that kswapd is woken up
3206          * when below the low watermark.
3207          */
3208         if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost
3209                 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3210                 mark = z->_watermark[WMARK_MIN];
3211                 return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3212                                         alloc_flags, free_pages);
3213         }
3214 
3215         return false;
3216 }
3217 
3218 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3219                         unsigned long mark, int highest_zoneidx)
3220 {
3221         long free_pages = zone_page_state(z, NR_FREE_PAGES);
3222 
3223         if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3224                 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3225 
3226         return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
3227                                                                 free_pages);
3228 }
3229 
3230 #ifdef CONFIG_NUMA
3231 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
3232 
3233 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3234 {
3235         return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3236                                 node_reclaim_distance;
3237 }
3238 #else   /* CONFIG_NUMA */
3239 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3240 {
3241         return true;
3242 }
3243 #endif  /* CONFIG_NUMA */
3244 
3245 /*
3246  * The restriction to ZONE_DMA32 as the only zone suitable for avoiding
3247  * fragmentation is subtle. If the preferred zone was HIGHMEM then
3248  * premature use of a lower zone may cause lowmem pressure problems that
3249  * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3250  * probably too small. It only makes sense to spread allocations to avoid
3251  * fragmentation between the Normal and DMA32 zones.
3252  */
3253 static inline unsigned int
3254 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3255 {
3256         unsigned int alloc_flags;
3257 
3258         /*
3259          * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3260          * to save a branch.
3261          */
3262         alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3263 
3264 #ifdef CONFIG_ZONE_DMA32
3265         if (!zone)
3266                 return alloc_flags;
3267 
3268         if (zone_idx(zone) != ZONE_NORMAL)
3269                 return alloc_flags;
3270 
3271         /*
3272          * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3273          * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3274          * on UMA that if Normal is populated then so is DMA32.
3275          */
3276         BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3277         if (nr_online_nodes > 1 && !populated_zone(--zone))
3278                 return alloc_flags;
3279 
3280         alloc_flags |= ALLOC_NOFRAGMENT;
3281 #endif /* CONFIG_ZONE_DMA32 */
3282         return alloc_flags;
3283 }
3284 
3285 /* Must be called after current_gfp_context() which can change gfp_mask */
3286 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3287                                                   unsigned int alloc_flags)
3288 {
3289 #ifdef CONFIG_CMA
3290         if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3291                 alloc_flags |= ALLOC_CMA;
3292 #endif
3293         return alloc_flags;
3294 }
3295 
3296 /*
3297  * get_page_from_freelist goes through the zonelist trying to allocate
3298  * a page.
3299  */
3300 static struct page *
3301 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3302                                                 const struct alloc_context *ac)
3303 {
3304         struct zoneref *z;
3305         struct zone *zone;
3306         struct pglist_data *last_pgdat = NULL;
3307         bool last_pgdat_dirty_ok = false;
3308         bool no_fallback;
3309 
3310 retry:
3311         /*
3312          * Scan zonelist, looking for a zone with enough free pages.
3313          * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c.
3314          */
3315         no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3316         z = ac->preferred_zoneref;
3317         for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3318                                         ac->nodemask) {
3319                 struct page *page;
3320                 unsigned long mark;
3321 
3322                 if (cpusets_enabled() &&
3323                         (alloc_flags & ALLOC_CPUSET) &&
3324                         !__cpuset_zone_allowed(zone, gfp_mask))
3325                                 continue;
3326                 /*
3327                  * When allocating a page cache page for writing, we
3328                  * want to get it from a node that is within its dirty
3329                  * limit, such that no single node holds more than its
3330                  * proportional share of globally allowed dirty pages.
3331                  * The dirty limits take into account the node's
3332                  * lowmem reserves and high watermark so that kswapd
3333                  * should be able to balance it without having to
3334                  * write pages from its LRU list.
3335                  *
3336                  * XXX: For now, allow allocations to potentially
3337                  * exceed the per-node dirty limit in the slowpath
3338                  * (spread_dirty_pages unset) before going into reclaim,
3339                  * which is important when on a NUMA setup the allowed
3340                  * nodes are together not big enough to reach the
3341                  * global limit.  The proper fix for these situations
3342                  * will require awareness of nodes in the
3343                  * dirty-throttling and the flusher threads.
3344                  */
3345                 if (ac->spread_dirty_pages) {
3346                         if (last_pgdat != zone->zone_pgdat) {
3347                                 last_pgdat = zone->zone_pgdat;
3348                                 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
3349                         }
3350 
3351                         if (!last_pgdat_dirty_ok)
3352                                 continue;
3353                 }
3354 
3355                 if (no_fallback && nr_online_nodes > 1 &&
3356                     zone != ac->preferred_zoneref->zone) {
3357                         int local_nid;
3358 
3359                         /*
3360                          * If moving to a remote node, retry but allow
3361                          * fragmenting fallbacks. Locality is more important
3362                          * than fragmentation avoidance.
3363                          */
3364                         local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3365                         if (zone_to_nid(zone) != local_nid) {
3366                                 alloc_flags &= ~ALLOC_NOFRAGMENT;
3367                                 goto retry;
3368                         }
3369                 }
3370 
3371                 /*
3372                  * Detect whether the number of free pages is below the high
3373                  * watermark.  If so, we will decrease pcp->high and free
3374                  * PCP pages in the free path to reduce the possibility of
3375                  * premature page reclaiming.  Detection is done here to
3376                  * avoid doing that in the hotter free path.
3377                  */
3378                 if (test_bit(ZONE_BELOW_HIGH, &zone->flags))
3379                         goto check_alloc_wmark;
3380 
3381                 mark = high_wmark_pages(zone);
3382                 if (zone_watermark_fast(zone, order, mark,
3383                                         ac->highest_zoneidx, alloc_flags,
3384                                         gfp_mask))
3385                         goto try_this_zone;
3386                 else
3387                         set_bit(ZONE_BELOW_HIGH, &zone->flags);
3388 
3389 check_alloc_wmark:
3390                 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3391                 if (!zone_watermark_fast(zone, order, mark,
3392                                        ac->highest_zoneidx, alloc_flags,
3393                                        gfp_mask)) {
3394                         int ret;
3395 
3396                         if (has_unaccepted_memory()) {
3397                                 if (try_to_accept_memory(zone, order))
3398                                         goto try_this_zone;
3399                         }
3400 
3401 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3402                         /*
3403                          * Watermark failed for this zone, but see if we can
3404                          * grow this zone if it contains deferred pages.
3405                          */
3406                         if (deferred_pages_enabled()) {
3407                                 if (_deferred_grow_zone(zone, order))
3408                                         goto try_this_zone;
3409                         }
3410 #endif
3411                         /* Checked here to keep the fast path fast */
3412                         BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3413                         if (alloc_flags & ALLOC_NO_WATERMARKS)
3414                                 goto try_this_zone;
3415 
3416                         if (!node_reclaim_enabled() ||
3417                             !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3418                                 continue;
3419 
3420                         ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3421                         switch (ret) {
3422                         case NODE_RECLAIM_NOSCAN:
3423                                 /* did not scan */
3424                                 continue;
3425                         case NODE_RECLAIM_FULL:
3426                                 /* scanned but unreclaimable */
3427                                 continue;
3428                         default:
3429                                 /* did we reclaim enough */
3430                                 if (zone_watermark_ok(zone, order, mark,
3431                                         ac->highest_zoneidx, alloc_flags))
3432                                         goto try_this_zone;
3433 
3434                                 continue;
3435                         }
3436                 }
3437 
3438 try_this_zone:
3439                 page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3440                                 gfp_mask, alloc_flags, ac->migratetype);
3441                 if (page) {
3442                         prep_new_page(page, order, gfp_mask, alloc_flags);
3443 
3444                         /*
3445                          * If this is a high-order atomic allocation then check
3446                          * if the pageblock should be reserved for the future
3447                          */
3448                         if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
3449                                 reserve_highatomic_pageblock(page, order, zone);
3450 
3451                         return page;
3452                 } else {
3453                         if (has_unaccepted_memory()) {
3454                                 if (try_to_accept_memory(zone, order))
3455                                         goto try_this_zone;
3456                         }
3457 
3458 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3459                         /* Try again if zone has deferred pages */
3460                         if (deferred_pages_enabled()) {
3461                                 if (_deferred_grow_zone(zone, order))
3462                                         goto try_this_zone;
3463                         }
3464 #endif
3465                 }
3466         }
3467 
3468         /*
3469          * On a UMA machine it's possible to get through all zones and find
3470          * each one fragmented. If avoiding fragmentation, reset and try again.
3471          */
3472         if (no_fallback) {
3473                 alloc_flags &= ~ALLOC_NOFRAGMENT;
3474                 goto retry;
3475         }
3476 
3477         return NULL;
3478 }
3479 
3480 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3481 {
3482         unsigned int filter = SHOW_MEM_FILTER_NODES;
3483 
3484         /*
3485          * This documents exceptions given to allocations in certain
3486          * contexts that are allowed to allocate outside current's set
3487          * of allowed nodes.
3488          */
3489         if (!(gfp_mask & __GFP_NOMEMALLOC))
3490                 if (tsk_is_oom_victim(current) ||
3491                     (current->flags & (PF_MEMALLOC | PF_EXITING)))
3492                         filter &= ~SHOW_MEM_FILTER_NODES;
3493         if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3494                 filter &= ~SHOW_MEM_FILTER_NODES;
3495 
3496         __show_mem(filter, nodemask, gfp_zone(gfp_mask));
3497 }
3498 
3499 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3500 {
3501         struct va_format vaf;
3502         va_list args;
3503         static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
3504 
3505         if ((gfp_mask & __GFP_NOWARN) ||
3506              !__ratelimit(&nopage_rs) ||
3507              ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
3508                 return;
3509 
3510         va_start(args, fmt);
3511         vaf.fmt = fmt;
3512         vaf.va = &args;
3513         pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
3514                         current->comm, &vaf, gfp_mask, &gfp_mask,
3515                         nodemask_pr_args(nodemask));
3516         va_end(args);
3517 
3518         cpuset_print_current_mems_allowed();
3519         pr_cont("\n");
3520         dump_stack();
3521         warn_alloc_show_mem(gfp_mask, nodemask);
3522 }
3523 
3524 static inline struct page *
3525 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3526                               unsigned int alloc_flags,
3527                               const struct alloc_context *ac)
3528 {
3529         struct page *page;
3530 
3531         page = get_page_from_freelist(gfp_mask, order,
3532                         alloc_flags|ALLOC_CPUSET, ac);
3533         /*
3534          * fall back to ignoring the cpuset restriction if our nodes
3535          * are depleted
3536          */
3537         if (!page)
3538                 page = get_page_from_freelist(gfp_mask, order,
3539                                 alloc_flags, ac);
3540 
3541         return page;
3542 }
3543 
3544 static inline struct page *
3545 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3546         const struct alloc_context *ac, unsigned long *did_some_progress)
3547 {
3548         struct oom_control oc = {
3549                 .zonelist = ac->zonelist,
3550                 .nodemask = ac->nodemask,
3551                 .memcg = NULL,
3552                 .gfp_mask = gfp_mask,
3553                 .order = order,
3554         };
3555         struct page *page;
3556 
3557         *did_some_progress = 0;
3558 
3559         /*
3560          * Acquire the oom lock.  If that fails, somebody else is
3561          * making progress for us.
3562          */
3563         if (!mutex_trylock(&oom_lock)) {
3564                 *did_some_progress = 1;
3565                 schedule_timeout_uninterruptible(1);
3566                 return NULL;
3567         }
3568 
3569         /*
3570          * Go through the zonelist one more time with a very high watermark,
3571          * only to catch a parallel OOM kill; we must fail if we're still
3572          * under heavy pressure. Also make sure this reclaim attempt does not
3573          * depend on a __GFP_DIRECT_RECLAIM && !__GFP_NORETRY allocation,
3574          * which can never fail while oom_lock is already held.
3575          */
3576         page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3577                                       ~__GFP_DIRECT_RECLAIM, order,
3578                                       ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
3579         if (page)
3580                 goto out;
3581 
3582         /* Coredumps can quickly deplete all memory reserves */
3583         if (current->flags & PF_DUMPCORE)
3584                 goto out;
3585         /* The OOM killer will not help higher order allocs */
3586         if (order > PAGE_ALLOC_COSTLY_ORDER)
3587                 goto out;
3588         /*
3589          * We have already exhausted all our reclaim opportunities without any
3590          * success so it is time to admit defeat. We will skip the OOM killer
3591          * because it is very likely that the caller has a more reasonable
3592          * fallback than shooting a random task.
3593          *
3594          * The OOM killer may not free memory on a specific node.
3595          */
3596         if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
3597                 goto out;
3598         /* The OOM killer does not needlessly kill tasks for lowmem */
3599         if (ac->highest_zoneidx < ZONE_NORMAL)
3600                 goto out;
3601         if (pm_suspended_storage())
3602                 goto out;
3603         /*
3604          * XXX: GFP_NOFS allocations should rather fail than rely on
3605          * other requests to make forward progress.
3606          * We are in an unfortunate situation where out_of_memory cannot
3607          * do much for this context but let's try it to at least get
3608          * access to memory reserves if the current task is killed (see
3609          * out_of_memory). Once filesystems are ready to handle allocation
3610          * failures more gracefully we should just bail out here.
3611          */
3612 
3613         /* Exhausted what can be done so it's blame time */
3614         if (out_of_memory(&oc) ||
3615             WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
3616                 *did_some_progress = 1;
3617 
3618                 /*
3619                  * Help non-failing allocations by giving them access to memory
3620                  * reserves
3621                  */
3622                 if (gfp_mask & __GFP_NOFAIL)
3623                         page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3624                                         ALLOC_NO_WATERMARKS, ac);
3625         }
3626 out:
3627         mutex_unlock(&oom_lock);
3628         return page;
3629 }
3630 
3631 /*
3632  * Maximum number of compaction retries with progress before the OOM
3633  * killer is considered the only way to move forward.
3634  */
3635 #define MAX_COMPACT_RETRIES 16
3636 
3637 #ifdef CONFIG_COMPACTION
3638 /* Try memory compaction for high-order allocations before reclaim */
3639 static struct page *
3640 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3641                 unsigned int alloc_flags, const struct alloc_context *ac,
3642                 enum compact_priority prio, enum compact_result *compact_result)
3643 {
3644         struct page *page = NULL;
3645         unsigned long pflags;
3646         unsigned int noreclaim_flag;
3647 
3648         if (!order)
3649                 return NULL;
3650 
3651         psi_memstall_enter(&pflags);
3652         delayacct_compact_start();
3653         noreclaim_flag = memalloc_noreclaim_save();
3654 
3655         *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3656                                                                 prio, &page);
3657 
3658         memalloc_noreclaim_restore(noreclaim_flag);
3659         psi_memstall_leave(&pflags);
3660         delayacct_compact_end();
3661 
3662         if (*compact_result == COMPACT_SKIPPED)
3663                 return NULL;
3664         /*
3665          * In at least one zone, compaction wasn't deferred or skipped, so let's
3666          * count a compaction stall
3667          */
3668         count_vm_event(COMPACTSTALL);
3669 
3670         /* Prep a captured page if available */
3671         if (page)
3672                 prep_new_page(page, order, gfp_mask, alloc_flags);
3673 
3674         /* Try to get a page from the freelist if available */
3675         if (!page)
3676                 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3677 
3678         if (page) {
3679                 struct zone *zone = page_zone(page);
3680 
3681                 zone->compact_blockskip_flush = false;
3682                 compaction_defer_reset(zone, order, true);
3683                 count_vm_event(COMPACTSUCCESS);
3684                 return page;
3685         }
3686 
3687         /*
3688          * It's bad if a compaction run occurs and fails. The most likely reason
3689          * is that pages exist, but not enough to satisfy watermarks.
3690          */
3691         count_vm_event(COMPACTFAIL);
3692 
3693         cond_resched();
3694 
3695         return NULL;
3696 }
3697 
3698 static inline bool
3699 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3700                      enum compact_result compact_result,
3701                      enum compact_priority *compact_priority,
3702                      int *compaction_retries)
3703 {
3704         int max_retries = MAX_COMPACT_RETRIES;
3705         int min_priority;
3706         bool ret = false;
3707         int retries = *compaction_retries;
3708         enum compact_priority priority = *compact_priority;
3709 
3710         if (!order)
3711                 return false;
3712 
3713         if (fatal_signal_pending(current))
3714                 return false;
3715 
3716         /*
3717          * Compaction was skipped due to a lack of free order-0
3718          * migration targets. Continue if reclaim can help.
3719          */
3720         if (compact_result == COMPACT_SKIPPED) {
3721                 ret = compaction_zonelist_suitable(ac, order, alloc_flags);
3722                 goto out;
3723         }
3724 
3725         /*
3726          * Compaction managed to coalesce some page blocks, but the
3727          * allocation failed, presumably due to a race. Retry a few times.
3728          */
3729         if (compact_result == COMPACT_SUCCESS) {
3730                 /*
3731                  * !costly requests are much more important than
3732                  * __GFP_RETRY_MAYFAIL costly ones: they are de facto
3733                  * nofail and invoke the OOM killer to move on, while
3734                  * costly requests can fail and their users are ready to
3735                  * cope with that. 1/4 of the retries is rather arbitrary,
3736                  * but we would need much more detailed feedback from
3737                  * compaction to make a better decision.
3738                  */
3739                 if (order > PAGE_ALLOC_COSTLY_ORDER)
3740                         max_retries /= 4;
3741 
3742                 if (++(*compaction_retries) <= max_retries) {
3743                         ret = true;
3744                         goto out;
3745                 }
3746         }
3747 
3748         /*
3749          * Compaction failed. Retry with increasing priority.
3750          */
3751         min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
3752                         MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
3753 
3754         if (*compact_priority > min_priority) {
3755                 (*compact_priority)--;
3756                 *compaction_retries = 0;
3757                 ret = true;
3758         }
3759 out:
3760         trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
3761         return ret;
3762 }
3763 #else
3764 static inline struct page *
3765 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3766                 unsigned int alloc_flags, const struct alloc_context *ac,
3767                 enum compact_priority prio, enum compact_result *compact_result)
3768 {
3769         *compact_result = COMPACT_SKIPPED;
3770         return NULL;
3771 }
3772 
3773 static inline bool
3774 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3775                      enum compact_result compact_result,
3776                      enum compact_priority *compact_priority,
3777                      int *compaction_retries)
3778 {
3779         struct zone *zone;
3780         struct zoneref *z;
3781 
3782         if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
3783                 return false;
3784 
3785         /*
3786          * There are setups with compaction disabled which would prefer to loop
3787          * inside the allocator rather than hit the oom killer prematurely.
3788          * Let's give them a good hope and keep retrying while the order-0
3789          * watermarks are OK.
3790          */
3791         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
3792                                 ac->highest_zoneidx, ac->nodemask) {
3793                 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
3794                                         ac->highest_zoneidx, alloc_flags))
3795                         return true;
3796         }
3797         return false;
3798 }
3799 #endif /* CONFIG_COMPACTION */
3800 
3801 #ifdef CONFIG_LOCKDEP
3802 static struct lockdep_map __fs_reclaim_map =
3803         STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
3804 
3805 static bool __need_reclaim(gfp_t gfp_mask)
3806 {
3807         /* no reclaim without waiting on it */
3808         if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
3809                 return false;
3810 
3811         /* this task won't enter reclaim */
3812         if (current->flags & PF_MEMALLOC)
3813                 return false;
3814 
3815         if (gfp_mask & __GFP_NOLOCKDEP)
3816                 return false;
3817 
3818         return true;
3819 }
3820 
3821 void __fs_reclaim_acquire(unsigned long ip)
3822 {
3823         lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip);
3824 }
3825 
3826 void __fs_reclaim_release(unsigned long ip)
3827 {
3828         lock_release(&__fs_reclaim_map, ip);
3829 }
3830 
3831 void fs_reclaim_acquire(gfp_t gfp_mask)
3832 {
3833         gfp_mask = current_gfp_context(gfp_mask);
3834 
3835         if (__need_reclaim(gfp_mask)) {
3836                 if (gfp_mask & __GFP_FS)
3837                         __fs_reclaim_acquire(_RET_IP_);
3838 
3839 #ifdef CONFIG_MMU_NOTIFIER
3840                 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
3841                 lock_map_release(&__mmu_notifier_invalidate_range_start_map);
3842 #endif
3843 
3844         }
3845 }
3846 EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
3847 
3848 void fs_reclaim_release(gfp_t gfp_mask)
3849 {
3850         gfp_mask = current_gfp_context(gfp_mask);
3851 
3852         if (__need_reclaim(gfp_mask)) {
3853                 if (gfp_mask & __GFP_FS)
3854                         __fs_reclaim_release(_RET_IP_);
3855         }
3856 }
3857 EXPORT_SYMBOL_GPL(fs_reclaim_release);
3858 #endif
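
/*
 * Illustrative sketch (not part of the kernel source): how a caller can
 * assert to lockdep that it may enter reclaim, which is essentially
 * what might_alloc() does. demo_assert_reclaim_safe() is a hypothetical
 * name.
 */
#if 0
static void demo_assert_reclaim_safe(void)
{
        /* "An allocation here could recurse into FS reclaim." */
        fs_reclaim_acquire(GFP_KERNEL);
        fs_reclaim_release(GFP_KERNEL);
}
#endif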
3859 
3860 /*
3861  * Zonelists may change due to hotplug during allocation. Detect when zonelists
3862  * have been rebuilt so the allocation can be retried. The reader side does
3863  * not lock and retries the allocation if the zonelist changes. The writer
3864  * side is protected by the embedded spin_lock.
3865  */
3866 static DEFINE_SEQLOCK(zonelist_update_seq);
3867 
3868 static unsigned int zonelist_iter_begin(void)
3869 {
3870         if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
3871                 return read_seqbegin(&zonelist_update_seq);
3872 
3873         return 0;
3874 }
3875 
3876 static unsigned int check_retry_zonelist(unsigned int seq)
3877 {
3878         if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
3879                 return read_seqretry(&zonelist_update_seq, seq);
3880 
3881         return seq;
3882 }
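
/*
 * Illustrative sketch (not part of the kernel source): the read-side
 * pattern the two helpers above implement, mirroring its use in
 * __alloc_pages_slowpath() below.
 */
#if 0
        unsigned int cookie = zonelist_iter_begin();

        /* ... walk the zonelist and possibly fail ... */

        if (check_retry_zonelist(cookie)) {
                /* Zonelists were rebuilt by hotplug; restart the walk. */
                goto restart;
        }
#endif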
3883 
3884 /* Perform direct synchronous page reclaim */
3885 static unsigned long
3886 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3887                                         const struct alloc_context *ac)
3888 {
3889         unsigned int noreclaim_flag;
3890         unsigned long progress;
3891 
3892         cond_resched();
3893 
3894         /* We now go into synchronous reclaim */
3895         cpuset_memory_pressure_bump();
3896         fs_reclaim_acquire(gfp_mask);
3897         noreclaim_flag = memalloc_noreclaim_save();
3898 
3899         progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3900                                                                 ac->nodemask);
3901 
3902         memalloc_noreclaim_restore(noreclaim_flag);
3903         fs_reclaim_release(gfp_mask);
3904 
3905         cond_resched();
3906 
3907         return progress;
3908 }
3909 
3910 /* The really slow allocator path where we enter direct reclaim */
3911 static inline struct page *
3912 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3913                 unsigned int alloc_flags, const struct alloc_context *ac,
3914                 unsigned long *did_some_progress)
3915 {
3916         struct page *page = NULL;
3917         unsigned long pflags;
3918         bool drained = false;
3919 
3920         psi_memstall_enter(&pflags);
3921         *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3922         if (unlikely(!(*did_some_progress)))
3923                 goto out;
3924 
3925 retry:
3926         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3927 
3928         /*
3929          * If an allocation failed after direct reclaim, it could be because
3930          * pages are pinned on the per-cpu lists or in the highatomic
3931          * reserves. Shrink them and try again.
3932          */
3933         if (!page && !drained) {
3934                 unreserve_highatomic_pageblock(ac, false);
3935                 drain_all_pages(NULL);
3936                 drained = true;
3937                 goto retry;
3938         }
3939 out:
3940         psi_memstall_leave(&pflags);
3941 
3942         return page;
3943 }
3944 
3945 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
3946                              const struct alloc_context *ac)
3947 {
3948         struct zoneref *z;
3949         struct zone *zone;
3950         pg_data_t *last_pgdat = NULL;
3951         enum zone_type highest_zoneidx = ac->highest_zoneidx;
3952 
3953         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
3954                                         ac->nodemask) {
3955                 if (!managed_zone(zone))
3956                         continue;
3957                 if (last_pgdat != zone->zone_pgdat) {
3958                         wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
3959                         last_pgdat = zone->zone_pgdat;
3960                 }
3961         }
3962 }
3963 
3964 static inline unsigned int
3965 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
3966 {
3967         unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
3968 
3969         /*
3970          * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE
3971          * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3972          * to save two branches.
3973          */
3974         BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE);
3975         BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
3976 
3977         /*
3978          * The caller may dip into page reserves a bit more if the caller
3979          * cannot run direct reclaim, or if the caller has a realtime scheduling
3980          * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
3981          * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH).
3982          */
3983         alloc_flags |= (__force int)
3984                 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
3985 
3986         if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
3987                 /*
3988                  * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
3989                  * if it can't schedule.
3990                  */
3991                 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
3992                         alloc_flags |= ALLOC_NON_BLOCK;
3993 
3994                         if (order > 0)
3995                                 alloc_flags |= ALLOC_HIGHATOMIC;
3996                 }
3997 
3998                 /*
3999                  * Ignore cpuset mems for non-blocking __GFP_HIGH (probably
4000                  * GFP_ATOMIC) rather than fail, see the comment for
4001                  * cpuset_node_allowed().
4002                  */
4003                 if (alloc_flags & ALLOC_MIN_RESERVE)
4004                         alloc_flags &= ~ALLOC_CPUSET;
4005         } else if (unlikely(rt_task(current)) && in_task())
4006                 alloc_flags |= ALLOC_MIN_RESERVE;
4007 
4008         alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4009 
4010         return alloc_flags;
4011 }
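
/*
 * Worked example (illustrative): GFP_ATOMIC is __GFP_HIGH |
 * __GFP_KSWAPD_RECLAIM, so the function above yields ALLOC_WMARK_MIN |
 * ALLOC_MIN_RESERVE | ALLOC_KSWAPD | ALLOC_NON_BLOCK (plus
 * ALLOC_HIGHATOMIC for order > 0), with ALLOC_CPUSET dropped again
 * because ALLOC_MIN_RESERVE is set.
 */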
4012 
4013 static bool oom_reserves_allowed(struct task_struct *tsk)
4014 {
4015         if (!tsk_is_oom_victim(tsk))
4016                 return false;
4017 
4018         /*
4019          * !MMU kernels don't have an OOM reaper, so give access to memory
4020          * reserves
4020          * only to the thread with TIF_MEMDIE set
4021          */
4022         if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4023                 return false;
4024 
4025         return true;
4026 }
4027 
4028 /*
4029  * Distinguish requests which really need access to full memory
4030  * reserves from oom victims which can live with a portion of it
4031  */
4032 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4033 {
4034         if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4035                 return 0;
4036         if (gfp_mask & __GFP_MEMALLOC)
4037                 return ALLOC_NO_WATERMARKS;
4038         if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4039                 return ALLOC_NO_WATERMARKS;
4040         if (!in_interrupt()) {
4041                 if (current->flags & PF_MEMALLOC)
4042                         return ALLOC_NO_WATERMARKS;
4043                 else if (oom_reserves_allowed(current))
4044                         return ALLOC_OOM;
4045         }
4046 
4047         return 0;
4048 }
4049 
4050 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4051 {
4052         return !!__gfp_pfmemalloc_flags(gfp_mask);
4053 }
4054 
4055 /*
4056  * Checks whether it makes sense to retry the reclaim to make forward progress
4057  * for the given allocation request.
4058  *
4059  * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4060  * without success, or when we couldn't even meet the watermark if we
4061  * reclaimed all remaining pages on the LRU lists.
4062  *
4063  * Returns true if a retry is viable or false to enter the oom path.
4064  */
4065 static inline bool
4066 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4067                      struct alloc_context *ac, int alloc_flags,
4068                      bool did_some_progress, int *no_progress_loops)
4069 {
4070         struct zone *zone;
4071         struct zoneref *z;
4072         bool ret = false;
4073 
4074         /*
4075          * Costly allocations might have made progress, but this doesn't mean
4076          * their order will become available due to high fragmentation, so
4077          * always increment the no-progress counter for them
4078          */
4079         if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4080                 *no_progress_loops = 0;
4081         else
4082                 (*no_progress_loops)++;
4083 
4084         if (*no_progress_loops > MAX_RECLAIM_RETRIES)
4085                 goto out;
4086 
4087 
4088         /*
4089          * Keep reclaiming pages while there is a chance this will lead
4090          * somewhere.  If none of the target zones can satisfy our allocation
4091          * request even if all reclaimable pages are considered then we are
4092          * screwed and have to go OOM.
4093          */
4094         for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4095                                 ac->highest_zoneidx, ac->nodemask) {
4096                 unsigned long available;
4097                 unsigned long reclaimable;
4098                 unsigned long min_wmark = min_wmark_pages(zone);
4099                 bool wmark;
4100 
4101                 available = reclaimable = zone_reclaimable_pages(zone);
4102                 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4103 
4104                 /*
4105                  * Would the allocation succeed if we reclaimed all
4106                  * reclaimable pages?
4107                  */
4108                 wmark = __zone_watermark_ok(zone, order, min_wmark,
4109                                 ac->highest_zoneidx, alloc_flags, available);
4110                 trace_reclaim_retry_zone(z, order, reclaimable,
4111                                 available, min_wmark, *no_progress_loops, wmark);
4112                 if (wmark) {
4113                         ret = true;
4114                         break;
4115                 }
4116         }
4117 
4118         /*
4119          * Memory allocation/reclaim might be called from a WQ context and the
4120          * current implementation of the WQ concurrency control doesn't
4121          * recognize that a particular WQ is congested if the worker thread is
4122          * looping without ever sleeping. Therefore we have to do a short sleep
4123          * here rather than calling cond_resched().
4124          */
4125         if (current->flags & PF_WQ_WORKER)
4126                 schedule_timeout_uninterruptible(1);
4127         else
4128                 cond_resched();
4129 out:
4130         /* Before OOM, exhaust highatomic_reserve */
4131         if (!ret)
4132                 return unreserve_highatomic_pageblock(ac, true);
4133 
4134         return ret;
4135 }
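
/*
 * Editor's sketch (not part of the kernel source): the retry accounting
 * above, modelled in isolation.  MAX_RECLAIM_RETRIES is assumed to be 16
 * as in mm/internal.h; try_reclaim() is a hypothetical stand-in for one
 * direct reclaim pass:
 *
 *	int no_progress_loops = 0;
 *
 *	for (;;) {
 *		bool progress = try_reclaim();
 *
 *		// non-costly orders reset the counter on progress;
 *		// costly orders always increment it
 *		if (progress && order <= PAGE_ALLOC_COSTLY_ORDER)
 *			no_progress_loops = 0;
 *		else
 *			no_progress_loops++;
 *
 *		if (no_progress_loops > MAX_RECLAIM_RETRIES)
 *			break;	// give up and take the OOM path
 *	}
 */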
4136 
4137 static inline bool
4138 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4139 {
4140         /*
4141          * It's possible that cpuset's mems_allowed and the nodemask from
4142          * mempolicy don't intersect. This should normally be dealt with by
4143          * policy_nodemask(), but it's possible to race with a cpuset update in
4144          * such a way that the check therein was true, and then it became false
4145          * before we got our cpuset_mems_cookie here.
4146          * This assumes that for all allocations, ac->nodemask can come only
4147          * from MPOL_BIND mempolicy (whose documented semantics are to be ignored
4148          * when it does not intersect with the cpuset restrictions) or the
4149          * caller can deal with a violated nodemask.
4150          */
4151         if (cpusets_enabled() && ac->nodemask &&
4152                         !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4153                 ac->nodemask = NULL;
4154                 return true;
4155         }
4156 
4157         /*
4158          * When updating a task's mems_allowed or mempolicy nodemask, it is
4159          * possible to race with parallel threads in such a way that our
4160          * allocation can fail while the mask is being updated. If we are about
4161          * to fail, check if the cpuset changed during allocation and if so,
4162          * retry.
4163          */
4164         if (read_mems_allowed_retry(cpuset_mems_cookie))
4165                 return true;
4166 
4167         return false;
4168 }
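
/*
 * Editor's sketch: the cookie checked above follows the usual
 * mems_allowed seqcount read-side idiom, roughly:
 *
 *	unsigned int cookie;
 *	bool failed;
 *
 *	do {
 *		cookie = read_mems_allowed_begin();
 *		failed = !try_to_allocate();	// hypothetical helper
 *	} while (failed && read_mems_allowed_retry(cookie));
 */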
4169 
4170 static inline struct page *
4171 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4172                                                 struct alloc_context *ac)
4173 {
4174         bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4175         bool can_compact = gfp_compaction_allowed(gfp_mask);
4176         const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4177         struct page *page = NULL;
4178         unsigned int alloc_flags;
4179         unsigned long did_some_progress;
4180         enum compact_priority compact_priority;
4181         enum compact_result compact_result;
4182         int compaction_retries;
4183         int no_progress_loops;
4184         unsigned int cpuset_mems_cookie;
4185         unsigned int zonelist_iter_cookie;
4186         int reserve_flags;
4187 
4188 restart:
4189         compaction_retries = 0;
4190         no_progress_loops = 0;
4191         compact_priority = DEF_COMPACT_PRIORITY;
4192         cpuset_mems_cookie = read_mems_allowed_begin();
4193         zonelist_iter_cookie = zonelist_iter_begin();
4194 
4195         /*
4196          * The fast path uses conservative alloc_flags to succeed only until
4197          * kswapd needs to be woken up, and to avoid the cost of setting up
4198          * alloc_flags precisely. So we do that now.
4199          */
4200         alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
4201 
4202         /*
4203          * We need to recalculate the starting point for the zonelist iterator
4204          * because we might have used a different nodemask in the fast path, or
4205          * there was a cpuset modification and we are retrying - otherwise we
4206          * could end up iterating over non-eligible zones endlessly.
4207          */
4208         ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4209                                         ac->highest_zoneidx, ac->nodemask);
4210         if (!ac->preferred_zoneref->zone)
4211                 goto nopage;
4212 
4213         /*
4214          * Check for insane configurations where the cpuset doesn't contain
4215          * any suitable zone to satisfy the request - e.g. non-movable
4216          * GFP_HIGHUSER allocations from MOVABLE nodes only.
4217          */
4218         if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
4219                 struct zoneref *z = first_zones_zonelist(ac->zonelist,
4220                                         ac->highest_zoneidx,
4221                                         &cpuset_current_mems_allowed);
4222                 if (!z->zone)
4223                         goto nopage;
4224         }
4225 
4226         if (alloc_flags & ALLOC_KSWAPD)
4227                 wake_all_kswapds(order, gfp_mask, ac);
4228 
4229         /*
4230          * The adjusted alloc_flags might result in immediate success, so try
4231          * that first
4232          */
4233         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4234         if (page)
4235                 goto got_pg;
4236 
4237         /*
4238          * For costly allocations, try direct compaction first, as it's likely
4239          * that we have enough base pages and don't need to reclaim. For non-
4240          * movable high-order allocations, do that as well, as compaction will
4241          * try to prevent permanent fragmentation by migrating from blocks of the
4242          * same migratetype.
4243          * Don't try this for allocations that are allowed to ignore
4244          * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4245          */
4246         if (can_direct_reclaim && can_compact &&
4247                         (costly_order ||
4248                            (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4249                         && !gfp_pfmemalloc_allowed(gfp_mask)) {
4250                 page = __alloc_pages_direct_compact(gfp_mask, order,
4251                                                 alloc_flags, ac,
4252                                                 INIT_COMPACT_PRIORITY,
4253                                                 &compact_result);
4254                 if (page)
4255                         goto got_pg;
4256 
4257                 /*
4258                  * Checks for costly allocations with __GFP_NORETRY, which
4259                  * includes some THP page fault allocations
4260                  */
4261                 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4262                         /*
4263                          * If allocating entire pageblock(s) and compaction
4264                          * failed because all zones are below their low
4265                          * watermarks, or was prohibited because it recently
4266                          * failed at this order, fail immediately unless the
4267                          * allocator has requested compaction and reclaim retry.
4268                          *
4269                          * Reclaim is
4270                          *  - potentially very expensive because zones are far
4271                          *    below their low watermarks or this is part of very
4272                          *    bursty high order allocations,
4273                          *  - not guaranteed to help because isolate_freepages()
4274                          *    may not iterate over freed pages as part of its
4275                          *    linear scan, and
4276                          *  - unlikely to make entire pageblocks free on its
4277                          *    own.
4278                          */
4279                         if (compact_result == COMPACT_SKIPPED ||
4280                             compact_result == COMPACT_DEFERRED)
4281                                 goto nopage;
4282 
4283                         /*
4284                          * Looks like reclaim/compaction is worth trying, but
4285                          * sync compaction could be very expensive, so keep
4286                          * using async compaction.
4287                          */
4288                         compact_priority = INIT_COMPACT_PRIORITY;
4289                 }
4290         }
4291 
4292 retry:
4293         /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4294         if (alloc_flags & ALLOC_KSWAPD)
4295                 wake_all_kswapds(order, gfp_mask, ac);
4296 
4297         reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4298         if (reserve_flags)
4299                 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4300                                           (alloc_flags & ALLOC_KSWAPD);
4301 
4302         /*
4303          * Reset the nodemask and zonelist iterators if memory policies can be
4304          * ignored. These allocations are high priority and system-oriented
4305          * rather than user-oriented.
4306          */
4307         if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4308                 ac->nodemask = NULL;
4309                 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4310                                         ac->highest_zoneidx, ac->nodemask);
4311         }
4312 
4313         /* Attempt with potentially adjusted zonelist and alloc_flags */
4314         page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4315         if (page)
4316                 goto got_pg;
4317 
4318         /* Caller is not willing to reclaim, we can't balance anything */
4319         if (!can_direct_reclaim)
4320                 goto nopage;
4321 
4322         /* Avoid recursion of direct reclaim */
4323         if (current->flags & PF_MEMALLOC)
4324                 goto nopage;
4325 
4326         /* Try direct reclaim and then allocating */
4327         page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4328                                                         &did_some_progress);
4329         if (page)
4330                 goto got_pg;
4331 
4332         /* Try direct compaction and then allocating */
4333         page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4334                                         compact_priority, &compact_result);
4335         if (page)
4336                 goto got_pg;
4337 
4338         /* Do not loop if specifically requested */
4339         if (gfp_mask & __GFP_NORETRY)
4340                 goto nopage;
4341 
4342         /*
4343          * Do not retry costly high order allocations unless they are
4344          * __GFP_RETRY_MAYFAIL and we can compact
4345          */
4346         if (costly_order && (!can_compact ||
4347                              !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4348                 goto nopage;
4349 
4350         if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4351                                  did_some_progress > 0, &no_progress_loops))
4352                 goto retry;
4353 
4354         /*
4355          * It doesn't make any sense to retry compaction if order-0
4356          * reclaim is not able to make any progress, because the current
4357          * implementation of compaction depends on a sufficient amount
4358          * of free memory (see __compaction_suitable()).
4359          */
4360         if (did_some_progress > 0 && can_compact &&
4361                         should_compact_retry(ac, order, alloc_flags,
4362                                 compact_result, &compact_priority,
4363                                 &compaction_retries))
4364                 goto retry;
4365 
4366 
4367         /*
4368          * Deal with possible cpuset update races or zonelist updates to avoid
4369          * an unnecessary OOM kill.
4370          */
4371         if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4372             check_retry_zonelist(zonelist_iter_cookie))
4373                 goto restart;
4374 
4375         /* Reclaim has failed us, start killing things */
4376         page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4377         if (page)
4378                 goto got_pg;
4379 
4380         /* Avoid allocations with no watermarks from looping endlessly */
4381         if (tsk_is_oom_victim(current) &&
4382             (alloc_flags & ALLOC_OOM ||
4383              (gfp_mask & __GFP_NOMEMALLOC)))
4384                 goto nopage;
4385 
4386         /* Retry as long as the OOM killer is making progress */
4387         if (did_some_progress) {
4388                 no_progress_loops = 0;
4389                 goto retry;
4390         }
4391 
4392 nopage:
4393         /*
4394          * Deal with possible cpuset update races or zonelist updates to avoid
4395          * an unnecessary OOM kill.
4396          */
4397         if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4398             check_retry_zonelist(zonelist_iter_cookie))
4399                 goto restart;
4400 
4401         /*
4402          * Make sure that a __GFP_NOFAIL request doesn't leak out and make
4403          * sure we always retry.
4404          */
4405         if (gfp_mask & __GFP_NOFAIL) {
4406                 /*
4407                  * All existing users of __GFP_NOFAIL are blockable, so warn
4408                  * about any new users that actually require GFP_NOWAIT.
4409                  */
4410                 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
4411                         goto fail;
4412 
4413                 /*
4414                  * A PF_MEMALLOC request from this context is rather bizarre
4415                  * because we cannot reclaim anything and can only loop waiting
4416                  * for somebody to do some work for us.
4417                  */
4418                 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
4419 
4420                 /*
4421                  * Non-failing costly orders are a hard requirement which we
4422                  * are not well prepared for, so let's warn about these users
4423                  * so that we can identify them and convert them to something
4424                  * else.
4425                  */
4426                 WARN_ON_ONCE_GFP(costly_order, gfp_mask);
4427 
4428                 /*
4429                  * Help non-failing allocations by giving some access to memory
4430                  * reserves normally used for high-priority non-blocking
4431                  * allocations, but do not use ALLOC_NO_WATERMARKS because this
4432                  * could deplete the whole memory reserves, which would just make
4433                  * the situation worse.
4434                  */
4435                 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4436                 if (page)
4437                         goto got_pg;
4438 
4439                 cond_resched();
4440                 goto retry;
4441         }
4442 fail:
4443         warn_alloc(gfp_mask, ac->nodemask,
4444                         "page allocation failure: order:%u", order);
4445 got_pg:
4446         return page;
4447 }
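
/*
 * Editor's note (illustrative summary, not from the source): how some
 * common gfp masks steer the slow path above:
 *
 *	GFP_NOWAIT			no direct reclaim; bails out at the
 *					"Caller is not willing to reclaim"
 *					check
 *	GFP_KERNEL			may reclaim, compact and, as a last
 *					resort, OOM-kill
 *	GFP_KERNEL | __GFP_NORETRY	one reclaim/compaction attempt,
 *					then fails
 *	GFP_KERNEL | __GFP_NOFAIL	keeps retrying instead of failing,
 *					with limited ALLOC_MIN_RESERVE
 *					access to reserves
 */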
4448 
4449 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4450                 int preferred_nid, nodemask_t *nodemask,
4451                 struct alloc_context *ac, gfp_t *alloc_gfp,
4452                 unsigned int *alloc_flags)
4453 {
4454         ac->highest_zoneidx = gfp_zone(gfp_mask);
4455         ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4456         ac->nodemask = nodemask;
4457         ac->migratetype = gfp_migratetype(gfp_mask);
4458 
4459         if (cpusets_enabled()) {
4460                 *alloc_gfp |= __GFP_HARDWALL;
4461                 /*
4462                  * When we are in interrupt context, the allocation is
4463                  * unrelated to the current task context, so any node is OK.
4464                  */
4465                 if (in_task() && !ac->nodemask)
4466                         ac->nodemask = &cpuset_current_mems_allowed;
4467                 else
4468                         *alloc_flags |= ALLOC_CPUSET;
4469         }
4470 
4471         might_alloc(gfp_mask);
4472 
4473         if (should_fail_alloc_page(gfp_mask, order))
4474                 return false;
4475 
4476         *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4477 
4478         /* Dirty zone balancing is only done in the fast path */
4479         ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4480 
4481         /*
4482          * The preferred zone is used for statistics but crucially it is
4483          * also used as the starting point for the zonelist iterator. It
4484          * may get reset for allocations that ignore memory policies.
4485          */
4486         ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4487                                         ac->highest_zoneidx, ac->nodemask);
4488 
4489         return true;
4490 }
4491 
4492 /*
4493  * alloc_pages_bulk_noprof - Allocate a number of order-0 pages to a list or array
4494  * @gfp: GFP flags for the allocation
4495  * @preferred_nid: The preferred NUMA node ID to allocate from
4496  * @nodemask: Set of nodes to allocate from, may be NULL
4497  * @nr_pages: The number of pages desired on the list or array
4498  * @page_list: Optional list to store the allocated pages
4499  * @page_array: Optional array to store the pages
4500  *
4501  * This is a batched version of the page allocator that attempts to
4502  * allocate nr_pages quickly. Pages are added to page_list if page_list
4503  * is not NULL, otherwise it is assumed that the page_array is valid.
4504  *
4505  * For lists, nr_pages is the number of pages that should be allocated.
4506  *
4507  * For arrays, only NULL elements are populated with pages and nr_pages
4508  * is the maximum number of pages that will be stored in the array.
4509  *
4510  * Returns the number of pages on the list or array.
4511  */
4512 unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
4513                         nodemask_t *nodemask, int nr_pages,
4514                         struct list_head *page_list,
4515                         struct page **page_array)
4516 {
4517         struct page *page;
4518         unsigned long __maybe_unused UP_flags;
4519         struct zone *zone;
4520         struct zoneref *z;
4521         struct per_cpu_pages *pcp;
4522         struct list_head *pcp_list;
4523         struct alloc_context ac;
4524         gfp_t alloc_gfp;
4525         unsigned int alloc_flags = ALLOC_WMARK_LOW;
4526         int nr_populated = 0, nr_account = 0;
4527 
4528         /*
4529          * Skip populated array elements to determine if any pages need
4530          * to be allocated before disabling IRQs.
4531          */
4532         while (page_array && nr_populated < nr_pages && page_array[nr_populated])
4533                 nr_populated++;
4534 
4535         /* No pages requested? */
4536         if (unlikely(nr_pages <= 0))
4537                 goto out;
4538 
4539         /* Already populated array? */
4540         if (unlikely(page_array && nr_pages - nr_populated == 0))
4541                 goto out;
4542 
4543         /* Bulk allocator does not support memcg accounting. */
4544         if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
4545                 goto failed;
4546 
4547         /* Use the single page allocator for one page. */
4548         if (nr_pages - nr_populated == 1)
4549                 goto failed;
4550 
4551 #ifdef CONFIG_PAGE_OWNER
4552         /*
4553          * PAGE_OWNER may recurse into the allocator to allocate space to
4554          * save the stack with pagesets.lock held. Releasing/reacquiring
4555          * removes much of the performance benefit of bulk allocation, so
4556          * force the caller to allocate one page at a time, as that performs
4557          * about as well as adding the complexity to the bulk allocator.
4558          */
4559         if (static_branch_unlikely(&page_owner_inited))
4560                 goto failed;
4561 #endif
4562 
4563         /* May set ALLOC_NOFRAGMENT; on fragmentation, fall back to 1 page. */
4564         gfp &= gfp_allowed_mask;
4565         alloc_gfp = gfp;
4566         if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
4567                 goto out;
4568         gfp = alloc_gfp;
4569 
4570         /* Find an allowed local zone that meets the low watermark. */
4571         for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
4572                 unsigned long mark;
4573 
4574                 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
4575                     !__cpuset_zone_allowed(zone, gfp)) {
4576                         continue;
4577                 }
4578 
4579                 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
4580                     zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
4581                         goto failed;
4582                 }
4583 
4584                 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
4585                 if (zone_watermark_fast(zone, 0,  mark,
4586                                 zonelist_zone_idx(ac.preferred_zoneref),
4587                                 alloc_flags, gfp)) {
4588                         break;
4589                 }
4590         }
4591 
4592         /*
4593          * If there are no allowed local zones that meet the watermarks, then
4594          * try to allocate a single page and reclaim if necessary.
4595          */
4596         if (unlikely(!zone))
4597                 goto failed;
4598 
4599         /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
4600         pcp_trylock_prepare(UP_flags);
4601         pcp = pcp_spin_trylock(zone->per_cpu_pageset);
4602         if (!pcp)
4603                 goto failed_irq;
4604 
4605         /* Attempt the batch allocation */
4606         pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
4607         while (nr_populated < nr_pages) {
4608 
4609                 /* Skip existing pages */
4610                 if (page_array && page_array[nr_populated]) {
4611                         nr_populated++;
4612                         continue;
4613                 }
4614 
4615                 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
4616                                                                 pcp, pcp_list);
4617                 if (unlikely(!page)) {
4618                         /* Try to allocate at least one page */
4619                         if (!nr_account) {
4620                                 pcp_spin_unlock(pcp);
4621                                 goto failed_irq;
4622                         }
4623                         break;
4624                 }
4625                 nr_account++;
4626 
4627                 prep_new_page(page, 0, gfp, 0);
4628                 if (page_list)
4629                         list_add(&page->lru, page_list);
4630                 else
4631                         page_array[nr_populated] = page;
4632                 nr_populated++;
4633         }
4634 
4635         pcp_spin_unlock(pcp);
4636         pcp_trylock_finish(UP_flags);
4637 
4638         __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
4639         zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
4640 
4641 out:
4642         return nr_populated;
4643 
4644 failed_irq:
4645         pcp_trylock_finish(UP_flags);
4646 
4647 failed:
4648         page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask);
4649         if (page) {
4650                 if (page_list)
4651                         list_add(&page->lru, page_list);
4652                 else
4653                         page_array[nr_populated] = page;
4654                 nr_populated++;
4655         }
4656 
4657         goto out;
4658 }
4659 EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof);
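
/*
 * Editor's sketch: a typical caller fills an array and falls back to
 * the single-page allocator for any shortfall.  alloc_pages_bulk_array()
 * is assumed here as the usual wrapper (wrapper names vary across
 * kernel versions):
 *
 *	struct page *pages[16] = { NULL };
 *	unsigned long filled;
 *
 *	filled = alloc_pages_bulk_array(GFP_KERNEL, ARRAY_SIZE(pages), pages);
 *	// only NULL slots were populated; filled may be < 16 under
 *	// memory pressure, so callers must handle a partial result
 */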
4660 
4661 /*
4662  * This is the 'heart' of the zoned buddy allocator.
4663  */
4664 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order,
4665                                       int preferred_nid, nodemask_t *nodemask)
4666 {
4667         struct page *page;
4668         unsigned int alloc_flags = ALLOC_WMARK_LOW;
4669         gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
4670         struct alloc_context ac = { };
4671 
4672         /*
4673          * There are several places where we assume that the order value is sane
4674          * so bail out early if the request is out of bounds.
4675          */
4676         if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp))
4677                 return NULL;
4678 
4679         gfp &= gfp_allowed_mask;
4680         /*
4681          * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4682          * and GFP_NOIO, which have to be inherited by all allocation requests
4683          * from a particular context which has been marked by
4684          * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN, which ensures
4685          * movable zones are not used during allocation.
4686          */
4687         gfp = current_gfp_context(gfp);
4688         alloc_gfp = gfp;
4689         if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
4690                         &alloc_gfp, &alloc_flags))
4691                 return NULL;
4692 
4693         /*
4694          * Forbid the first pass from falling back to types that fragment
4695          * memory until all local zones are considered.
4696          */
4697         alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
4698 
4699         /* First allocation attempt */
4700         page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
4701         if (likely(page))
4702                 goto out;
4703 
4704         alloc_gfp = gfp;
4705         ac.spread_dirty_pages = false;
4706 
4707         /*
4708          * Restore the original nodemask if it was potentially replaced with
4709          * &cpuset_current_mems_allowed to optimize the fast-path attempt.
4710          */
4711         ac.nodemask = nodemask;
4712 
4713         page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
4714 
4715 out:
4716         if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page &&
4717             unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
4718                 __free_pages(page, order);
4719                 page = NULL;
4720         }
4721 
4722         trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
4723         kmsan_alloc_page(page, order, alloc_gfp);
4724 
4725         return page;
4726 }
4727 EXPORT_SYMBOL(__alloc_pages_noprof);
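
/*
 * Editor's sketch: callers normally reach this entry point through the
 * alloc_pages() wrapper, e.g.:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);	// order must match the allocation
 */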
4728 
4729 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
4730                 nodemask_t *nodemask)
4731 {
4732         struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order,
4733                                         preferred_nid, nodemask);
4734         return page_rmappable_folio(page);
4735 }
4736 EXPORT_SYMBOL(__folio_alloc_noprof);
4737 
4738 /*
4739  * Common helper functions. Never use with __GFP_HIGHMEM because the returned
4740  * address cannot represent highmem pages. Use alloc_pages() and then kmap()
4741  * if you need to access highmem.
4742  */
4743 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order)
4744 {
4745         struct page *page;
4746 
4747         page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order);
4748         if (!page)
4749                 return 0;
4750         return (unsigned long) page_address(page);
4751 }
4752 EXPORT_SYMBOL(get_free_pages_noprof);
4753 
4754 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask)
4755 {
4756         return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0);
4757 }
4758 EXPORT_SYMBOL(get_zeroed_page_noprof);
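
/*
 * Editor's sketch: the callers' view of the two helpers above, via
 * their __get_free_pages()/get_zeroed_page() wrappers:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 1);	// 2 pages
 *	unsigned long zpg = get_zeroed_page(GFP_KERNEL);	// 1 zeroed page
 *
 *	free_pages(buf, 1);
 *	free_page(zpg);
 */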
4759 
4760 /**
4761  * __free_pages - Free pages allocated with alloc_pages().
4762  * @page: The page pointer returned from alloc_pages().
4763  * @order: The order of the allocation.
4764  *
4765  * This function can free multi-page allocations that are not compound
4766  * pages.  It does not check that the @order passed in matches that of
4767  * the allocation, so it is easy to leak memory.  Freeing more memory
4768  * than was allocated will probably emit a warning.
4769  *
4770  * If the last reference to this page is speculative, it will be released
4771  * by put_page() which only frees the first page of a non-compound
4772  * allocation.  To prevent the remaining pages from being leaked, we free
4773  * the subsequent pages here.  If you want to use the page's reference
4774  * count to decide when to free the allocation, you should allocate a
4775  * compound page, and use put_page() instead of __free_pages().
4776  *
4777  * Context: May be called in interrupt context or while holding a normal
4778  * spinlock, but not in NMI context or while holding a raw spinlock.
4779  */
4780 void __free_pages(struct page *page, unsigned int order)
4781 {
4782         /* get PageHead before we drop reference */
4783         int head = PageHead(page);
4784         struct alloc_tag *tag = pgalloc_tag_get(page);
4785 
4786         if (put_page_testzero(page))
4787                 free_unref_page(page, order);
4788         else if (!head) {
4789                 pgalloc_tag_sub_pages(tag, (1 << order) - 1);
4790                 while (order-- > 0)
4791                         free_unref_page(page + (1 << order), order);
4792         }
4793 }
4794 EXPORT_SYMBOL(__free_pages);
4795 
4796 void free_pages(unsigned long addr, unsigned int order)
4797 {
4798         if (addr != 0) {
4799                 VM_BUG_ON(!virt_addr_valid((void *)addr));
4800                 __free_pages(virt_to_page((void *)addr), order);
4801         }
4802 }
4803 
4804 EXPORT_SYMBOL(free_pages);
4805 
4806 /*
4807  * Page Fragment:
4808  *  An arbitrary-length arbitrary-offset area of memory which resides
4809  *  within a 0 or higher order page.  Multiple fragments within that page
4810  *  are individually refcounted, in the page's reference counter.
4811  *
4812  * The page_frag functions below provide a simple allocation framework for
4813  * page fragments.  This is used by the network stack and network device
4814  * drivers to provide a backing region of memory for use as either an
4815  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4816  */
4817 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
4818                                              gfp_t gfp_mask)
4819 {
4820         struct page *page = NULL;
4821         gfp_t gfp = gfp_mask;
4822 
4823 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4824         gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) |  __GFP_COMP |
4825                    __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
4826         page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4827                                 PAGE_FRAG_CACHE_MAX_ORDER);
4828         nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
4829 #endif
4830         if (unlikely(!page))
4831                 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
4832 
4833         nc->va = page ? page_address(page) : NULL;
4834 
4835         return page;
4836 }
4837 
4838 void page_frag_cache_drain(struct page_frag_cache *nc)
4839 {
4840         if (!nc->va)
4841                 return;
4842 
4843         __page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias);
4844         nc->va = NULL;
4845 }
4846 EXPORT_SYMBOL(page_frag_cache_drain);
4847 
4848 void __page_frag_cache_drain(struct page *page, unsigned int count)
4849 {
4850         VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
4851 
4852         if (page_ref_sub_and_test(page, count))
4853                 free_unref_page(page, compound_order(page));
4854 }
4855 EXPORT_SYMBOL(__page_frag_cache_drain);
4856 
4857 void *__page_frag_alloc_align(struct page_frag_cache *nc,
4858                               unsigned int fragsz, gfp_t gfp_mask,
4859                               unsigned int align_mask)
4860 {
4861         unsigned int size = PAGE_SIZE;
4862         struct page *page;
4863         int offset;
4864 
4865         if (unlikely(!nc->va)) {
4866 refill:
4867                 page = __page_frag_cache_refill(nc, gfp_mask);
4868                 if (!page)
4869                         return NULL;
4870 
4871 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4872                 /* if size can vary use size else just use PAGE_SIZE */
4873                 size = nc->size;
4874 #endif
4875                 /* Even if we own the page, we do not use atomic_set().
4876                  * This would break get_page_unless_zero() users.
4877                  */
4878                 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
4879 
4880                 /* reset page count bias and offset to start of new frag */
4881                 nc->pfmemalloc = page_is_pfmemalloc(page);
4882                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4883                 nc->offset = size;
4884         }
4885 
4886         offset = nc->offset - fragsz;
4887         if (unlikely(offset < 0)) {
4888                 page = virt_to_page(nc->va);
4889 
4890                 if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4891                         goto refill;
4892 
4893                 if (unlikely(nc->pfmemalloc)) {
4894                         free_unref_page(page, compound_order(page));
4895                         goto refill;
4896                 }
4897 
4898 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4899                 /* if size can vary use size else just use PAGE_SIZE */
4900                 size = nc->size;
4901 #endif
4902                 /* OK, page count is 0, we can safely set it */
4903                 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
4904 
4905                 /* reset page count bias and offset to start of new frag */
4906                 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4907                 offset = size - fragsz;
4908                 if (unlikely(offset < 0)) {
4909                         /*
4910                          * The caller is trying to allocate a fragment
4911                          * with fragsz > PAGE_SIZE, but the cache isn't big
4912                          * enough to satisfy the request; this may
4913                          * happen in low-memory conditions.
4914                          * We don't release the cache page because
4915                          * that could make memory pressure worse,
4916                          * so we simply return NULL here.
4917                          */
4918                         return NULL;
4919                 }
4920         }
4921 
4922         nc->pagecnt_bias--;
4923         offset &= align_mask;
4924         nc->offset = offset;
4925 
4926         return nc->va + offset;
4927 }
4928 EXPORT_SYMBOL(__page_frag_alloc_align);
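
/*
 * Editor's sketch: typical page_frag usage, e.g. from a network driver;
 * page_frag_alloc() is the no-alignment wrapper around
 * __page_frag_alloc_align():
 *
 *	static struct page_frag_cache frag_cache;	// often per-CPU
 *
 *	void *buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
 *
 *	if (buf) {
 *		...
 *		page_frag_free(buf);	// drops this fragment's reference
 *	}
 */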
4929 
4930 /*
4931  * Frees a page fragment allocated out of either a compound or order 0 page.
4932  */
4933 void page_frag_free(void *addr)
4934 {
4935         struct page *page = virt_to_head_page(addr);
4936 
4937         if (unlikely(put_page_testzero(page)))
4938                 free_unref_page(page, compound_order(page));
4939 }
4940 EXPORT_SYMBOL(page_frag_free);
4941 
4942 static void *make_alloc_exact(unsigned long addr, unsigned int order,
4943                 size_t size)
4944 {
4945         if (addr) {
4946                 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
4947                 struct page *page = virt_to_page((void *)addr);
4948                 struct page *last = page + nr;
4949 
4950                 split_page_owner(page, order, 0);
4951                 pgalloc_tag_split(page, 1 << order);
4952                 split_page_memcg(page, order, 0);
4953                 while (page < --last)
4954                         set_page_refcounted(last);
4955 
4956                 last = page + (1UL << order);
4957                 for (page += nr; page < last; page++)
4958                         __free_pages_ok(page, 0, FPI_TO_TAIL);
4959         }
4960         return (void *)addr;
4961 }
4962 
4963 /**
4964  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
4965  * @size: the number of bytes to allocate
4966  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4967  *
4968  * This function is similar to alloc_pages(), except that it allocates the
4969  * minimum number of pages to satisfy the request.  alloc_pages() can only
4970  * allocate memory in power-of-two pages.
4971  *
4972  * This function is also limited by MAX_PAGE_ORDER.
4973  *
4974  * Memory allocated by this function must be released by free_pages_exact().
4975  *
4976  * Return: pointer to the allocated area or %NULL in case of error.
4977  */
4978 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask)
4979 {
4980         unsigned int order = get_order(size);
4981         unsigned long addr;
4982 
4983         if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4984                 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4985 
4986         addr = get_free_pages_noprof(gfp_mask, order);
4987         return make_alloc_exact(addr, order, size);
4988 }
4989 EXPORT_SYMBOL(alloc_pages_exact_noprof);
4990 
4991 /**
4992  * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4993  *                         pages on a node.
4994  * @nid: the preferred node ID where memory should be allocated
4995  * @size: the number of bytes to allocate
4996  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4997  *
4998  * Like alloc_pages_exact(), but try to allocate on node nid first before falling
4999  * back.
5000  *
5001  * Return: pointer to the allocated area or %NULL in case of error.
5002  */
5003 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask)
5004 {
5005         unsigned int order = get_order(size);
5006         struct page *p;
5007 
5008         if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
5009                 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
5010 
5011         p = alloc_pages_node_noprof(nid, gfp_mask, order);
5012         if (!p)
5013                 return NULL;
5014         return make_alloc_exact((unsigned long)page_address(p), order, size);
5015 }
5016 
5017 /**
5018  * free_pages_exact - release memory allocated via alloc_pages_exact()
5019  * @virt: the value returned by alloc_pages_exact.
5020  * @size: size of allocation, same value as passed to alloc_pages_exact().
5021  *
5022  * Release the memory allocated by a previous call to alloc_pages_exact.
5023  */
5024 void free_pages_exact(void *virt, size_t size)
5025 {
5026         unsigned long addr = (unsigned long)virt;
5027         unsigned long end = addr + PAGE_ALIGN(size);
5028 
5029         while (addr < end) {
5030                 free_page(addr);
5031                 addr += PAGE_SIZE;
5032         }
5033 }
5034 EXPORT_SYMBOL(free_pages_exact);
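
/*
 * Editor's sketch: alloc_pages_exact() pairs with free_pages_exact(),
 * and the same size must be passed to both:
 *
 *	void *buf = alloc_pages_exact(10 * 1024, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 10 * 1024);
 *
 * For 10KB, get_order() yields an order-2 (16KB, with 4KB pages) block
 * and make_alloc_exact() hands the surplus page back, so only the three
 * pages actually needed stay allocated.
 */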
5035 
5036 /**
5037  * nr_free_zone_pages - count number of pages beyond high watermark
5038  * @offset: The zone index of the highest zone
5039  *
5040  * nr_free_zone_pages() counts the number of pages which are beyond the
5041  * high watermark within all zones at or below a given zone index.  For each
5042  * zone, the number of pages is calculated as:
5043  *
5044  *     nr_free_zone_pages = managed_pages - high_pages
5045  *
5046  * Return: number of pages beyond high watermark.
5047  */
5048 static unsigned long nr_free_zone_pages(int offset)
5049 {
5050         struct zoneref *z;
5051         struct zone *zone;
5052 
5053         /* Just pick one node, since fallback list is circular */
5054         unsigned long sum = 0;
5055 
5056         struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5057 
5058         for_each_zone_zonelist(zone, z, zonelist, offset) {
5059                 unsigned long size = zone_managed_pages(zone);
5060                 unsigned long high = high_wmark_pages(zone);
5061                 if (size > high)
5062                         sum += size - high;
5063         }
5064 
5065         return sum;
5066 }
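
/*
 * Editor's example (illustrative numbers): a zone with 1,000,000
 * managed pages and a high watermark of 20,000 contributes
 * 1,000,000 - 20,000 = 980,000 pages to the sum; any zone at or below
 * its high watermark contributes nothing.
 */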
5067 
5068 /**
5069  * nr_free_buffer_pages - count number of pages beyond high watermark
5070  *
5071  * nr_free_buffer_pages() counts the number of pages which are beyond the high
5072  * watermark within ZONE_DMA and ZONE_NORMAL.
5073  *
5074  * Return: number of pages beyond high watermark within ZONE_DMA and
5075  * ZONE_NORMAL.
5076  */
5077 unsigned long nr_free_buffer_pages(void)
5078 {
5079         return nr_free_zone_pages(gfp_zone(GFP_USER));
5080 }
5081 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5082 
5083 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5084 {
5085         zoneref->zone = zone;
5086         zoneref->zone_idx = zone_idx(zone);
5087 }
5088 
5089 /*
5090  * Builds allocation fallback zone lists.
5091  *
5092  * Add all populated zones of a node to the zonelist.
5093  */
5094 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
5095 {
5096         struct zone *zone;
5097         enum zone_type zone_type = MAX_NR_ZONES;
5098         int nr_zones = 0;
5099 
5100         do {
5101                 zone_type--;
5102                 zone = pgdat->node_zones + zone_type;
5103                 if (populated_zone(zone)) {
5104                         zoneref_set_zone(zone, &zonerefs[nr_zones++]);
5105                         check_highest_zone(zone_type);
5106                 }
5107         } while (zone_type);
5108 
5109         return nr_zones;
5110 }
5111 
5112 #ifdef CONFIG_NUMA
5113 
5114 static int __parse_numa_zonelist_order(char *s)
5115 {
5116         /*
5117          * We used to support different zonelist modes, but they turned
5118          * out to be just not useful. Let's keep the warning in place
5119          * if somebody still uses the command line parameter so that we do
5120          * not fail silently.
5121          */
5122         if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5123                 pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
5124                 return -EINVAL;
5125         }
5126         return 0;
5127 }
5128 
5129 static char numa_zonelist_order[] = "Node";
5130 #define NUMA_ZONELIST_ORDER_LEN 16
5131 /*
5132  * sysctl handler for numa_zonelist_order
5133  */
5134 static int numa_zonelist_order_handler(const struct ctl_table *table, int write,
5135                 void *buffer, size_t *length, loff_t *ppos)
5136 {
5137         if (write)
5138                 return __parse_numa_zonelist_order(buffer);
5139         return proc_dostring(table, write, buffer, length, ppos);
5140 }
5141 
5142 static int node_load[MAX_NUMNODES];
5143 
5144 /**
5145  * find_next_best_node - find the next node that should appear in a given node's fallback list
5146  * @node: node whose fallback list we're appending
5147  * @used_node_mask: nodemask_t of already used nodes
5148  *
5149  * We use a number of factors to determine which is the next node that should
5150  * appear on a given node's fallback list.  The node should not have appeared
5151  * already in @node's fallback list, and it should be the next closest node
5152  * according to the distance array (which contains arbitrary distance values
5153  * from each node to each node in the system); we also prefer nodes
5154  * with no CPUs, since presumably they'll have very little allocation pressure
5155  * on them otherwise.
5156  *
5157  * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5158  */
5159 int find_next_best_node(int node, nodemask_t *used_node_mask)
5160 {
5161         int n, val;
5162         int min_val = INT_MAX;
5163         int best_node = NUMA_NO_NODE;
5164 
5165         /*
5166          * Use the local node if we haven't already, but for memoryless local
5167          * node, we should skip it and fall back to other nodes.
5168          */
5169         if (!node_isset(node, *used_node_mask) && node_state(node, N_MEMORY)) {
5170                 node_set(node, *used_node_mask);
5171                 return node;
5172         }
5173 
5174         for_each_node_state(n, N_MEMORY) {
5175 
5176                 /* Don't want a node to appear more than once */
5177                 if (node_isset(n, *used_node_mask))
5178                         continue;
5179 
5180                 /* Use the distance array to find the distance */
5181                 val = node_distance(node, n);
5182 
5183                 /* Penalize nodes under us ("prefer the next node") */
5184                 val += (n < node);
5185 
5186                 /* Give preference to headless and unused nodes */
5187                 if (!cpumask_empty(cpumask_of_node(n)))
5188                         val += PENALTY_FOR_NODE_WITH_CPUS;
5189 
5190                 /* Slight preference for less loaded node */
5191                 val *= MAX_NUMNODES;
5192                 val += node_load[n];
5193 
5194                 if (val < min_val) {
5195                         min_val = val;
5196                         best_node = n;
5197                 }
5198         }
5199 
5200         if (best_node >= 0)
5201                 node_set(best_node, *used_node_mask);
5202 
5203         return best_node;
5204 }
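
/*
 * Editor's example (illustrative numbers, assuming
 * PENALTY_FOR_NODE_WITH_CPUS == 1, its usual definition): for a
 * candidate n with node_distance(node, n) == 20, n < node, CPUs
 * attached and node_load[n] == 1:
 *
 *	val = 20;		// distance
 *	val += 1;		// n < node: prefer the next node
 *	val += 1;		// has CPUs
 *	val *= MAX_NUMNODES;	// scale so distance dominates the load
 *	val += 1;		// node_load[n] as a round-robin tie-breaker
 *
 * The candidate with the lowest val wins.
 */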
5205 
5206 
5207 /*
5208  * Build zonelists ordered by node and zones within node.
5209  * This results in maximum locality (the Normal zone overflows into the
5210  * local DMA zone, if any) but risks exhausting the DMA zone.
5211  */
5212 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
5213                 unsigned nr_nodes)
5214 {
5215         struct zoneref *zonerefs;
5216         int i;
5217 
5218         zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5219 
5220         for (i = 0; i < nr_nodes; i++) {
5221                 int nr_zones;
5222 
5223                 pg_data_t *node = NODE_DATA(node_order[i]);
5224 
5225                 nr_zones = build_zonerefs_node(node, zonerefs);
5226                 zonerefs += nr_zones;
5227         }
5228         zonerefs->zone = NULL;
5229         zonerefs->zone_idx = 0;
5230 }
5231 
5232 /*
5233  * Build __GFP_THISNODE zonelists
5234  */
5235 static void build_thisnode_zonelists(pg_data_t *pgdat)
5236 {
5237         struct zoneref *zonerefs;
5238         int nr_zones;
5239 
5240         zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5241         nr_zones = build_zonerefs_node(pgdat, zonerefs);
5242         zonerefs += nr_zones;
5243         zonerefs->zone = NULL;
5244         zonerefs->zone_idx = 0;
5245 }
5246 
5247 /*
5248  * Build zonelists ordered by zone and nodes within zones.
5249  * This results in conserving DMA zone[s] until all Normal memory is
5250  * exhausted, but results in overflowing to a remote node while memory
5251  * may still exist in the local DMA zone.
5252  */
5253 
5254 static void build_zonelists(pg_data_t *pgdat)
5255 {
5256         static int node_order[MAX_NUMNODES];
5257         int node, nr_nodes = 0;
5258         nodemask_t used_mask = NODE_MASK_NONE;
5259         int local_node, prev_node;
5260 
5261         /* NUMA-aware ordering of nodes */
5262         local_node = pgdat->node_id;
5263         prev_node = local_node;
5264 
5265         memset(node_order, 0, sizeof(node_order));
5266         while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5267                 /*
5268                  * We don't want to pressure a particular node,
5269                  * so add a penalty to the first node in the same
5270                  * distance group to make it round-robin.
5271                  */
5272                 if (node_distance(local_node, node) !=
5273                     node_distance(local_node, prev_node))
5274                         node_load[node] += 1;
5275 
5276                 node_order[nr_nodes++] = node;
5277                 prev_node = node;
5278         }
5279 
5280         build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5281         build_thisnode_zonelists(pgdat);
5282         pr_info("Fallback order for Node %d: ", local_node);
5283         for (node = 0; node < nr_nodes; node++)
5284                 pr_cont("%d ", node_order[node]);
5285         pr_cont("\n");
5286 }
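
/*
 * Editor's example: on a two-node machine the loop above typically
 * prints "Fallback order for Node 0: 0 1" and "Fallback order for
 * Node 1: 1 0", i.e. each node prefers itself and then the other node.
 */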
5287 
5288 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5289 /*
5290  * Return node id of node used for "local" allocations.
5291  * I.e., first node id of first zone in arg node's generic zonelist.
5292  * Used for initializing percpu 'numa_mem', which is used primarily
5293  * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
5294  */
5295 int local_memory_node(int node)
5296 {
5297         struct zoneref *z;
5298 
5299         z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
5300                                    gfp_zone(GFP_KERNEL),
5301                                    NULL);
5302         return zone_to_nid(z->zone);
5303 }
5304 #endif
5305 
5306 static void setup_min_unmapped_ratio(void);
5307 static void setup_min_slab_ratio(void);
5308 #else   /* CONFIG_NUMA */
5309 
5310 static void build_zonelists(pg_data_t *pgdat)
5311 {
5312         struct zoneref *zonerefs;
5313         int nr_zones;
5314 
5315         zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5316         nr_zones = build_zonerefs_node(pgdat, zonerefs);
5317         zonerefs += nr_zones;
5318 
5319         zonerefs->zone = NULL;
5320         zonerefs->zone_idx = 0;
5321 }
5322 
5323 #endif  /* CONFIG_NUMA */
5324 
5325 /*
5326  * Boot pageset table. One per CPU, used for all zones and all
5327  * nodes. The parameters will be set in such a way
5328  * that an item put on a list will immediately be handed over to
5329  * the buddy list. This is safe since pageset manipulation is done
5330  * with interrupts disabled.
5331  *
5332  * The boot_pagesets must be kept even after bootup is complete for
5333  * unused processors and/or zones. They do play a role for bootstrapping
5334  * hotplugged processors.
5335  *
5336  * zoneinfo_show() and maybe other functions do
5337  * not check if the processor is online before following the pageset pointer.
5338  * Other parts of the kernel may not check if the zone is available.
5339  */
5340 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
5341 /* These effectively disable the pcplists in the boot pageset completely */
5342 #define BOOT_PAGESET_HIGH       0
5343 #define BOOT_PAGESET_BATCH      1
5344 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
5345 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
5346 
5347 static void __build_all_zonelists(void *data)
5348 {
5349         int nid;
5350         int __maybe_unused cpu;
5351         pg_data_t *self = data;
5352         unsigned long flags;
5353 
5354         /*
5355          * The zonelist_update_seq must be acquired with irqsave because the
5356          * reader can be invoked from IRQ with GFP_ATOMIC.
5357          */
5358         write_seqlock_irqsave(&zonelist_update_seq, flags);
5359         /*
5360          * Also disable synchronous printk() to prevent any printk() from
5361          * trying to hold port->lock, since
5362          * tty_insert_flip_string_and_push_buffer() on another CPU might be
5363          * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
5364          */
5365         printk_deferred_enter();
5366 
5367 #ifdef CONFIG_NUMA
5368         memset(node_load, 0, sizeof(node_load));
5369 #endif
5370 
5371         /*
5372          * This node was hot-added and no memory is present yet, so just
5373          * building its zonelists is fine - no need to touch other nodes.
5374          */
5375         if (self && !node_online(self->node_id)) {
5376                 build_zonelists(self);
5377         } else {
5378                 /*
5379                  * All possible nodes have pgdat preallocated
5380                  * in free_area_init
5381                  */
5382                 for_each_node(nid) {
5383                         pg_data_t *pgdat = NODE_DATA(nid);
5384 
5385                         build_zonelists(pgdat);
5386                 }
5387 
5388 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
5389                 /*
5390                  * We now know the "local memory node" for each node--
5391                  * i.e., the node of the first zone in the generic zonelist.
5392                  * Set up numa_mem percpu variable for on-line cpus.  During
5393                  * boot, only the boot cpu should be on-line;  we'll init the
5394                  * secondary cpus' numa_mem as they come on-line.  During
5395                  * node/memory hotplug, we'll fixup all on-line cpus.
5396                  */
5397                 for_each_online_cpu(cpu)
5398                         set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
5399 #endif
5400         }
5401 
5402         printk_deferred_exit();
5403         write_sequnlock_irqrestore(&zonelist_update_seq, flags);
5404 }
5405 
5406 static noinline void __init
5407 build_all_zonelists_init(void)
5408 {
5409         int cpu;
5410 
5411         __build_all_zonelists(NULL);
5412 
5413         /*
5414          * Initialize the boot_pagesets that are going to be used
5415          * for bootstrapping processors. The real pagesets for
5416          * each zone will be allocated later when the per cpu
5417          * allocator is available.
5418          *
5419          * boot_pagesets are also used for bootstrapping offline
5420          * CPUs if the system is already booted, because the pagesets
5421          * are needed to initialize allocators on a specific CPU too.
5422          * E.g. the percpu allocator needs the page allocator, which
5423          * needs the percpu allocator in order to allocate its pagesets
5424          * (a chicken-and-egg dilemma).
5425          */
5426         for_each_possible_cpu(cpu)
5427                 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));
5428 
5429         mminit_verify_zonelist();
5430         cpuset_init_current_mems_allowed();
5431 }
5432 
5433 /*
5434  * unless system_state == SYSTEM_BOOTING.
5435  *
5436  * __ref due to call of __init annotated helper build_all_zonelists_init
5437  * [protected by SYSTEM_BOOTING].
5438  */
5439 void __ref build_all_zonelists(pg_data_t *pgdat)
5440 {
5441         unsigned long vm_total_pages;
5442 
5443         if (system_state == SYSTEM_BOOTING) {
5444                 build_all_zonelists_init();
5445         } else {
5446                 __build_all_zonelists(pgdat);
5447                 /* cpuset refresh routine should be here */
5448         }
5449         /* Get the number of free pages beyond high watermark in all zones. */
5450         vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
5451         /*
5452          * Disable grouping by mobility if the number of pages in the
5453          * system is too low to allow the mechanism to work. It would be
5454          * more accurate, but expensive, to check per-zone. This check is
5455          * also made on memory hot-add, so a system can start with mobility
5456          * grouping disabled and enable it later.
5457          */
5458         if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
5459                 page_group_by_mobility_disabled = 1;
5460         else
5461                 page_group_by_mobility_disabled = 0;
5462 
5463         pr_info("Built %u zonelists, mobility grouping %s.  Total pages: %ld\n",
5464                 nr_online_nodes,
5465                 page_group_by_mobility_disabled ? "off" : "on",
5466                 vm_total_pages);
5467 #ifdef CONFIG_NUMA
5468         pr_info("Policy zone: %s\n", zone_names[policy_zone]);
5469 #endif
5470 }
5471 
5472 static int zone_batchsize(struct zone *zone)
5473 {
5474 #ifdef CONFIG_MMU
5475         int batch;
5476 
5477         /*
5478          * The number of pages to batch allocate is either ~0.1%
5479          * of the zone or 1MB, whichever is smaller. The batch
5480          * size strikes a balance between allocation latency
5481          * and zone lock contention.
5482          */
5483         batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE);
5484         batch /= 4;             /* We effectively *= 4 below */
5485         if (batch < 1)
5486                 batch = 1;
5487 
5488         /*
5489          * Clamp the batch to a 2^n - 1 value. Having a power
5490          * of 2 value was found to be more likely to have
5491          * suboptimal cache aliasing properties in some cases.
5492          *
5493          * For example if 2 tasks are alternately allocating
5494          * batches of pages, one task can end up with a lot
5495          * of pages of one half of the possible page colors
5496          * and the other with pages of the other colors.
5497          */
5498         batch = rounddown_pow_of_two(batch + batch/2) - 1;
5499 
5500         return batch;
5501 
5502 #else
5503         /* The deferral and batching of frees should be suppressed under NOMMU
5504          * conditions.
5505          *
5506          * The problem is that NOMMU needs to be able to allocate large chunks
5507          * of contiguous memory as there's no hardware page translation to
5508          * assemble apparent contiguous memory from discontiguous pages.
5509          *
5510          * Queueing large contiguous runs of pages for batching, however,
5511          * causes the pages to actually be freed in smaller chunks.  As there
5512          * can be a significant delay between the individual batches being
5513          * recycled, this leads to the once large chunks of space being
5514          * fragmented and becoming unavailable for high-order allocations.
5515          */
5516         return 0;
5517 #endif
5518 }
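/*
 * Worked example (a sketch, assuming 4 KiB pages): for a zone managing
 * 4 GiB (1048576 pages), min(1048576 >> 10, SZ_1M / PAGE_SIZE) =
 * min(1024, 256) = 256; 256 / 4 = 64; and
 * rounddown_pow_of_two(64 + 32) - 1 = 63, so batch ends up at 63.
 */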
5519 
5520 static int percpu_pagelist_high_fraction;
5521 static int zone_highsize(struct zone *zone, int batch, int cpu_online,
5522                          int high_fraction)
5523 {
5524 #ifdef CONFIG_MMU
5525         int high;
5526         int nr_split_cpus;
5527         unsigned long total_pages;
5528 
5529         if (!high_fraction) {
5530                 /*
5531                  * By default, the high value of the pcp is based on the zone
5532                  * low watermark so that if they are full then background
5533                  * reclaim will not be started prematurely.
5534                  */
5535                 total_pages = low_wmark_pages(zone);
5536         } else {
5537                 /*
5538                  * If percpu_pagelist_high_fraction is configured, the high
5539                  * value is based on a fraction of the managed pages in the
5540                  * zone.
5541                  */
5542                 total_pages = zone_managed_pages(zone) / high_fraction;
5543         }
5544 
5545         /*
5546          * Split the high value across all online CPUs local to the zone. Note
5547          * that early in boot CPUs may not be online yet, and that during
5548          * CPU hotplug the cpumask is not yet updated when a CPU is being
5549          * onlined. For memory nodes that have no CPUs, split the high value
5550          * across all online CPUs to mitigate the risk that reclaim is triggered
5551          * prematurely due to pages stored on pcp lists.
5552          */
5553         nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
5554         if (!nr_split_cpus)
5555                 nr_split_cpus = num_online_cpus();
5556         high = total_pages / nr_split_cpus;
5557 
5558         /*
5559          * Ensure high is at least batch*4. The multiple is based on the
5560          * historical relationship between high and batch.
5561          */
5562         high = max(high, batch << 2);
5563 
5564         return high;
5565 #else
5566         return 0;
5567 #endif
5568 }
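/*
 * Worked example (a sketch): with high_fraction == 0, a zone low
 * watermark of 16384 pages and 16 online CPUs local to the zone
 * (cpu_online == 0), high = 16384 / 16 = 1024; since with batch == 63
 * the floor is 63 << 2 = 252, high stays at 1024 pages per CPU.
 */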
5569 
5570 /*
5571  * pcp->high and pcp->batch values are related and generally batch is lower
5572  * than high. They are also related to pcp->count such that count is lower
5573  * than high, and as soon as it reaches high, the pcplist is flushed.
5574  *
5575  * However, guaranteeing these relations at all times would require e.g. write
5576  * barriers here but also careful usage of read barriers at the read side, and
5577  * thus be prone to error and bad for performance. Hence the update only prevents
5578  * store tearing. Any new users of pcp->batch, pcp->high_min and pcp->high_max
5579  * should ensure they can cope with those fields changing asynchronously, and
5580  * fully trust only the pcp->count field on the local CPU with interrupts
5581  * disabled.
5582  *
5583  * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
5584  * outside of boot time (or some other assurance that no concurrent updaters
5585  * exist).
5586  */
5587 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high_min,
5588                            unsigned long high_max, unsigned long batch)
5589 {
5590         WRITE_ONCE(pcp->batch, batch);
5591         WRITE_ONCE(pcp->high_min, high_min);
5592         WRITE_ONCE(pcp->high_max, high_max);
5593 }
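/*
 * Reader-side sketch matching the rules above (a hypothetical consumer,
 * not a function in this file): snapshot the tearing-free fields once
 * and tolerate them changing asynchronously afterwards:
 *
 *	unsigned long batch = READ_ONCE(pcp->batch);
 *	unsigned long high = READ_ONCE(pcp->high_min);
 *
 * Only pcp->count, read on the local CPU with interrupts disabled, may
 * be fully trusted.
 */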
5594 
5595 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats)
5596 {
5597         int pindex;
5598 
5599         memset(pcp, 0, sizeof(*pcp));
5600         memset(pzstats, 0, sizeof(*pzstats));
5601 
5602         spin_lock_init(&pcp->lock);
5603         for (pindex = 0; pindex < NR_PCP_LISTS; pindex++)
5604                 INIT_LIST_HEAD(&pcp->lists[pindex]);
5605 
5606         /*
5607          * Set batch and high values safe for a boot pageset. A true percpu
5608          * pageset's initialization will update them subsequently. Here we don't
5609          * need to be as careful as pageset_update() as nobody can access the
5610          * pageset yet.
5611          */
5612         pcp->high_min = BOOT_PAGESET_HIGH;
5613         pcp->high_max = BOOT_PAGESET_HIGH;
5614         pcp->batch = BOOT_PAGESET_BATCH;
5615         pcp->free_count = 0;
5616 }
5617 
5618 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min,
5619                                               unsigned long high_max, unsigned long batch)
5620 {
5621         struct per_cpu_pages *pcp;
5622         int cpu;
5623 
5624         for_each_possible_cpu(cpu) {
5625                 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5626                 pageset_update(pcp, high_min, high_max, batch);
5627         }
5628 }
5629 
5630 /*
5631  * Calculate and set new high and batch values for all per-cpu pagesets of a
5632  * zone based on the zone's size.
5633  */
5634 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
5635 {
5636         int new_high_min, new_high_max, new_batch;
5637 
5638         new_batch = max(1, zone_batchsize(zone));
5639         if (percpu_pagelist_high_fraction) {
5640                 new_high_min = zone_highsize(zone, new_batch, cpu_online,
5641                                              percpu_pagelist_high_fraction);
5642                 /*
5643                  * PCP high is tuned manually, disable auto-tuning via
5644                  * setting high_min and high_max to the manual value.
5645                  */
5646                 new_high_max = new_high_min;
5647         } else {
5648                 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0);
5649                 new_high_max = zone_highsize(zone, new_batch, cpu_online,
5650                                              MIN_PERCPU_PAGELIST_HIGH_FRACTION);
5651         }
5652 
5653         if (zone->pageset_high_min == new_high_min &&
5654             zone->pageset_high_max == new_high_max &&
5655             zone->pageset_batch == new_batch)
5656                 return;
5657 
5658         zone->pageset_high_min = new_high_min;
5659         zone->pageset_high_max = new_high_max;
5660         zone->pageset_batch = new_batch;
5661 
5662         __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max,
5663                                           new_batch);
5664 }
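/*
 * Worked example (a sketch, assuming MIN_PERCPU_PAGELIST_HIGH_FRACTION
 * is 8): with the sysctl left at 0, high_min tracks the low watermark
 * while high_max allows up to zone_managed_pages(zone) / 8 split across
 * the local CPUs, e.g. 1048576 / 8 / 16 CPUs = 8192 pages, leaving the
 * auto-tuning room between the two bounds.
 */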
5665 
5666 void __meminit setup_zone_pageset(struct zone *zone)
5667 {
5668         int cpu;
5669 
5670         /* Size may be 0 on !SMP && !NUMA */
5671         if (sizeof(struct per_cpu_zonestat) > 0)
5672                 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
5673 
5674         zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
5675         for_each_possible_cpu(cpu) {
5676                 struct per_cpu_pages *pcp;
5677                 struct per_cpu_zonestat *pzstats;
5678 
5679                 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5680                 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
5681                 per_cpu_pages_init(pcp, pzstats);
5682         }
5683 
5684         zone_set_pageset_high_and_batch(zone, 0);
5685 }
5686 
5687 /*
5688  * The zone indicated has a new number of managed_pages; batch sizes and percpu
5689  * page high values need to be recalculated.
5690  */
5691 static void zone_pcp_update(struct zone *zone, int cpu_online)
5692 {
5693         mutex_lock(&pcp_batch_high_lock);
5694         zone_set_pageset_high_and_batch(zone, cpu_online);
5695         mutex_unlock(&pcp_batch_high_lock);
5696 }
5697 
5698 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
5699 {
5700         struct per_cpu_pages *pcp;
5701         struct cpu_cacheinfo *cci;
5702 
5703         pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
5704         cci = get_cpu_cacheinfo(cpu);
5705         /*
5706          * If the CPU's data cache slice is large enough, "pcp->batch"
5707          * pages can be preserved in the PCP before draining it on
5708          * consecutive high-order page freeing without allocation.
5709          * This can reduce zone lock contention without hurting
5710          * cache-hot page sharing.
5711          */
5712         spin_lock(&pcp->lock);
5713         if ((cci->per_cpu_data_slice_size >> PAGE_SHIFT) > 3 * pcp->batch)
5714                 pcp->flags |= PCPF_FREE_HIGH_BATCH;
5715         else
5716                 pcp->flags &= ~PCPF_FREE_HIGH_BATCH;
5717         spin_unlock(&pcp->lock);
5718 }
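/*
 * Worked example (a sketch, assuming 4 KiB pages): a 1 MiB per-CPU data
 * cache slice is 256 pages; with pcp->batch == 63, 256 > 3 * 63 = 189,
 * so PCPF_FREE_HIGH_BATCH would be set for that CPU.
 */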
5719 
5720 void setup_pcp_cacheinfo(unsigned int cpu)
5721 {
5722         struct zone *zone;
5723 
5724         for_each_populated_zone(zone)
5725                 zone_pcp_update_cacheinfo(zone, cpu);
5726 }
5727 
5728 /*
5729  * Allocate per cpu pagesets and initialize them.
5730  * Before this call only boot pagesets were available.
5731  */
5732 void __init setup_per_cpu_pageset(void)
5733 {
5734         struct pglist_data *pgdat;
5735         struct zone *zone;
5736         int __maybe_unused cpu;
5737 
5738         for_each_populated_zone(zone)
5739                 setup_zone_pageset(zone);
5740 
5741 #ifdef CONFIG_NUMA
5742         /*
5743          * Unpopulated zones continue using the boot pagesets.
5744          * The numa stats for these pagesets need to be reset.
5745          * Otherwise, they will end up skewing the stats of
5746          * the nodes these zones are associated with.
5747          */
5748         for_each_possible_cpu(cpu) {
5749                 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu);
5750                 memset(pzstats->vm_numa_event, 0,
5751                        sizeof(pzstats->vm_numa_event));
5752         }
5753 #endif
5754 
5755         for_each_online_pgdat(pgdat)
5756                 pgdat->per_cpu_nodestats =
5757                         alloc_percpu(struct per_cpu_nodestat);
5758         store_early_perpage_metadata();
5759 }
5760 
5761 __meminit void zone_pcp_init(struct zone *zone)
5762 {
5763         /*
5764          * per cpu subsystem is not up at this point. The following code
5765          * relies on the ability of the linker to provide the
5766          * offset of a (static) per cpu variable into the per cpu area.
5767          */
5768         zone->per_cpu_pageset = &boot_pageset;
5769         zone->per_cpu_zonestats = &boot_zonestats;
5770         zone->pageset_high_min = BOOT_PAGESET_HIGH;
5771         zone->pageset_high_max = BOOT_PAGESET_HIGH;
5772         zone->pageset_batch = BOOT_PAGESET_BATCH;
5773 
5774         if (populated_zone(zone))
5775                 pr_debug("  %s zone: %lu pages, LIFO batch:%u\n", zone->name,
5776                          zone->present_pages, zone_batchsize(zone));
5777 }
5778 
5779 void adjust_managed_page_count(struct page *page, long count)
5780 {
5781         atomic_long_add(count, &page_zone(page)->managed_pages);
5782         totalram_pages_add(count);
5783 }
5784 EXPORT_SYMBOL(adjust_managed_page_count);
5785 
5786 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
5787 {
5788         void *pos;
5789         unsigned long pages = 0;
5790 
5791         start = (void *)PAGE_ALIGN((unsigned long)start);
5792         end = (void *)((unsigned long)end & PAGE_MASK);
5793         for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
5794                 struct page *page = virt_to_page(pos);
5795                 void *direct_map_addr;
5796 
5797                 /*
5798                  * 'direct_map_addr' might be different from 'pos'
5799                  * because some architectures' virt_to_page()
5800                  * work with aliases.  Getting the direct map
5801                  * address ensures that we get a _writeable_
5802                  * alias for the memset().
5803                  */
5804                 direct_map_addr = page_address(page);
5805                 /*
5806                  * Perform a kasan-unchecked memset() since this memory
5807                  * has not been initialized.
5808                  */
5809                 direct_map_addr = kasan_reset_tag(direct_map_addr);
5810                 if ((unsigned int)poison <= 0xFF)
5811                         memset(direct_map_addr, poison, PAGE_SIZE);
5812 
5813                 free_reserved_page(page);
5814         }
5815 
5816         if (pages && s)
5817                 pr_info("Freeing %s memory: %ldK\n", s, K(pages));
5818 
5819         return pages;
5820 }
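/*
 * Usage sketch: this is roughly how free_initmem_default() in
 * include/linux/mm.h returns the .init sections to the buddy allocator;
 * a poison value outside 0x00-0xFF (e.g. -1) skips the memset() above:
 *
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 */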
5821 
5822 void free_reserved_page(struct page *page)
5823 {
5824         if (mem_alloc_profiling_enabled()) {
5825                 union codetag_ref *ref = get_page_tag_ref(page);
5826 
5827                 if (ref) {
5828                         set_codetag_empty(ref);
5829                         put_page_tag_ref(ref);
5830                 }
5831         }
5832         ClearPageReserved(page);
5833         init_page_count(page);
5834         __free_page(page);
5835         adjust_managed_page_count(page, 1);
5836 }
5837 EXPORT_SYMBOL(free_reserved_page);
5838 
5839 static int page_alloc_cpu_dead(unsigned int cpu)
5840 {
5841         struct zone *zone;
5842 
5843         lru_add_drain_cpu(cpu);
5844         mlock_drain_remote(cpu);
5845         drain_pages(cpu);
5846 
5847         /*
5848          * Spill the event counters of the dead processor
5849          * into the current processor's event counters.
5850          * This artificially elevates the count of the current
5851          * processor.
5852          */
5853         vm_events_fold_cpu(cpu);
5854 
5855         /*
5856          * Zero the differential counters of the dead processor
5857          * so that the vm statistics are consistent.
5858          *
5859          * This is only okay since the processor is dead and cannot
5860          * race with what we are doing.
5861          */
5862         cpu_vm_stats_fold(cpu);
5863 
5864         for_each_populated_zone(zone)
5865                 zone_pcp_update(zone, 0);
5866 
5867         return 0;
5868 }
5869 
5870 static int page_alloc_cpu_online(unsigned int cpu)
5871 {
5872         struct zone *zone;
5873 
5874         for_each_populated_zone(zone)
5875                 zone_pcp_update(zone, 1);
5876         return 0;
5877 }
5878 
5879 void __init page_alloc_init_cpuhp(void)
5880 {
5881         int ret;
5882 
5883         ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC,
5884                                         "mm/page_alloc:pcp",
5885                                         page_alloc_cpu_online,
5886                                         page_alloc_cpu_dead);
5887         WARN_ON(ret < 0);
5888 }
5889 
5890 /*
5891  * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
5892  *      or min_free_kbytes changes.
5893  */
5894 static void calculate_totalreserve_pages(void)
5895 {
5896         struct pglist_data *pgdat;
5897         unsigned long reserve_pages = 0;
5898         enum zone_type i, j;
5899 
5900         for_each_online_pgdat(pgdat) {
5901 
5902                 pgdat->totalreserve_pages = 0;
5903 
5904                 for (i = 0; i < MAX_NR_ZONES; i++) {
5905                         struct zone *zone = pgdat->node_zones + i;
5906                         long max = 0;
5907                         unsigned long managed_pages = zone_managed_pages(zone);
5908 
5909                         /* Find valid and maximum lowmem_reserve in the zone */
5910                         for (j = i; j < MAX_NR_ZONES; j++) {
5911                                 if (zone->lowmem_reserve[j] > max)
5912                                         max = zone->lowmem_reserve[j];
5913                         }
5914 
5915                         /* we treat the high watermark as reserved pages. */
5916                         max += high_wmark_pages(zone);
5917 
5918                         if (max > managed_pages)
5919                                 max = managed_pages;
5920 
5921                         pgdat->totalreserve_pages += max;
5922 
5923                         reserve_pages += max;
5924                 }
5925         }
5926         totalreserve_pages = reserve_pages;
5927 }
5928 
5929 /*
5930  * setup_per_zone_lowmem_reserve - called whenever
5931  *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
5932  *      has a correct pages reserved value, so an adequate number of
5933  *      pages are left in the zone after a successful __alloc_pages().
5934  */
5935 static void setup_per_zone_lowmem_reserve(void)
5936 {
5937         struct pglist_data *pgdat;
5938         enum zone_type i, j;
5939 
5940         for_each_online_pgdat(pgdat) {
5941                 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
5942                         struct zone *zone = &pgdat->node_zones[i];
5943                         int ratio = sysctl_lowmem_reserve_ratio[i];
5944                         bool clear = !ratio || !zone_managed_pages(zone);
5945                         unsigned long managed_pages = 0;
5946 
5947                         for (j = i + 1; j < MAX_NR_ZONES; j++) {
5948                                 struct zone *upper_zone = &pgdat->node_zones[j];
5949                                 bool empty = !zone_managed_pages(upper_zone);
5950 
5951                                 managed_pages += zone_managed_pages(upper_zone);
5952 
5953                                 if (clear || empty)
5954                                         zone->lowmem_reserve[j] = 0;
5955                                 else
5956                                         zone->lowmem_reserve[j] = managed_pages / ratio;
5957                         }
5958                 }
5959         }
5960 
5961         /* update totalreserve_pages */
5962         calculate_totalreserve_pages();
5963 }
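/*
 * Worked example (a sketch): with the default
 * sysctl_lowmem_reserve_ratio[ZONE_DMA32] of 256 and a ZONE_NORMAL
 * above it managing 1048576 pages, DMA32's lowmem_reserve[ZONE_NORMAL]
 * becomes 1048576 / 256 = 4096 pages (16 MiB with 4 KiB pages) that a
 * ZONE_NORMAL-capable allocation falling back into DMA32 must leave free.
 */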
5964 
5965 static void __setup_per_zone_wmarks(void)
5966 {
5967         unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5968         unsigned long lowmem_pages = 0;
5969         struct zone *zone;
5970         unsigned long flags;
5971 
5972         /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */
5973         for_each_zone(zone) {
5974                 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
5975                         lowmem_pages += zone_managed_pages(zone);
5976         }
5977 
5978         for_each_zone(zone) {
5979                 u64 tmp;
5980 
5981                 spin_lock_irqsave(&zone->lock, flags);
5982                 tmp = (u64)pages_min * zone_managed_pages(zone);
5983                 tmp = div64_ul(tmp, lowmem_pages);
5984                 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
5985                         /*
5986                          * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5987                          * need highmem and movable zones pages, so cap pages_min
5988                          * to a small value here.
5989                          *
5990                          * The (WMARK_HIGH - WMARK_LOW) and (WMARK_LOW - WMARK_MIN)
5991                          * deltas control async page reclaim, and so should
5992                          * not be capped for highmem and movable zones.
5993                          */
5994                         unsigned long min_pages;
5995 
5996                         min_pages = zone_managed_pages(zone) / 1024;
5997                         min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
5998                         zone->_watermark[WMARK_MIN] = min_pages;
5999                 } else {
6000                         /*
6001                          * If it's a lowmem zone, reserve a number of pages
6002                          * proportionate to the zone's size.
6003                          */
6004                         zone->_watermark[WMARK_MIN] = tmp;
6005                 }
6006 
6007                 /*
6008                  * Set the kswapd watermarks distance according to the
6009                  * scale factor in proportion to available memory, but
6010                  * ensure a minimum size on small systems.
6011                  */
6012                 tmp = max_t(u64, tmp >> 2,
6013                             mult_frac(zone_managed_pages(zone),
6014                                       watermark_scale_factor, 10000));
6015 
6016                 zone->watermark_boost = 0;
6017                 zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
6018                 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
6019                 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
6020 
6021                 spin_unlock_irqrestore(&zone->lock, flags);
6022         }
6023 
6024         /* update totalreserve_pages */
6025         calculate_totalreserve_pages();
6026 }
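/*
 * Worked example (a sketch, assuming 4 KiB pages and the default
 * watermark_scale_factor of 10): min_free_kbytes == 4096 gives
 * pages_min == 1024; for a single lowmem zone managing 1048576 pages,
 * WMARK_MIN == 1024 and the kswapd delta is
 * max(1024 >> 2, 1048576 * 10 / 10000) == 1048, so WMARK_LOW == 2072
 * and WMARK_HIGH == 3120.
 */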
6027 
6028 /**
6029  * setup_per_zone_wmarks - called when min_free_kbytes changes
6030  * or when memory is hot-{added|removed}
6031  *
6032  * Ensures that the watermark[min,low,high] values for each zone are set
6033  * correctly with respect to min_free_kbytes.
6034  */
6035 void setup_per_zone_wmarks(void)
6036 {
6037         struct zone *zone;
6038         static DEFINE_SPINLOCK(lock);
6039 
6040         spin_lock(&lock);
6041         __setup_per_zone_wmarks();
6042         spin_unlock(&lock);
6043 
6044         /*
6045          * The watermark sizes have changed, so update the pcpu batch
6046          * and high limits, or the limits may be inappropriate.
6047          */
6048         for_each_zone(zone)
6049                 zone_pcp_update(zone, 0);
6050 }
6051 
6052 /*
6053  * Initialise min_free_kbytes.
6054  *
6055  * For small machines we want it small (128k min).  For large machines
6056  * we want it large (256MB max).  But it is not linear, because network
6057  * bandwidth does not increase linearly with machine size.  We use
6058  *
6059  *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
6060  *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
6061  *
6062  * which yields
6063  *
6064  * 16MB:        512k
6065  * 32MB:        724k
6066  * 64MB:        1024k
6067  * 128MB:       1448k
6068  * 256MB:       2048k
6069  * 512MB:       2896k
6070  * 1024MB:      4096k
6071  * 2048MB:      5792k
6072  * 4096MB:      8192k
6073  * 8192MB:      11584k
6074  * 16384MB:     16384k
6075  */
6076 void calculate_min_free_kbytes(void)
6077 {
6078         unsigned long lowmem_kbytes;
6079         int new_min_free_kbytes;
6080 
6081         lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
6082         new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
6083 
6084         if (new_min_free_kbytes > user_min_free_kbytes)
6085                 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144);
6086         else
6087                 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
6088                                 new_min_free_kbytes, user_min_free_kbytes);
6089 
6090 }
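/*
 * Worked example: 4 GiB of lowmem is 4194304 kB, and
 * int_sqrt(4194304 * 16) == int_sqrt(67108864) == 8192, matching the
 * "4096MB: 8192k" row in the table above; the result is then clamped
 * to [128, 262144] unless a user-set value takes precedence.
 */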
6091 
6092 int __meminit init_per_zone_wmark_min(void)
6093 {
6094         calculate_min_free_kbytes();
6095         setup_per_zone_wmarks();
6096         refresh_zone_stat_thresholds();
6097         setup_per_zone_lowmem_reserve();
6098 
6099 #ifdef CONFIG_NUMA
6100         setup_min_unmapped_ratio();
6101         setup_min_slab_ratio();
6102 #endif
6103 
6104         khugepaged_min_free_kbytes_update();
6105 
6106         return 0;
6107 }
6108 postcore_initcall(init_per_zone_wmark_min)
6109 
6110 /*
6111  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
6112  *      that we can recompute the zone watermarks whenever min_free_kbytes
6113  *      changes.
6114  */
6115 static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write,
6116                 void *buffer, size_t *length, loff_t *ppos)
6117 {
6118         int rc;
6119 
6120         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6121         if (rc)
6122                 return rc;
6123 
6124         if (write) {
6125                 user_min_free_kbytes = min_free_kbytes;
6126                 setup_per_zone_wmarks();
6127         }
6128         return 0;
6129 }
6130 
6131 static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write,
6132                 void *buffer, size_t *length, loff_t *ppos)
6133 {
6134         int rc;
6135 
6136         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6137         if (rc)
6138                 return rc;
6139 
6140         if (write)
6141                 setup_per_zone_wmarks();
6142 
6143         return 0;
6144 }
6145 
6146 #ifdef CONFIG_NUMA
6147 static void setup_min_unmapped_ratio(void)
6148 {
6149         pg_data_t *pgdat;
6150         struct zone *zone;
6151 
6152         for_each_online_pgdat(pgdat)
6153                 pgdat->min_unmapped_pages = 0;
6154 
6155         for_each_zone(zone)
6156                 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
6157                                                          sysctl_min_unmapped_ratio) / 100;
6158 }
6159 
6160 
6161 static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write,
6162                 void *buffer, size_t *length, loff_t *ppos)
6163 {
6164         int rc;
6165 
6166         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6167         if (rc)
6168                 return rc;
6169 
6170         setup_min_unmapped_ratio();
6171 
6172         return 0;
6173 }
6174 
6175 static void setup_min_slab_ratio(void)
6176 {
6177         pg_data_t *pgdat;
6178         struct zone *zone;
6179 
6180         for_each_online_pgdat(pgdat)
6181                 pgdat->min_slab_pages = 0;
6182 
6183         for_each_zone(zone)
6184                 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
6185                                                      sysctl_min_slab_ratio) / 100;
6186 }
6187 
6188 static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write,
6189                 void *buffer, size_t *length, loff_t *ppos)
6190 {
6191         int rc;
6192 
6193         rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
6194         if (rc)
6195                 return rc;
6196 
6197         setup_min_slab_ratio();
6198 
6199         return 0;
6200 }
6201 #endif
6202 
6203 /*
6204  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
6205  *      proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
6206  *      whenever sysctl_lowmem_reserve_ratio changes.
6207  *
6208  * The reserve ratio obviously has absolutely no relation with the
6209  * minimum watermarks. The lowmem reserve ratio is only meaningful
6210  * in relation to the boot time zone sizes.
6211  */
6212 static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table,
6213                 int write, void *buffer, size_t *length, loff_t *ppos)
6214 {
6215         int i;
6216 
6217         proc_dointvec_minmax(table, write, buffer, length, ppos);
6218 
6219         for (i = 0; i < MAX_NR_ZONES; i++) {
6220                 if (sysctl_lowmem_reserve_ratio[i] < 1)
6221                         sysctl_lowmem_reserve_ratio[i] = 0;
6222         }
6223 
6224         setup_per_zone_lowmem_reserve();
6225         return 0;
6226 }
6227 
6228 /*
6229  * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
6230  * cpu. It is the fraction of total pages in each zone that a hot per cpu
6231  * pagelist can have before it gets flushed back to the buddy allocator.
6232  */
6233 static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table,
6234                 int write, void *buffer, size_t *length, loff_t *ppos)
6235 {
6236         struct zone *zone;
6237         int old_percpu_pagelist_high_fraction;
6238         int ret;
6239 
6240         mutex_lock(&pcp_batch_high_lock);
6241         old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
6242 
6243         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
6244         if (!write || ret < 0)
6245                 goto out;
6246 
6247         /* Sanity checking to avoid pcp imbalance */
6248         if (percpu_pagelist_high_fraction &&
6249             percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
6250                 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
6251                 ret = -EINVAL;
6252                 goto out;
6253         }
6254 
6255         /* No change? */
6256         if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
6257                 goto out;
6258 
6259         for_each_populated_zone(zone)
6260                 zone_set_pageset_high_and_batch(zone, 0);
6261 out:
6262         mutex_unlock(&pcp_batch_high_lock);
6263         return ret;
6264 }
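/*
 * Usage sketch: writing 8 to /proc/sys/vm/percpu_pagelist_high_fraction
 * caps each zone's pcp->high at roughly an eighth of the zone split
 * across its local CPUs, and writing 0 restores the watermark-based
 * auto-tuning. Non-zero values below MIN_PERCPU_PAGELIST_HIGH_FRACTION
 * are rejected with -EINVAL by the handler above.
 */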
6265 
6266 static struct ctl_table page_alloc_sysctl_table[] = {
6267         {
6268                 .procname       = "min_free_kbytes",
6269                 .data           = &min_free_kbytes,
6270                 .maxlen         = sizeof(min_free_kbytes),
6271                 .mode           = 0644,
6272                 .proc_handler   = min_free_kbytes_sysctl_handler,
6273                 .extra1         = SYSCTL_ZERO,
6274         },
6275         {
6276                 .procname       = "watermark_boost_factor",
6277                 .data           = &watermark_boost_factor,
6278                 .maxlen         = sizeof(watermark_boost_factor),
6279                 .mode           = 0644,
6280                 .proc_handler   = proc_dointvec_minmax,
6281                 .extra1         = SYSCTL_ZERO,
6282         },
6283         {
6284                 .procname       = "watermark_scale_factor",
6285                 .data           = &watermark_scale_factor,
6286                 .maxlen         = sizeof(watermark_scale_factor),
6287                 .mode           = 0644,
6288                 .proc_handler   = watermark_scale_factor_sysctl_handler,
6289                 .extra1         = SYSCTL_ONE,
6290                 .extra2         = SYSCTL_THREE_THOUSAND,
6291         },
6292         {
6293                 .procname       = "percpu_pagelist_high_fraction",
6294                 .data           = &percpu_pagelist_high_fraction,
6295                 .maxlen         = sizeof(percpu_pagelist_high_fraction),
6296                 .mode           = 0644,
6297                 .proc_handler   = percpu_pagelist_high_fraction_sysctl_handler,
6298                 .extra1         = SYSCTL_ZERO,
6299         },
6300         {
6301                 .procname       = "lowmem_reserve_ratio",
6302                 .data           = &sysctl_lowmem_reserve_ratio,
6303                 .maxlen         = sizeof(sysctl_lowmem_reserve_ratio),
6304                 .mode           = 0644,
6305                 .proc_handler   = lowmem_reserve_ratio_sysctl_handler,
6306         },
6307 #ifdef CONFIG_NUMA
6308         {
6309                 .procname       = "numa_zonelist_order",
6310                 .data           = &numa_zonelist_order,
6311                 .maxlen         = NUMA_ZONELIST_ORDER_LEN,
6312                 .mode           = 0644,
6313                 .proc_handler   = numa_zonelist_order_handler,
6314         },
6315         {
6316                 .procname       = "min_unmapped_ratio",
6317                 .data           = &sysctl_min_unmapped_ratio,
6318                 .maxlen         = sizeof(sysctl_min_unmapped_ratio),
6319                 .mode           = 0644,
6320                 .proc_handler   = sysctl_min_unmapped_ratio_sysctl_handler,
6321                 .extra1         = SYSCTL_ZERO,
6322                 .extra2         = SYSCTL_ONE_HUNDRED,
6323         },
6324         {
6325                 .procname       = "min_slab_ratio",
6326                 .data           = &sysctl_min_slab_ratio,
6327                 .maxlen         = sizeof(sysctl_min_slab_ratio),
6328                 .mode           = 0644,
6329                 .proc_handler   = sysctl_min_slab_ratio_sysctl_handler,
6330                 .extra1         = SYSCTL_ZERO,
6331                 .extra2         = SYSCTL_ONE_HUNDRED,
6332         },
6333 #endif
6334 };
6335 
6336 void __init page_alloc_sysctl_init(void)
6337 {
6338         register_sysctl_init("vm", page_alloc_sysctl_table);
6339 }
6340 
6341 #ifdef CONFIG_CONTIG_ALLOC
6342 /* Usage: See admin-guide/dynamic-debug-howto.rst */
6343 static void alloc_contig_dump_pages(struct list_head *page_list)
6344 {
6345         DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
6346 
6347         if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
6348                 struct page *page;
6349 
6350                 dump_stack();
6351                 list_for_each_entry(page, page_list, lru)
6352                         dump_page(page, "migration failure");
6353         }
6354 }
6355 
6356 /*
6357  * [start, end) must belong to a single zone.
6358  * @migratetype: the migratetype reported by the
6359  *              trace_mm_alloc_contig_migrate_range_info tracepoint.
6360  */
6361 int __alloc_contig_migrate_range(struct compact_control *cc,
6362                                         unsigned long start, unsigned long end,
6363                                         int migratetype)
6364 {
6365         /* This function is based on compact_zone() from compaction.c. */
6366         unsigned int nr_reclaimed;
6367         unsigned long pfn = start;
6368         unsigned int tries = 0;
6369         int ret = 0;
6370         struct migration_target_control mtc = {
6371                 .nid = zone_to_nid(cc->zone),
6372                 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
6373                 .reason = MR_CONTIG_RANGE,
6374         };
6375         struct page *page;
6376         unsigned long total_mapped = 0;
6377         unsigned long total_migrated = 0;
6378         unsigned long total_reclaimed = 0;
6379 
6380         lru_cache_disable();
6381 
6382         while (pfn < end || !list_empty(&cc->migratepages)) {
6383                 if (fatal_signal_pending(current)) {
6384                         ret = -EINTR;
6385                         break;
6386                 }
6387 
6388                 if (list_empty(&cc->migratepages)) {
6389                         cc->nr_migratepages = 0;
6390                         ret = isolate_migratepages_range(cc, pfn, end);
6391                         if (ret && ret != -EAGAIN)
6392                                 break;
6393                         pfn = cc->migrate_pfn;
6394                         tries = 0;
6395                 } else if (++tries == 5) {
6396                         ret = -EBUSY;
6397                         break;
6398                 }
6399 
6400                 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
6401                                                         &cc->migratepages);
6402                 cc->nr_migratepages -= nr_reclaimed;
6403 
6404                 if (trace_mm_alloc_contig_migrate_range_info_enabled()) {
6405                         total_reclaimed += nr_reclaimed;
6406                         list_for_each_entry(page, &cc->migratepages, lru) {
6407                                 struct folio *folio = page_folio(page);
6408 
6409                                 total_mapped += folio_mapped(folio) *
6410                                                 folio_nr_pages(folio);
6411                         }
6412                 }
6413 
6414                 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
6415                         NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);
6416 
6417                 if (trace_mm_alloc_contig_migrate_range_info_enabled() && !ret)
6418                         total_migrated += cc->nr_migratepages;
6419 
6420                 /*
6421                  * On -ENOMEM, migrate_pages() bails out right away. It is
6422                  * pointless to retry on this error, so do the same here.
6423                  */
6424                 if (ret == -ENOMEM)
6425                         break;
6426         }
6427 
6428         lru_cache_enable();
6429         if (ret < 0) {
6430                 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
6431                         alloc_contig_dump_pages(&cc->migratepages);
6432                 putback_movable_pages(&cc->migratepages);
6433         }
6434 
6435         trace_mm_alloc_contig_migrate_range_info(start, end, migratetype,
6436                                                  total_migrated,
6437                                                  total_reclaimed,
6438                                                  total_mapped);
6439         return (ret < 0) ? ret : 0;
6440 }
6441 
6442 /**
6443  * alloc_contig_range() -- tries to allocate given range of pages
6444  * @start:      start PFN to allocate
6445  * @end:        one-past-the-last PFN to allocate
6446  * @migratetype:        migratetype of the underlying pageblocks (either
6447  *                      #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
6448  *                      in range must have the same migratetype and it must
6449  *                      be either of the two.
6450  * @gfp_mask:   GFP mask to use during compaction
6451  *
6452  * The PFN range does not have to be pageblock aligned. The PFN range must
6453  * belong to a single zone.
6454  *
6455  * The first thing this routine does is attempt to MIGRATE_ISOLATE all
6456  * pageblocks in the range.  Once isolated, the pageblocks should not
6457  * be modified by others.
6458  *
6459  * Return: zero on success or negative error code.  On success all
6460  * pages which PFN is in [start, end) are allocated for the caller and
6461  * need to be freed with free_contig_range().
6462  */
6463 int alloc_contig_range_noprof(unsigned long start, unsigned long end,
6464                        unsigned migratetype, gfp_t gfp_mask)
6465 {
6466         unsigned long outer_start, outer_end;
6467         int ret = 0;
6468 
6469         struct compact_control cc = {
6470                 .nr_migratepages = 0,
6471                 .order = -1,
6472                 .zone = page_zone(pfn_to_page(start)),
6473                 .mode = MIGRATE_SYNC,
6474                 .ignore_skip_hint = true,
6475                 .no_set_skip_hint = true,
6476                 .gfp_mask = current_gfp_context(gfp_mask),
6477                 .alloc_contig = true,
6478         };
6479         INIT_LIST_HEAD(&cc.migratepages);
6480 
6481         /*
6482          * What we do here is we mark all pageblocks in range as
6483          * MIGRATE_ISOLATE.  Because pageblock and max order pages may
6484          * have different sizes, and due to the way the page allocator
6485          * works, start_isolate_page_range() has special handling for this.
6486          *
6487          * Once the pageblocks are marked as MIGRATE_ISOLATE, we
6488          * migrate the pages from an unaligned range (i.e. pages that
6489          * we are interested in). This will put all the pages in
6490          * range back to page allocator as MIGRATE_ISOLATE.
6491          *
6492          * When this is done, we take the pages in range from page
6493          * allocator removing them from the buddy system.  This way
6494          * page allocator will never consider using them.
6495          *
6496          * This lets us mark the pageblocks back as
6497          * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
6498          * aligned range but not in the unaligned, original range are
6499          * put back to page allocator so that buddy can use them.
6500          */
6501 
6502         ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
6503         if (ret)
6504                 goto done;
6505 
6506         drain_all_pages(cc.zone);
6507 
6508         /*
6509          * In case of -EBUSY, we'd like to know which page causes the problem.
6510          * So, just fall through. test_pages_isolated() has a tracepoint
6511          * which will report the busy page.
6512          *
6513          * It is possible that busy pages could become available before
6514          * the call to test_pages_isolated, and the range will actually be
6515          * allocated.  So, if we fall through, be sure to clear ret so that
6516          * -EBUSY is not accidentally used or returned to the caller.
6517          */
6518         ret = __alloc_contig_migrate_range(&cc, start, end, migratetype);
6519         if (ret && ret != -EBUSY)
6520                 goto done;
6521         ret = 0;
6522 
6523         /*
6524          * Pages from [start, end) are within pageblock_nr_pages
6525          * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
6526          * more, all pages in [start, end) are free in page allocator.
6527          * What we are going to do is to allocate all pages from
6528          * [start, end) (that is remove them from page allocator).
6529          *
6530          * The only problem is that pages at the beginning and at the
6531          * end of the interesting range may not be aligned with pages that
6532          * the page allocator holds, i.e. they can be part of higher order
6533          * pages.  Because of this, we reserve the bigger range and
6534          * once this is done free the pages we are not interested in.
6535          *
6536          * We don't have to hold zone->lock here because the pages are
6537          * isolated thus they won't get removed from buddy.
6538          */
6539         outer_start = find_large_buddy(start);
6540 
6541         /* Make sure the range is really isolated. */
6542         if (test_pages_isolated(outer_start, end, 0)) {
6543                 ret = -EBUSY;
6544                 goto done;
6545         }
6546 
6547         /* Grab isolated pages from freelists. */
6548         outer_end = isolate_freepages_range(&cc, outer_start, end);
6549         if (!outer_end) {
6550                 ret = -EBUSY;
6551                 goto done;
6552         }
6553 
6554         /* Free head and tail (if any) */
6555         if (start != outer_start)
6556                 free_contig_range(outer_start, start - outer_start);
6557         if (end != outer_end)
6558                 free_contig_range(end, outer_end - end);
6559 
6560 done:
6561         undo_isolate_page_range(start, end, migratetype);
6562         return ret;
6563 }
6564 EXPORT_SYMBOL(alloc_contig_range_noprof);
6565 
6566 static int __alloc_contig_pages(unsigned long start_pfn,
6567                                 unsigned long nr_pages, gfp_t gfp_mask)
6568 {
6569         unsigned long end_pfn = start_pfn + nr_pages;
6570 
6571         return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE,
6572                                    gfp_mask);
6573 }
6574 
6575 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
6576                                    unsigned long nr_pages)
6577 {
6578         unsigned long i, end_pfn = start_pfn + nr_pages;
6579         struct page *page;
6580 
6581         for (i = start_pfn; i < end_pfn; i++) {
6582                 page = pfn_to_online_page(i);
6583                 if (!page)
6584                         return false;
6585 
6586                 if (page_zone(page) != z)
6587                         return false;
6588 
6589                 if (PageReserved(page))
6590                         return false;
6591 
6592                 if (PageHuge(page))
6593                         return false;
6594         }
6595         return true;
6596 }
6597 
6598 static bool zone_spans_last_pfn(const struct zone *zone,
6599                                 unsigned long start_pfn, unsigned long nr_pages)
6600 {
6601         unsigned long last_pfn = start_pfn + nr_pages - 1;
6602 
6603         return zone_spans_pfn(zone, last_pfn);
6604 }
6605 
6606 /**
6607  * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
6608  * @nr_pages:   Number of contiguous pages to allocate
6609  * @gfp_mask:   GFP mask to limit search and used during compaction
6610  * @nid:        Target node
6611  * @nodemask:   Mask for other possible nodes
6612  *
6613  * This routine is a wrapper around alloc_contig_range(). It scans over zones
6614  * on an applicable zonelist to find a contiguous pfn range which can then be
6615  * tried for allocation with alloc_contig_range(). This routine is intended
6616  * for allocation requests which cannot be fulfilled with the buddy allocator.
6617  *
6618  * The allocated memory is always aligned to a page boundary. If nr_pages is a
6619  * power of two, then the allocated range is also guaranteed to be aligned to
6620  * the same nr_pages (e.g. a 1GB request would be aligned to 1GB).
6621  *
6622  * Allocated pages can be freed with free_contig_range() or by manually calling
6623  * __free_page() on each allocated page.
6624  *
6625  * Return: pointer to contiguous pages on success, or NULL if not successful.
6626  */
6627 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
6628                                  int nid, nodemask_t *nodemask)
6629 {
6630         unsigned long ret, pfn, flags;
6631         struct zonelist *zonelist;
6632         struct zone *zone;
6633         struct zoneref *z;
6634 
6635         zonelist = node_zonelist(nid, gfp_mask);
6636         for_each_zone_zonelist_nodemask(zone, z, zonelist,
6637                                         gfp_zone(gfp_mask), nodemask) {
6638                 spin_lock_irqsave(&zone->lock, flags);
6639 
6640                 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
6641                 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
6642                         if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
6643                                 /*
6644                                  * We release the zone lock here because
6645                                  * alloc_contig_range() will also lock the zone
6646                                  * at some point. If there's an allocation
6647                                  * spinning on this lock, it may win the race
6648                                  * and cause alloc_contig_range() to fail...
6649                                  */
6650                                 spin_unlock_irqrestore(&zone->lock, flags);
6651                                 ret = __alloc_contig_pages(pfn, nr_pages,
6652                                                         gfp_mask);
6653                                 if (!ret)
6654                                         return pfn_to_page(pfn);
6655                                 spin_lock_irqsave(&zone->lock, flags);
6656                         }
6657                         pfn += nr_pages;
6658                 }
6659                 spin_unlock_irqrestore(&zone->lock, flags);
6660         }
6661         return NULL;
6662 }
6663 #endif /* CONFIG_CONTIG_ALLOC */
6664 
6665 void free_contig_range(unsigned long pfn, unsigned long nr_pages)
6666 {
6667         unsigned long count = 0;
6668 
6669         for (; nr_pages--; pfn++) {
6670                 struct page *page = pfn_to_page(pfn);
6671 
6672                 count += page_count(page) != 1;
6673                 __free_page(page);
6674         }
6675         WARN(count != 0, "%lu pages are still in use!\n", count);
6676 }
6677 EXPORT_SYMBOL(free_contig_range);
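/*
 * Usage sketch (a hypothetical caller, requiring CONFIG_CONTIG_ALLOC):
 * allocating and releasing a physically contiguous 1 GiB range:
 *
 *	unsigned long nr = SZ_1G / PAGE_SIZE;
 *	struct page *page = alloc_contig_pages(nr,
 *					GFP_KERNEL | __GFP_NOWARN,
 *					first_online_node, NULL);
 *	if (page)
 *		free_contig_range(page_to_pfn(page), nr);
 */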
6678 
6679 /*
6680  * Effectively disable pcplists for the zone by setting the high limit to 0
6681  * and draining all cpus. A concurrent page freeing on another CPU that's about
6682  * to put the page on pcplist will either finish before the drain and the page
6683  * will be drained, or observe the new high limit and skip the pcplist.
6684  *
6685  * Must be paired with a call to zone_pcp_enable().
6686  */
6687 void zone_pcp_disable(struct zone *zone)
6688 {
6689         mutex_lock(&pcp_batch_high_lock);
6690         __zone_set_pageset_high_and_batch(zone, 0, 0, 1);
6691         __drain_all_pages(zone, true);
6692 }
6693 
6694 void zone_pcp_enable(struct zone *zone)
6695 {
6696         __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min,
6697                 zone->pageset_high_max, zone->pageset_batch);
6698         mutex_unlock(&pcp_batch_high_lock);
6699 }
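/*
 * Usage sketch: callers such as the memory offlining path bracket their
 * work with this pair so that no pages linger on pcplists while a range
 * is isolated:
 *
 *	zone_pcp_disable(zone);
 *	... isolate and process the page range ...
 *	zone_pcp_enable(zone);
 */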
6700 
6701 void zone_pcp_reset(struct zone *zone)
6702 {
6703         int cpu;
6704         struct per_cpu_zonestat *pzstats;
6705 
6706         if (zone->per_cpu_pageset != &boot_pageset) {
6707                 for_each_online_cpu(cpu) {
6708                         pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
6709                         drain_zonestat(zone, pzstats);
6710                 }
6711                 free_percpu(zone->per_cpu_pageset);
6712                 zone->per_cpu_pageset = &boot_pageset;
6713                 if (zone->per_cpu_zonestats != &boot_zonestats) {
6714                         free_percpu(zone->per_cpu_zonestats);
6715                         zone->per_cpu_zonestats = &boot_zonestats;
6716                 }
6717         }
6718 }
6719 
6720 #ifdef CONFIG_MEMORY_HOTREMOVE
6721 /*
6722  * All pages in the range must be in a single zone, must not contain holes,
6723  * must span full sections, and must be isolated before calling this function.
6724  *
6725  * Returns the number of managed (non-PageOffline()) pages in the range: the
6726  * number of pages for which memory offlining code must adjust managed page
6727  * counters using adjust_managed_page_count().
6728  */
6729 unsigned long __offline_isolated_pages(unsigned long start_pfn,
6730                 unsigned long end_pfn)
6731 {
6732         unsigned long already_offline = 0, flags;
6733         unsigned long pfn = start_pfn;
6734         struct page *page;
6735         struct zone *zone;
6736         unsigned int order;
6737 
6738         offline_mem_sections(pfn, end_pfn);
6739         zone = page_zone(pfn_to_page(pfn));
6740         spin_lock_irqsave(&zone->lock, flags);
6741         while (pfn < end_pfn) {
6742                 page = pfn_to_page(pfn);
6743                 /*
6744                  * The HWPoisoned page may not be in the buddy system, and
6745                  * page_count() is not 0.
6746                  */
6747                 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
6748                         pfn++;
6749                         continue;
6750                 }
6751                 /*
6752                  * At this point all remaining PageOffline() pages have a
6753                  * reference count of 0 and can simply be skipped.
6754                  */
6755                 if (PageOffline(page)) {
6756                         BUG_ON(page_count(page));
6757                         BUG_ON(PageBuddy(page));
6758                         already_offline++;
6759                         pfn++;
6760                         continue;
6761                 }
6762 
6763                 BUG_ON(page_count(page));
6764                 BUG_ON(!PageBuddy(page));
6765                 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE);
6766                 order = buddy_order(page);
6767                 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE);
6768                 pfn += (1 << order);
6769         }
6770         spin_unlock_irqrestore(&zone->lock, flags);
6771 
6772         return end_pfn - start_pfn - already_offline;
6773 }
6774 #endif
6775 
6776 /*
6777  * This function returns a stable result only if called under zone lock.
6778  */
6779 bool is_free_buddy_page(const struct page *page)
6780 {
6781         unsigned long pfn = page_to_pfn(page);
6782         unsigned int order;
6783 
6784         for (order = 0; order < NR_PAGE_ORDERS; order++) {
6785                 const struct page *head = page - (pfn & ((1 << order) - 1));
6786 
6787                 if (PageBuddy(head) &&
6788                     buddy_order_unsafe(head) >= order)
6789                         break;
6790         }
6791 
6792         return order <= MAX_PAGE_ORDER;
6793 }
6794 EXPORT_SYMBOL(is_free_buddy_page);
6795 
6796 #ifdef CONFIG_MEMORY_FAILURE
6797 static inline void add_to_free_list(struct page *page, struct zone *zone,
6798                                     unsigned int order, int migratetype,
6799                                     bool tail)
6800 {
6801         __add_to_free_list(page, zone, order, migratetype, tail);
6802         account_freepages(zone, 1 << order, migratetype);
6803 }
6804 
6805 /*
6806  * Break down a higher-order page into sub-pages, and keep our target out of
6807  * the buddy allocator.
6808  */
6809 static void break_down_buddy_pages(struct zone *zone, struct page *page,
6810                                    struct page *target, int low, int high,
6811                                    int migratetype)
6812 {
6813         unsigned long size = 1 << high;
6814         struct page *current_buddy;
6815 
6816         while (high > low) {
6817                 high--;
6818                 size >>= 1;
6819 
6820                 if (target >= &page[size]) {
6821                         current_buddy = page;
6822                         page = page + size;
6823                 } else {
6824                         current_buddy = page + size;
6825                 }
6826 
6827                 if (set_page_guard(zone, current_buddy, high))
6828                         continue;
6829 
6830                 add_to_free_list(current_buddy, zone, high, migratetype, false);
6831                 set_buddy_order(current_buddy, high);
6832         }
6833 }
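/*
 * Worked example (a sketch): splitting an order-3 buddy at pfn 0 to
 * take out the page at pfn 5 frees an order-2 block at pfn 0, an
 * order-1 block at pfn 6 and an order-0 page at pfn 4, leaving only
 * pfn 5 off the free lists.
 */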
6834 
6835 /*
6836  * Take a page that will be marked as poisoned off the buddy allocator.
6837  */
6838 bool take_page_off_buddy(struct page *page)
6839 {
6840         struct zone *zone = page_zone(page);
6841         unsigned long pfn = page_to_pfn(page);
6842         unsigned long flags;
6843         unsigned int order;
6844         bool ret = false;
6845 
6846         spin_lock_irqsave(&zone->lock, flags);
6847         for (order = 0; order < NR_PAGE_ORDERS; order++) {
6848                 struct page *page_head = page - (pfn & ((1 << order) - 1));
6849                 int page_order = buddy_order(page_head);
6850 
6851                 if (PageBuddy(page_head) && page_order >= order) {
6852                         unsigned long pfn_head = page_to_pfn(page_head);
6853                         int migratetype = get_pfnblock_migratetype(page_head,
6854                                                                    pfn_head);
6855 
6856                         del_page_from_free_list(page_head, zone, page_order,
6857                                                 migratetype);
6858                         break_down_buddy_pages(zone, page_head, page, 0,
6859                                                 page_order, migratetype);
6860                         SetPageHWPoisonTakenOff(page);
6861                         ret = true;
6862                         break;
6863                 }
6864                 if (page_count(page_head) > 0)
6865                         break;
6866         }
6867         spin_unlock_irqrestore(&zone->lock, flags);
6868         return ret;
6869 }
6870 
6871 /*
6872  * Cancel takeoff done by take_page_off_buddy().
6873  */
6874 bool put_page_back_buddy(struct page *page)
6875 {
6876         struct zone *zone = page_zone(page);
6877         unsigned long flags;
6878         bool ret = false;
6879 
6880         spin_lock_irqsave(&zone->lock, flags);
6881         if (put_page_testzero(page)) {
6882                 unsigned long pfn = page_to_pfn(page);
6883                 int migratetype = get_pfnblock_migratetype(page, pfn);
6884 
6885                 ClearPageHWPoisonTakenOff(page);
6886                 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
6887                 if (TestClearPageHWPoison(page)) {
6888                         ret = true;
6889                 }
6890         }
6891         spin_unlock_irqrestore(&zone->lock, flags);
6892 
6893         return ret;
6894 }
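/*
 * Editorial note: the page goes back onto the free lists only if
 * put_page_testzero() drops the last reference; in that case the
 * taken-off marker is cleared, the page is handed to __free_one_page()
 * as an order-0 buddy, and success is reported only if PG_hwpoison was
 * actually set and cleared. Otherwise nothing changes and the function
 * returns false.
 */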
6895 #endif
6896 
6897 #ifdef CONFIG_ZONE_DMA
6898 bool has_managed_dma(void)
6899 {
6900         struct pglist_data *pgdat;
6901 
6902         for_each_online_pgdat(pgdat) {
6903                 struct zone *zone = &pgdat->node_zones[ZONE_DMA];
6904 
6905                 if (managed_zone(zone))
6906                         return true;
6907         }
6908         return false;
6909 }
6910 #endif /* CONFIG_ZONE_DMA */
6911 
6912 #ifdef CONFIG_UNACCEPTED_MEMORY
6913 
6914 /* Counts the number of zones with unaccepted pages. */
6915 static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages);
6916 
6917 static bool lazy_accept = true;
6918 
6919 static int __init accept_memory_parse(char *p)
6920 {
6921         if (!strcmp(p, "lazy")) {
6922                 lazy_accept = true;
6923                 return 0;
6924         } else if (!strcmp(p, "eager")) {
6925                 lazy_accept = false;
6926                 return 0;
6927         } else {
6928                 return -EINVAL;
6929         }
6930 }
6931 early_param("accept_memory", accept_memory_parse);
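/*
 * Editorial example (matches the parser above): the boot-time override
 * is given on the kernel command line as
 *
 *	accept_memory=eager	accept all memory up front during boot
 *	accept_memory=lazy	defer acceptance until first use (default)
 *
 * Any other value is rejected with -EINVAL.
 */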
6932 
6933 static bool page_contains_unaccepted(struct page *page, unsigned int order)
6934 {
6935         phys_addr_t start = page_to_phys(page);
6936         phys_addr_t end = start + (PAGE_SIZE << order);
6937 
6938         return range_contains_unaccepted_memory(start, end);
6939 }
6940 
6941 static void accept_page(struct page *page, unsigned int order)
6942 {
6943         phys_addr_t start = page_to_phys(page);
6944 
6945         accept_memory(start, start + (PAGE_SIZE << order));
6946 }
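/*
 * Editorial note: both helpers above translate a (page, order) pair
 * into the physical range [start, start + (PAGE_SIZE << order)) that
 * the architecture-specific range_contains_unaccepted_memory() and
 * accept_memory() primitives operate on.
 */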
6947 
6948 static bool try_to_accept_memory_one(struct zone *zone)
6949 {
6950         unsigned long flags;
6951         struct page *page;
6952         bool last;
6953 
6954         if (list_empty(&zone->unaccepted_pages))
6955                 return false;
6956 
6957         spin_lock_irqsave(&zone->lock, flags);
6958         page = list_first_entry_or_null(&zone->unaccepted_pages,
6959                                         struct page, lru);
6960         if (!page) {
6961                 spin_unlock_irqrestore(&zone->lock, flags);
6962                 return false;
6963         }
6964 
6965         list_del(&page->lru);
6966         last = list_empty(&zone->unaccepted_pages);
6967 
6968         account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
6969         __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
6970         spin_unlock_irqrestore(&zone->lock, flags);
6971 
6972         accept_page(page, MAX_PAGE_ORDER);
6973 
6974         __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
6975 
6976         if (last)
6977                 static_branch_dec(&zones_with_unaccepted_pages);
6978 
6979         return true;
6980 }
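/*
 * Editorial note: pages on the unaccepted list are accounted both as
 * free and as NR_UNACCEPTED, so both counters are rolled back under
 * the zone lock before the potentially slow accept_page() call runs
 * without it; the accepted MAX_PAGE_ORDER chunk is then fed back
 * through __free_pages_ok() with FPI_TO_TAIL so it lands at the tail
 * of the free list.
 */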
6981 
6982 static bool try_to_accept_memory(struct zone *zone, unsigned int order)
6983 {
6984         long to_accept;
6985         bool ret = false;
6986 
6987         /* How much do we need to accept to reach the high watermark? */
6988         to_accept = high_wmark_pages(zone) -
6989                     (zone_page_state(zone, NR_FREE_PAGES) -
6990                     __zone_watermark_unusable_free(zone, order, 0));
6991 
6992         /* Accept at least one page */
6993         do {
6994                 if (!try_to_accept_memory_one(zone))
6995                         break;
6996                 ret = true;
6997                 to_accept -= MAX_ORDER_NR_PAGES;
6998         } while (to_accept > 0);
6999 
7000         return ret;
7001 }
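/*
 * Editorial example (assumed values; with the default MAX_PAGE_ORDER
 * of 10, MAX_ORDER_NR_PAGES == 1024): given a high watermark of 4096
 * pages and 1024 usable free pages, to_accept starts at 3072, so the
 * loop accepts three 1024-page chunks before to_accept drops to 0 and
 * it exits -- the do/while shape guarantees at least one attempt even
 * when to_accept starts non-positive.
 */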
7002 
7003 static inline bool has_unaccepted_memory(void)
7004 {
7005         return static_branch_unlikely(&zones_with_unaccepted_pages);
7006 }
7007 
7008 static bool __free_unaccepted(struct page *page)
7009 {
7010         struct zone *zone = page_zone(page);
7011         unsigned long flags;
7012         bool first = false;
7013 
7014         if (!lazy_accept)
7015                 return false;
7016 
7017         spin_lock_irqsave(&zone->lock, flags);
7018         first = list_empty(&zone->unaccepted_pages);
7019         list_add_tail(&page->lru, &zone->unaccepted_pages);
7020         account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
7021         __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
7022         spin_unlock_irqrestore(&zone->lock, flags);
7023 
7024         if (first)
7025                 static_branch_inc(&zones_with_unaccepted_pages);
7026 
7027         return true;
7028 }
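/*
 * Editorial note: the static key is incremented only when a zone's
 * list goes from empty to non-empty (and decremented on the reverse
 * transition in try_to_accept_memory_one()), so the
 * has_unaccepted_memory() check stays a patched-out branch while no
 * zone holds unaccepted pages.
 */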
7029 
7030 #else
7031 
7032 static bool page_contains_unaccepted(struct page *page, unsigned int order)
7033 {
7034         return false;
7035 }
7036 
7037 static void accept_page(struct page *page, unsigned int order)
7038 {
7039 }
7040 
7041 static bool try_to_accept_memory(struct zone *zone, unsigned int order)
7042 {
7043         return false;
7044 }
7045 
7046 static inline bool has_unaccepted_memory(void)
7047 {
7048         return false;
7049 }
7050 
7051 static bool __free_unaccepted(struct page *page)
7052 {
7053         BUILD_BUG();
7054         return false;
7055 }
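/*
 * Editorial note: BUILD_BUG() turns any reachable call into a
 * compile-time error. With CONFIG_UNACCEPTED_MEMORY disabled,
 * page_contains_unaccepted() is constant false, so the branch that
 * calls __free_unaccepted() is provably dead and the compiler
 * eliminates it.
 */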
7056 
7057 #endif /* CONFIG_UNACCEPTED_MEMORY */
7058 
