TOMOYO Linux Cross Reference
Linux/mm/page_ext.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/page_ext.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate.h>
#include <linux/pgalloc_tag.h>

/*
 * struct page extension
 *
 * This is the feature to manage memory for extended data per page.
 *
 * Without it, we would have to modify struct page itself to store extra
 * data per page. That requires rebuilding the kernel, which is a really
 * time-consuming process, and sometimes a rebuild is impossible due to
 * third-party module dependencies. Finally, enlarging struct page could
 * cause unwanted changes in system behaviour.
 *
 * This feature is intended to overcome the problems mentioned above. It
 * allocates memory for extended data per page in a separate place rather
 * than in struct page itself. This memory can be accessed through the
 * accessor functions provided by this code. During the boot process, it
 * checks whether allocation of a huge chunk of memory is needed or not.
 * If not, it avoids allocating memory at all. With this advantage, we can
 * include this feature in the kernel by default and avoid rebuilds and
 * the related problems.
 *
 * To make this work well, there are two callbacks for clients. One is
 * the need callback, which is mandatory if the user wants to avoid
 * useless memory allocation at boot time. The other, the init callback,
 * is optional and is used to do proper initialization after memory is
 * allocated.
 *
 * The need callback is used to decide whether extended memory allocation
 * is needed or not. Sometimes users want to deactivate some features on
 * a given boot, making the extra memory unnecessary. In this case, to
 * avoid allocating a huge chunk of memory, each client indicates its
 * need for extra memory through the need callback. If one of the need
 * callbacks returns true, someone needs extra memory, so the page
 * extension core must allocate memory for page extension. If none of the
 * need callbacks returns true, memory isn't needed at all on this boot
 * and the page extension core can skip the allocation. As a result, no
 * memory is wasted.
 *
 * When a need callback returns true, page_ext checks if there is a
 * request for extra memory through size in struct page_ext_operations.
 * If it is non-zero, extra space is allocated for each page_ext entry
 * and the offset is returned to the user through offset in struct
 * page_ext_operations.
 *
 * The init callback is used to do proper initialization after page
 * extension is completely initialized. On sparse memory systems, the
 * extra memory is allocated some time later than the memmap; in other
 * words, the lifetime of the memory for page extension isn't the same
 * as that of the memmap for struct page. Therefore, clients can't store
 * extra data until page extension is initialized, even if pages are
 * already allocated and freely used. This could leave the extra data per
 * page in an inconsistent state, so, to prevent that, a client can use
 * this callback to initialize it correctly.
 */
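
/*
 * Illustrative sketch (not part of the upstream file): how a hypothetical
 * client might plug into the two callbacks described above. All foo_*
 * names are invented for the example; real clients such as page_owner_ops
 * below follow the same shape. Guarded out so the listing stays buildable.
 */
#ifdef PAGE_EXT_EXAMPLE
static bool foo_enabled;                /* e.g. set from an early_param() */

struct foo_data {
        unsigned long stamp;            /* per-page payload */
};

static bool need_foo(void)
{
        /* Claim memory only when the feature is enabled on this boot. */
        return foo_enabled;
}

static void init_foo(void)
{
        /* Runs once page_ext is fully set up; entries are usable now. */
}

static struct page_ext_operations foo_ops = {
        .size = sizeof(struct foo_data),        /* extra bytes per page */
        .need = need_foo,
        .init = init_foo,
};

/* The payload is then reached via the offset page_ext assigned: */
static struct foo_data *get_foo(struct page_ext *page_ext)
{
        return (void *)page_ext + foo_ops.offset;
}
#endif /* PAGE_EXT_EXAMPLE */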

#ifdef CONFIG_SPARSEMEM
#define PAGE_EXT_INVALID        (0x1)
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
        return true;
}
static struct page_ext_operations page_idle_ops __initdata = {
        .need = need_page_idle,
        .need_shared_flags = true,
};
#endif

static struct page_ext_operations *page_ext_ops[] __initdata = {
#ifdef CONFIG_PAGE_OWNER
        &page_owner_ops,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
        &page_idle_ops,
#endif
#ifdef CONFIG_MEM_ALLOC_PROFILING
        &page_alloc_tagging_ops,
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
        &page_table_check_ops,
#endif
};

unsigned long page_ext_size;

static unsigned long total_usage;

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
/*
 * To ensure correct allocation tagging for pages, page_ext should be available
 * before the first page allocation. Otherwise early task stacks will be
 * allocated before page_ext initialization and missing tags will be flagged.
 */
bool early_page_ext __meminitdata = true;
#else
bool early_page_ext __meminitdata;
#endif
static int __init setup_early_page_ext(char *str)
{
        early_page_ext = true;
        return 0;
}
early_param("early_page_ext", setup_early_page_ext);
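
/*
 * Usage note (illustrative): passing "early_page_ext" on the kernel
 * command line sets the flag above, so page_ext is brought up early
 * enough to cover the first boot-time allocations as well, which is
 * useful for clients such as page_owner or allocation tagging.
 */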

static bool __init invoke_need_callbacks(void)
{
        int i;
        int entries = ARRAY_SIZE(page_ext_ops);
        bool need = false;

        for (i = 0; i < entries; i++) {
                if (page_ext_ops[i]->need()) {
                        if (page_ext_ops[i]->need_shared_flags) {
                                page_ext_size = sizeof(struct page_ext);
                                break;
                        }
                }
        }

        for (i = 0; i < entries; i++) {
                if (page_ext_ops[i]->need()) {
                        page_ext_ops[i]->offset = page_ext_size;
                        page_ext_size += page_ext_ops[i]->size;
                        need = true;
                }
        }

        return need;
}
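
/*
 * Worked example (hypothetical numbers): suppose one enabled client sets
 * need_shared_flags and two enabled clients ask for 8 and 16 extra bytes.
 * The two loops above then produce:
 *
 *      page_ext_size = sizeof(struct page_ext)         (shared flags header)
 *      client A: offset = sizeof(struct page_ext),     page_ext_size += 8
 *      client B: offset = sizeof(struct page_ext) + 8, page_ext_size += 16
 *
 * so every page's entry is one struct page_ext followed by each client's
 * payload at its recorded offset, and entries are page_ext_size apart.
 */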

static void __init invoke_init_callbacks(void)
{
        int i;
        int entries = ARRAY_SIZE(page_ext_ops);

        for (i = 0; i < entries; i++) {
                if (page_ext_ops[i]->init)
                        page_ext_ops[i]->init();
        }
}

/* Entries are page_ext_size (not sizeof(struct page_ext)) bytes apart. */
static inline struct page_ext *get_entry(void *base, unsigned long index)
{
        return base + page_ext_size * index;
}

#ifndef CONFIG_SPARSEMEM
void __init page_ext_init_flatmem_late(void)
{
        invoke_init_callbacks();
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
        pgdat->node_page_ext = NULL;
}

static struct page_ext *lookup_page_ext(const struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        unsigned long index;
        struct page_ext *base;

        WARN_ON_ONCE(!rcu_read_lock_held());
        base = NODE_DATA(page_to_nid(page))->node_page_ext;
        /*
         * The sanity checks the page allocator does upon freeing a
         * page can reach here before the page_ext arrays are
         * allocated when feeding a range of pages to the allocator
         * for the first time during bootup or memory hotplug.
         */
        if (unlikely(!base))
                return NULL;
        index = pfn - round_down(node_start_pfn(page_to_nid(page)),
                                        MAX_ORDER_NR_PAGES);
        return get_entry(base, index);
}

static int __init alloc_node_page_ext(int nid)
{
        struct page_ext *base;
        unsigned long table_size;
        unsigned long nr_pages;

        nr_pages = NODE_DATA(nid)->node_spanned_pages;
        if (!nr_pages)
                return 0;

        /*
         * We need extra space if the node range is not aligned to
         * MAX_ORDER_NR_PAGES: when the page allocator's buddy algorithm
         * checks a buddy's status, the range it touches can fall outside
         * the exact node range.
         */
        if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
                !IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
                nr_pages += MAX_ORDER_NR_PAGES;

        table_size = page_ext_size * nr_pages;

        base = memblock_alloc_try_nid(
                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                        MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        if (!base)
                return -ENOMEM;
        NODE_DATA(nid)->node_page_ext = base;
        total_usage += table_size;
        mod_node_page_state(NODE_DATA(nid), NR_MEMMAP_BOOT,
                            DIV_ROUND_UP(table_size, PAGE_SIZE));
        return 0;
}
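
/*
 * Worked example for the padding above (hypothetical numbers): with
 * MAX_ORDER_NR_PAGES = 1024 and a node spanning pfns 100-4000 (3900
 * pages), lookup_page_ext() indexes from round_down(100, 1024) = 0, and
 * buddy checks may touch pfns up to round_up(4000, 1024) = 4096. So up
 * to 4096 entries can be indexed even though the node spans only 3900
 * pages; the extra MAX_ORDER_NR_PAGES of entries absorbs that slack.
 */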

void __init page_ext_init_flatmem(void)
{
        int nid, fail;

        if (!invoke_need_callbacks())
                return;

        for_each_online_node(nid) {
                fail = alloc_node_page_ext(nid);
                if (fail)
                        goto fail;
        }
        pr_info("allocated %ld bytes of page_ext\n", total_usage);
        return;

fail:
        pr_crit("allocation of page_ext failed.\n");
        panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */
static bool page_ext_invalid(struct page_ext *page_ext)
{
        return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
}

static struct page_ext *lookup_page_ext(const struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        struct mem_section *section = __pfn_to_section(pfn);
        struct page_ext *page_ext = READ_ONCE(section->page_ext);

        WARN_ON_ONCE(!rcu_read_lock_held());
        /*
         * The sanity checks the page allocator does upon freeing a
         * page can reach here before the page_ext arrays are
         * allocated when feeding a range of pages to the allocator
         * for the first time during bootup or memory hotplug.
         */
        if (page_ext_invalid(page_ext))
                return NULL;
        return get_entry(page_ext, pfn);
}

static void *__meminit alloc_page_ext(size_t size, int nid)
{
        gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
        void *addr = NULL;

        addr = alloc_pages_exact_nid(nid, size, flags);
        if (addr)
                kmemleak_alloc(addr, size, 1, flags);
        else
                addr = vzalloc_node(size, nid);

        if (addr) {
                mod_node_page_state(NODE_DATA(nid), NR_MEMMAP,
                                    DIV_ROUND_UP(size, PAGE_SIZE));
        }

        return addr;
}

static int __meminit init_section_page_ext(unsigned long pfn, int nid)
{
        struct mem_section *section;
        struct page_ext *base;
        unsigned long table_size;

        section = __pfn_to_section(pfn);

        if (section->page_ext)
                return 0;

        table_size = page_ext_size * PAGES_PER_SECTION;
        base = alloc_page_ext(table_size, nid);

        /*
         * The value stored in section->page_ext is (base - pfn * page_ext_size)
         * and does not point to the memory block allocated above, which would
         * cause kmemleak false positives.
         */
        kmemleak_not_leak(base);

        if (!base) {
                pr_err("page ext allocation failure\n");
                return -ENOMEM;
        }

        /*
         * The passed "pfn" may not be aligned to SECTION.  For the calculation
         * we need to apply a mask.
         */
        pfn &= PAGE_SECTION_MASK;
        section->page_ext = (void *)base - page_ext_size * pfn;
        total_usage += table_size;
        return 0;
}
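
/*
 * Sketch of the invariant established above (illustration only): for any
 * pfn inside the section,
 *
 *      get_entry(section->page_ext, pfn)
 *              == section->page_ext + page_ext_size * pfn
 *              == base + page_ext_size * (pfn - section_start_pfn)
 *
 * so lookup_page_ext() can index with the raw pfn, with no per-lookup
 * subtraction of the section's start pfn.
 */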

static void free_page_ext(void *addr)
{
        size_t table_size;
        struct page *page;
        struct pglist_data *pgdat;

        table_size = page_ext_size * PAGES_PER_SECTION;

        if (is_vmalloc_addr(addr)) {
                page = vmalloc_to_page(addr);
                pgdat = page_pgdat(page);
                vfree(addr);
        } else {
                page = virt_to_page(addr);
                pgdat = page_pgdat(page);
                BUG_ON(PageReserved(page));
                kmemleak_free(addr);
                free_pages_exact(addr, table_size);
        }

        mod_node_page_state(pgdat, NR_MEMMAP,
                            -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
}

static void __free_page_ext(unsigned long pfn)
{
        struct mem_section *ms;
        struct page_ext *base;

        ms = __pfn_to_section(pfn);
        if (!ms || !ms->page_ext)
                return;

        base = READ_ONCE(ms->page_ext);
        /*
         * page_ext here can be valid while doing the roll back
         * operation in online_page_ext().
         */
        if (page_ext_invalid(base))
                base = (void *)base - PAGE_EXT_INVALID;
        WRITE_ONCE(ms->page_ext, NULL);

        base = get_entry(base, pfn);
        free_page_ext(base);
}

static void __invalidate_page_ext(unsigned long pfn)
{
        struct mem_section *ms;
        void *val;

        ms = __pfn_to_section(pfn);
        if (!ms || !ms->page_ext)
                return;
        val = (void *)ms->page_ext + PAGE_EXT_INVALID;
        WRITE_ONCE(ms->page_ext, val);
}
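
/*
 * Note on the tagging scheme above: a valid section->page_ext value is
 * expected to have bit 0 clear (the backing allocation is page aligned
 * and the per-entry arithmetic keeps it even), so adding
 * PAGE_EXT_INVALID (0x1) tags the pointer in place; page_ext_invalid()
 * recognises the tag and __free_page_ext() strips it again before
 * freeing. Readers that fetched the untagged pointer before the
 * invalidation remain safe until synchronize_rcu() below completes.
 */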

static int __meminit online_page_ext(unsigned long start_pfn,
                                unsigned long nr_pages,
                                int nid)
{
        unsigned long start, end, pfn;
        int fail = 0;

        start = SECTION_ALIGN_DOWN(start_pfn);
        end = SECTION_ALIGN_UP(start_pfn + nr_pages);

        if (nid == NUMA_NO_NODE) {
                /*
                 * In this case, the node already exists and contains valid
                 * memory. The "start_pfn" passed to us is the pfn handed to
                 * online_pages(), so it must lie within an existing node.
                 */
                nid = pfn_to_nid(start_pfn);
                VM_BUG_ON(!node_online(nid));
        }

        for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION)
                fail = init_section_page_ext(pfn, nid);
        if (!fail)
                return 0;

        /* rollback: free the sections initialized before the failure */
        end = pfn - PAGES_PER_SECTION;
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_ext(pfn);

        return -ENOMEM;
}

static void __meminit offline_page_ext(unsigned long start_pfn,
                                unsigned long nr_pages)
{
        unsigned long start, end, pfn;

        start = SECTION_ALIGN_DOWN(start_pfn);
        end = SECTION_ALIGN_UP(start_pfn + nr_pages);

        /*
         * Freeing of page_ext is done in 3 steps to avoid
         * use-after-free of it:
         * 1) Traverse all the sections and mark their page_ext
         *    as invalid.
         * 2) Wait for all the existing users of page_ext who
         *    started before invalidation to finish.
         * 3) Free the page_ext.
         */
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __invalidate_page_ext(pfn);

        synchronize_rcu();

        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
                __free_page_ext(pfn);
}

static int __meminit page_ext_callback(struct notifier_block *self,
                               unsigned long action, void *arg)
{
        struct memory_notify *mn = arg;
        int ret = 0;

        switch (action) {
        case MEM_GOING_ONLINE:
                ret = online_page_ext(mn->start_pfn,
                                   mn->nr_pages, mn->status_change_nid);
                break;
        case MEM_OFFLINE:
        case MEM_CANCEL_ONLINE:
                offline_page_ext(mn->start_pfn,
                                mn->nr_pages);
                break;
        case MEM_GOING_OFFLINE:
        case MEM_ONLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }

        return notifier_from_errno(ret);
}

void __init page_ext_init(void)
{
        unsigned long pfn;
        int nid;

        if (!invoke_need_callbacks())
                return;

        for_each_node_state(nid, N_MEMORY) {
                unsigned long start_pfn, end_pfn;

                start_pfn = node_start_pfn(nid);
                end_pfn = node_end_pfn(nid);
                /*
                 * start_pfn and end_pfn may not be aligned to SECTION and the
                 * page->flags of out-of-node pages are not initialized.  So we
                 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
                 */
                for (pfn = start_pfn; pfn < end_pfn;
                        pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

                        if (!pfn_valid(pfn))
                                continue;
                        /*
                         * Nodes' pfn ranges can overlap. Some architectures
                         * have a node layout such as
                         * -------------pfn-------------->
                         * N0 | N1 | N2 | N0 | N1 | N2 | ....
                         */
                        if (pfn_to_nid(pfn) != nid)
                                continue;
                        if (init_section_page_ext(pfn, nid))
                                goto oom;
                        cond_resched();
                }
        }
        hotplug_memory_notifier(page_ext_callback, DEFAULT_CALLBACK_PRI);
        pr_info("allocated %ld bytes of page_ext\n", total_usage);
        invoke_init_callbacks();
        return;

oom:
        panic("Out of memory");
}

void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

#endif

/**
 * page_ext_get() - Get the extended information for a page.
 * @page: The page we're interested in.
 *
 * Ensures that the page_ext will remain valid until page_ext_put()
 * is called.
 *
 * Return: NULL if no page_ext exists for this page.
 * Context: Any context.  Caller may not sleep until they have called
 * page_ext_put().
 */
struct page_ext *page_ext_get(const struct page *page)
{
        struct page_ext *page_ext;

        rcu_read_lock();
        page_ext = lookup_page_ext(page);
        if (!page_ext) {
                rcu_read_unlock();
                return NULL;
        }

        return page_ext;
}

/**
 * page_ext_put() - Done working with a page's extended information.
 * @page_ext: Page extended information received from page_ext_get().
 *
 * The page extended information of the page may not be valid after this
 * function is called.
 *
 * Return: None.
 * Context: Any context in which the matching page_ext_get() was called.
 */
void page_ext_put(struct page_ext *page_ext)
{
        if (unlikely(!page_ext))
                return;

        rcu_read_unlock();
}
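
/*
 * Typical usage of the two accessors above (illustrative sketch; real
 * callers are clients such as page_owner):
 *
 *      struct page_ext *page_ext = page_ext_get(page);
 *
 *      if (page_ext) {
 *              ... read or update the client payload at its offset ...
 *              page_ext_put(page_ext);
 *      }
 *
 * The RCU read lock held between get and put is exactly what step 2 of
 * the offline path (synchronize_rcu()) waits for before freeing.
 */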