/* === mm/numa.c, original (Linux 2.4, CONFIG_DISCONTIGMEM) version === */

/*
 * Written by Kanoj Sarcar, SGI, Aug 1999
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>

int numnodes = 1;	/* Initialized for UMA platforms */

static bootmem_data_t contig_bootmem_data;
pg_data_t contig_page_data = { bdata: &contig_bootmem_data };

#ifndef CONFIG_DISCONTIGMEM

/*
 * This is meant to be invoked by platforms whose physical memory starts
 * at a considerably higher value than 0. Examples are Super-H, ARM, m68k.
 * Should be invoked with parameters (0, 0, unsigned long *[], start_paddr).
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
	unsigned long *zones_size, unsigned long zone_start_paddr,
	unsigned long *zholes_size)
{
	free_area_init_core(0, &contig_page_data, &mem_map, zones_size,
				zone_start_paddr, zholes_size, pmap);
}

#endif /* !CONFIG_DISCONTIGMEM */

struct page *alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order)
{
#ifdef CONFIG_NUMA
	return __alloc_pages(gfp_mask, order,
		NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
#else
	return alloc_pages(gfp_mask, order);
#endif
}

#ifdef CONFIG_DISCONTIGMEM

#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))

static spinlock_t node_lock = SPIN_LOCK_UNLOCKED;

void show_free_areas_node(pg_data_t *pgdat)
{
	unsigned long flags;

	spin_lock_irqsave(&node_lock, flags);
	show_free_areas_core(pgdat);
	spin_unlock_irqrestore(&node_lock, flags);
}

/*
 * Nodes can be initialized in parallel, in no particular order.
 */
void __init free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
	unsigned long *zones_size, unsigned long zone_start_paddr,
	unsigned long *zholes_size)
{
	int i, size = 0;
	struct page *discard;

	if (mem_map == (mem_map_t *)NULL)
		mem_map = (mem_map_t *)PAGE_OFFSET;

	free_area_init_core(nid, pgdat, &discard, zones_size, zone_start_paddr,
				zholes_size, pmap);
	pgdat->node_id = nid;

	/*
	 * Get space for the valid bitmap.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++)
		size += zones_size[i];
	size = LONG_ALIGN((size + 7) >> 3);
	pgdat->valid_addr_bitmap = (unsigned long *)alloc_bootmem_node(pgdat, size);
	memset(pgdat->valid_addr_bitmap, 0, size);
}

static struct page *alloc_pages_pgdat(pg_data_t *pgdat, unsigned int gfp_mask,
	unsigned int order)
{
	return __alloc_pages(gfp_mask, order,
		pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK));
}

/*
 * This can be refined. Currently, tries to do round robin, instead
 * should do concentric circle search, starting from current node.
 */
struct page *_alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	struct page *ret = 0;
	pg_data_t *start, *temp;
#ifndef CONFIG_NUMA
	unsigned long flags;
	static pg_data_t *next = 0;
#endif

	if (order >= MAX_ORDER)
		return NULL;
#ifdef CONFIG_NUMA
	temp = NODE_DATA(numa_node_id());
#else
	spin_lock_irqsave(&node_lock, flags);
	if (!next)
		next = pgdat_list;
	temp = next;
	next = next->node_next;
	spin_unlock_irqrestore(&node_lock, flags);
#endif
	start = temp;
	while (temp) {
		if ((ret = alloc_pages_pgdat(temp, gfp_mask, order)))
			return ret;
		temp = temp->node_next;
	}
	temp = pgdat_list;
	while (temp != start) {
		if ((ret = alloc_pages_pgdat(temp, gfp_mask, order)))
			return ret;
		temp = temp->node_next;
	}
	return 0;
}

#endif /* CONFIG_DISCONTIGMEM */

/* === mm/numa.c, current version === */

// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/numa.h>
#include <linux/numa_memblks.h>

struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

/* Allocate NODE_DATA for a node on the local memory */
void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	/* Allocate node data.  Try node-local memory and then any node. */
	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n",
		      nd_size, nid);
	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
}

void __init alloc_offline_node_data(int nid)
{
	pg_data_t *pgdat;

	pgdat = memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES);
	if (!pgdat)
		panic("Cannot allocate %zuB for node %d.\n",
		      sizeof(*pgdat), nid);

	node_data[nid] = pgdat;
}

/* Stub functions: */

#ifndef memory_add_physaddr_to_nid
int memory_add_physaddr_to_nid(u64 start)
{
	pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
			start);
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#ifndef phys_to_target_node
int phys_to_target_node(u64 start)
{
	pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
			start);
	return 0;
}
EXPORT_SYMBOL_GPL(phys_to_target_node);
#endif
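/*
 * A minimal usage sketch for the two allocators above (illustration only,
 * not part of mm/numa.c).  The function name and the "registered" nodemask
 * parameter are hypothetical; in the real kernel these helpers are called
 * from architecture setup code and from the NUMA memblock code.
 */
static void __init example_alloc_all_node_data(const nodemask_t *registered)
{
	int nid;

	for_each_node(nid) {
		if (node_isset(nid, *registered))
			/* place pg_data_t on (or as close as possible to) nid */
			alloc_node_data(nid);
		else
			/* node known but without usable memory yet */
			alloc_offline_node_data(nid);
	}
}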
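/*
 * Sketch of how the two stubs above are typically consumed (hypothetical
 * caller, for illustration only): hotplug-style code asks which node backs
 * a physical range, and on architectures that do not override these helpers
 * the answer degrades gracefully to node 0.
 */
static int example_node_for_new_memory(u64 start)
{
	int nid = phys_to_target_node(start);

	if (nid == NUMA_NO_NODE)	/* a real implementation may not know */
		nid = memory_add_physaddr_to_nid(start);	/* stub returns 0 */
	return nid;
}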