TOMOYO Linux Cross Reference
Linux/arch/powerpc/mm/numa.c

  1 // SPDX-License-Identifier: GPL-2.0-or-later
  2 /*
  3  * pSeries NUMA support
  4  *
  5  * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
  6  */
  7 #define pr_fmt(fmt) "numa: " fmt
  8 
  9 #include <linux/threads.h>
 10 #include <linux/memblock.h>
 11 #include <linux/init.h>
 12 #include <linux/mm.h>
 13 #include <linux/mmzone.h>
 14 #include <linux/export.h>
 15 #include <linux/nodemask.h>
 16 #include <linux/cpu.h>
 17 #include <linux/notifier.h>
 18 #include <linux/of.h>
 19 #include <linux/of_address.h>
 20 #include <linux/pfn.h>
 21 #include <linux/cpuset.h>
 22 #include <linux/node.h>
 23 #include <linux/stop_machine.h>
 24 #include <linux/proc_fs.h>
 25 #include <linux/seq_file.h>
 26 #include <linux/uaccess.h>
 27 #include <linux/slab.h>
 28 #include <asm/cputhreads.h>
 29 #include <asm/sparsemem.h>
 30 #include <asm/smp.h>
 31 #include <asm/topology.h>
 32 #include <asm/firmware.h>
 33 #include <asm/paca.h>
 34 #include <asm/hvcall.h>
 35 #include <asm/setup.h>
 36 #include <asm/vdso.h>
 37 #include <asm/vphn.h>
 38 #include <asm/drmem.h>
 39 
 40 static int numa_enabled = 1;
 41 
 42 static char *cmdline __initdata;
 43 
 44 int numa_cpu_lookup_table[NR_CPUS];
 45 cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
 46 struct pglist_data *node_data[MAX_NUMNODES];
 47 
 48 EXPORT_SYMBOL(numa_cpu_lookup_table);
 49 EXPORT_SYMBOL(node_to_cpumask_map);
 50 EXPORT_SYMBOL(node_data);
 51 
 52 static int primary_domain_index;
 53 static int n_mem_addr_cells, n_mem_size_cells;
 54 
 55 #define FORM0_AFFINITY 0
 56 #define FORM1_AFFINITY 1
 57 #define FORM2_AFFINITY 2
 58 static int affinity_form;
 59 
 60 #define MAX_DISTANCE_REF_POINTS 4
 61 static int distance_ref_points_depth;
 62 static const __be32 *distance_ref_points;
 63 static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
 64 static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = {
 65         [0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 }
 66 };
 67 static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE };
 68 
 69 /*
 70  * Allocate node_to_cpumask_map based on number of available nodes
 71  * Requires node_possible_map to be valid.
 72  *
 73  * Note: cpumask_of_node() is not valid until after this is done.
 74  */
 75 static void __init setup_node_to_cpumask_map(void)
 76 {
 77         unsigned int node;
 78 
 79         /* setup nr_node_ids if not done yet */
 80         if (nr_node_ids == MAX_NUMNODES)
 81                 setup_nr_node_ids();
 82 
 83         /* allocate the map */
 84         for_each_node(node)
 85                 alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
 86 
 87         /* cpumask_of_node() will now work */
 88         pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
 89 }
 90 
 91 static int __init fake_numa_create_new_node(unsigned long end_pfn,
 92                                                 unsigned int *nid)
 93 {
 94         unsigned long long mem;
 95         char *p = cmdline;
 96         static unsigned int fake_nid;
 97         static unsigned long long curr_boundary;
 98 
 99         /*
100          * Modify the node id only if we have started creating NUMA
101          * nodes. We want to continue from where we left off last time.
102          */
103         if (fake_nid)
104                 *nid = fake_nid;
105         /*
106          * In case there are no more arguments to parse, the
107          * node_id should be the same as the last fake node id
108          * (we've handled this above).
109          */
110         if (!p)
111                 return 0;
112 
113         mem = memparse(p, &p);
114         if (!mem)
115                 return 0;
116 
117         if (mem < curr_boundary)
118                 return 0;
119 
120         curr_boundary = mem;
121 
122         if ((end_pfn << PAGE_SHIFT) > mem) {
123                 /*
124                  * Skip commas and spaces
125                  */
126                 while (*p == ',' || *p == ' ' || *p == '\t')
127                         p++;
128 
129                 cmdline = p;
130                 fake_nid++;
131                 *nid = fake_nid;
132                 pr_debug("created new fake_node with id %d\n", fake_nid);
133                 return 1;
134         }
135         return 0;
136 }
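
/*
 * Illustration (boundaries invented): booting with "numa=fake=1G,4G"
 * makes this parser treat 1G and 4G as cumulative address boundaries.
 * The first region ending above 1G starts fake node 1, the first
 * region ending above 4G starts fake node 2, and regions in between
 * keep the current fake nid until the next boundary is crossed.
 */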
137 
138 static void __init reset_numa_cpu_lookup_table(void)
139 {
140         unsigned int cpu;
141 
142         for_each_possible_cpu(cpu)
143                 numa_cpu_lookup_table[cpu] = -1;
144 }
145 
146 void map_cpu_to_node(int cpu, int node)
147 {
148         update_numa_cpu_lookup_table(cpu, node);
149 
150         if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) {
151                 pr_debug("adding cpu %d to node %d\n", cpu, node);
152                 cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
153         }
154 }
155 
156 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
157 void unmap_cpu_from_node(unsigned long cpu)
158 {
159         int node = numa_cpu_lookup_table[cpu];
160 
161         if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
162                 cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
163                 pr_debug("removing cpu %lu from node %d\n", cpu, node);
164         } else {
165                 pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node);
166         }
167 }
168 #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
169 
170 static int __associativity_to_nid(const __be32 *associativity,
171                                   int max_array_sz)
172 {
173         int nid;
174         /*
175          * primary_domain_index is a 1-based array index.
176          */
177         int index = primary_domain_index - 1;
178 
179         if (!numa_enabled || index >= max_array_sz)
180                 return NUMA_NO_NODE;
181 
182         nid = of_read_number(&associativity[index], 1);
183 
184         /* POWER4 LPAR uses 0xffff as invalid node */
185         if (nid == 0xffff || nid >= nr_node_ids)
186                 nid = NUMA_NO_NODE;
187         return nid;
188 }
189 /*
190  * Returns nid in the range [0..nr_node_ids-1], or NUMA_NO_NODE if no
191  * useful NUMA info is found.
192  */
193 static int associativity_to_nid(const __be32 *associativity)
194 {
195         int array_sz = of_read_number(associativity, 1);
196 
197         /* Skip the first element in the associativity array */
198         return __associativity_to_nid((associativity + 1), array_sz);
199 }
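
/*
 * Illustrative only, and guarded out of compilation: a minimal
 * userspace sketch of the decode performed above, assuming a
 * hypothetical primary_domain_index of 4. An ibm,associativity value
 * of <4 0 0 0 7> (a length cell followed by domain cells) then
 * yields nid 7.
 */
#if 0
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* ibm,associativity = <4 0 0 0 7> as big-endian cells */
        uint32_t assoc[] = { htobe32(4), htobe32(0), htobe32(0),
                             htobe32(0), htobe32(7) };
        int index = 4 - 1;                 /* primary_domain_index - 1 */
        int array_sz = be32toh(assoc[0]);
        /* skip the leading length cell, as associativity_to_nid() does */
        int nid = (index < array_sz) ? be32toh(assoc[1 + index]) : -1;

        printf("nid = %d\n", nid);         /* prints "nid = 7" */
        return 0;
}
#endif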
200 
201 static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
202 {
203         int dist;
204         int node1, node2;
205 
206         node1 = associativity_to_nid(cpu1_assoc);
207         node2 = associativity_to_nid(cpu2_assoc);
208 
209         dist = numa_distance_table[node1][node2];
210         if (dist <= LOCAL_DISTANCE)
211                 return 0;
212         else if (dist <= REMOTE_DISTANCE)
213                 return 1;
214         else
215                 return 2;
216 }
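
/*
 * For example, with LOCAL_DISTANCE == 10 and REMOTE_DISTANCE == 20, a
 * table distance of 10 maps to 0 (local), 20 maps to 1 (remote), and
 * anything larger, say 80, maps to 2.
 */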
217 
218 static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
219 {
220         int dist = 0;
221 
222         int i, index;
223 
224         for (i = 0; i < distance_ref_points_depth; i++) {
225                 index = be32_to_cpu(distance_ref_points[i]);
226                 if (cpu1_assoc[index] == cpu2_assoc[index])
227                         break;
228                 dist++;
229         }
230 
231         return dist;
232 }
233 
234 int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
235 {
236         /* We should not get called with FORM0 */
237         VM_WARN_ON(affinity_form == FORM0_AFFINITY);
238         if (affinity_form == FORM1_AFFINITY)
239                 return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc);
240         return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc);
241 }
242 
243 /* must hold reference to node during call */
244 static const __be32 *of_get_associativity(struct device_node *dev)
245 {
246         return of_get_property(dev, "ibm,associativity", NULL);
247 }
248 
249 int __node_distance(int a, int b)
250 {
251         int i;
252         int distance = LOCAL_DISTANCE;
253 
254         if (affinity_form == FORM2_AFFINITY)
255                 return numa_distance_table[a][b];
256         else if (affinity_form == FORM0_AFFINITY)
257                 return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
258 
259         for (i = 0; i < distance_ref_points_depth; i++) {
260                 if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
261                         break;
262 
263                 /* Double the distance for each NUMA level */
264                 distance *= 2;
265         }
266 
267         return distance;
268 }
269 EXPORT_SYMBOL(__node_distance);
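
/*
 * Worked form 1 example (values invented), with
 * distance_ref_points_depth == 4 and LOCAL_DISTANCE == 10:
 *
 *   distance_lookup_table[a] = { 2, 4, 6, 8 }
 *   distance_lookup_table[b] = { 3, 4, 6, 8 }
 *
 *   i = 0: 2 != 3, distance = 10 * 2 = 20
 *   i = 1: 4 == 4, break
 *
 * so __node_distance(a, b) == 20. Nodes differing at every level
 * would give 10 * 2^4 = 160.
 */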
270 
271 /* Returns the nid associated with the given device tree node,
272  * or NUMA_NO_NODE if not found.
273  */
274 static int of_node_to_nid_single(struct device_node *device)
275 {
276         int nid = NUMA_NO_NODE;
277         const __be32 *tmp;
278 
279         tmp = of_get_associativity(device);
280         if (tmp)
281                 nid = associativity_to_nid(tmp);
282         return nid;
283 }
284 
285 /* Walk the device tree upwards, looking for an associativity id */
286 int of_node_to_nid(struct device_node *device)
287 {
288         int nid = NUMA_NO_NODE;
289 
290         of_node_get(device);
291         while (device) {
292                 nid = of_node_to_nid_single(device);
293                 if (nid != -1)
294                         break;
295 
296                 device = of_get_next_parent(device);
297         }
298         of_node_put(device);
299 
300         return nid;
301 }
302 EXPORT_SYMBOL(of_node_to_nid);
303 
304 static void __initialize_form1_numa_distance(const __be32 *associativity,
305                                              int max_array_sz)
306 {
307         int i, nid;
308 
309         if (affinity_form != FORM1_AFFINITY)
310                 return;
311 
312         nid = __associativity_to_nid(associativity, max_array_sz);
313         if (nid != NUMA_NO_NODE) {
314                 for (i = 0; i < distance_ref_points_depth; i++) {
315                         const __be32 *entry;
316                         int index = be32_to_cpu(distance_ref_points[i]) - 1;
317 
318                         /*
319                          * broken hierarchy, return with broken distance table
320                          */
321                         if (WARN(index >= max_array_sz, "Broken ibm,associativity property"))
322                                 return;
323 
324                         entry = &associativity[index];
325                         distance_lookup_table[nid][i] = of_read_number(entry, 1);
326                 }
327         }
328 }
329 
330 static void initialize_form1_numa_distance(const __be32 *associativity)
331 {
332         int array_sz;
333 
334         array_sz = of_read_number(associativity, 1);
335         /* Skip the first element in the associativity array */
336         __initialize_form1_numa_distance(associativity + 1, array_sz);
337 }
338 
339 /*
340  * Used to update distance information w.r.t. a newly added node.
341  */
342 void update_numa_distance(struct device_node *node)
343 {
344         int nid;
345 
346         if (affinity_form == FORM0_AFFINITY)
347                 return;
348         else if (affinity_form == FORM1_AFFINITY) {
349                 const __be32 *associativity;
350 
351                 associativity = of_get_associativity(node);
352                 if (!associativity)
353                         return;
354 
355                 initialize_form1_numa_distance(associativity);
356                 return;
357         }
358 
359         /* FORM2 affinity  */
360         nid = of_node_to_nid_single(node);
361         if (nid == NUMA_NO_NODE)
362                 return;
363 
364         /*
365          * With FORM2 we expect the NUMA distances between all possible
366          * NUMA nodes to be provided during boot.
367          */
368         WARN(numa_distance_table[nid][nid] == -1,
369              "NUMA distance details for node %d not provided\n", nid);
370 }
371 EXPORT_SYMBOL_GPL(update_numa_distance);
372 
373 /*
374  * ibm,numa-lookup-index-table = {N, domainid1, domainid2, ..., domainidN}
375  * ibm,numa-distance-table = {N, 1, 2, 4, 5, 1, 6, ..., N elements}
376  */
377 static void __init initialize_form2_numa_distance_lookup_table(void)
378 {
379         int i, j;
380         struct device_node *root;
381         const __u8 *form2_distances;
382         const __be32 *numa_lookup_index;
383         int form2_distances_length;
384         int max_numa_index, distance_index;
385 
386         if (firmware_has_feature(FW_FEATURE_OPAL))
387                 root = of_find_node_by_path("/ibm,opal");
388         else
389                 root = of_find_node_by_path("/rtas");
390         if (!root)
391                 root = of_find_node_by_path("/");
392 
393         numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL);
394         max_numa_index = of_read_number(&numa_lookup_index[0], 1);
395 
396         /* the first element of the array is the size, encoded as an int */
397         form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
398         form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
399         /* skip the size, which is an encoded int */
400         form2_distances += sizeof(__be32);
401 
402         pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
403                  form2_distances_length, max_numa_index);
404 
405         for (i = 0; i < max_numa_index; i++)
406                 /* +1 skip the max_numa_index in the property */
407                 numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);
408 
409 
410         if (form2_distances_length != max_numa_index * max_numa_index) {
411                 WARN(1, "Wrong NUMA distance information\n");
412                 form2_distances = NULL; // don't use it
413         }
414         distance_index = 0;
415         for (i = 0;  i < max_numa_index; i++) {
416                 for (j = 0; j < max_numa_index; j++) {
417                         int nodeA = numa_id_index_table[i];
418                         int nodeB = numa_id_index_table[j];
419                         int dist;
420 
421                         if (form2_distances)
422                                 dist = form2_distances[distance_index++];
423                         else if (nodeA == nodeB)
424                                 dist = LOCAL_DISTANCE;
425                         else
426                                 dist = REMOTE_DISTANCE;
427                         numa_distance_table[nodeA][nodeB] = dist;
428                         pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
429                 }
430         }
431 
432         of_node_put(root);
433 }
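
/*
 * A hypothetical FORM2 encoding for two nodes with ids 0 and 4,
 * following the layout described above (one plausible dts rendering):
 *
 *   ibm,numa-lookup-index-table = <2 0 4>;
 *   ibm,numa-distance-table = <4>, /bits/ 8 <10 40 40 10>;
 *
 * which fills numa_distance_table[0][0] = 10, [0][4] = 40,
 * [4][0] = 40 and [4][4] = 10.
 */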
434 
435 static int __init find_primary_domain_index(void)
436 {
437         int index;
438         struct device_node *root;
439 
440         /*
441          * Check for which form of affinity.
442          */
443         if (firmware_has_feature(FW_FEATURE_OPAL)) {
444                 affinity_form = FORM1_AFFINITY;
445         } else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) {
446                 pr_debug("Using form 2 affinity\n");
447                 affinity_form = FORM2_AFFINITY;
448         } else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) {
449                 pr_debug("Using form 1 affinity\n");
450                 affinity_form = FORM1_AFFINITY;
451         } else
452                 affinity_form = FORM0_AFFINITY;
453 
454         if (firmware_has_feature(FW_FEATURE_OPAL))
455                 root = of_find_node_by_path("/ibm,opal");
456         else
457                 root = of_find_node_by_path("/rtas");
458         if (!root)
459                 root = of_find_node_by_path("/");
460 
461         /*
462          * This property is a set of 32-bit integers, each representing
463          * an index into the ibm,associativity nodes.
464          *
465          * With form 0 affinity the first integer is for an SMP configuration
466          * (should be all 0's) and the second is for a normal NUMA
467          * configuration. We have only one level of NUMA.
468          *
469          * With form 1 affinity the first integer is the most significant
470          * NUMA boundary and the following are progressively less significant
471          * boundaries. There can be more than one level of NUMA.
472          */
473         distance_ref_points = of_get_property(root,
474                                         "ibm,associativity-reference-points",
475                                         &distance_ref_points_depth);
476 
477         if (!distance_ref_points) {
478                 pr_debug("ibm,associativity-reference-points not found.\n");
479                 goto err;
480         }
481 
482         distance_ref_points_depth /= sizeof(int);
483         if (affinity_form == FORM0_AFFINITY) {
484                 if (distance_ref_points_depth < 2) {
485                         pr_warn("short ibm,associativity-reference-points\n");
486                         goto err;
487                 }
488 
489                 index = of_read_number(&distance_ref_points[1], 1);
490         } else {
491                 /*
492                  * Both FORM1 and FORM2 affinity find the primary domain details
493                  * at the same offset.
494                  */
495                 index = of_read_number(distance_ref_points, 1);
496         }
497         /*
498          * Warn and cap if the hardware supports more than
499          * MAX_DISTANCE_REF_POINTS domains.
500          */
501         if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
502                 pr_warn("distance array capped at %d entries\n",
503                         MAX_DISTANCE_REF_POINTS);
504                 distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
505         }
506 
507         of_node_put(root);
508         return index;
509 
510 err:
511         of_node_put(root);
512         return -1;
513 }
514 
515 static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
516 {
517         struct device_node *memory = NULL;
518 
519         memory = of_find_node_by_type(memory, "memory");
520         if (!memory)
521                 panic("numa.c: No memory nodes found!");
522 
523         *n_addr_cells = of_n_addr_cells(memory);
524         *n_size_cells = of_n_size_cells(memory);
525         of_node_put(memory);
526 }
527 
528 static unsigned long read_n_cells(int n, const __be32 **buf)
529 {
530         unsigned long result = 0;
531 
532         while (n--) {
533                 result = (result << 32) | of_read_number(*buf, 1);
534                 (*buf)++;
535         }
536         return result;
537 }
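
/*
 * Worked example: with n == 2 and the big-endian cells
 * <0x00000001 0x00000000>, the loop computes
 *
 *   result = (0x0 << 32) | 0x00000001 = 0x1
 *   result = (0x1 << 32) | 0x00000000 = 0x100000000
 *
 * i.e. a 64-bit value of 4 GiB, advancing *buf past both cells.
 */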
538 
539 struct assoc_arrays {
540         u32     n_arrays;
541         u32     array_sz;
542         const __be32 *arrays;
543 };
544 
545 /*
546  * Retrieve and validate the list of associativity arrays for drconf
547  * memory from the ibm,associativity-lookup-arrays property of the
548  * device tree.
549  *
550  * The layout of the ibm,associativity-lookup-arrays property is a number N
551  * indicating the number of associativity arrays, followed by a number M
552  * indicating the size of each associativity array, followed by a list
553  * of N associativity arrays.
554  */
555 static int of_get_assoc_arrays(struct assoc_arrays *aa)
556 {
557         struct device_node *memory;
558         const __be32 *prop;
559         u32 len;
560 
561         memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
562         if (!memory)
563                 return -1;
564 
565         prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
566         if (!prop || len < 2 * sizeof(unsigned int)) {
567                 of_node_put(memory);
568                 return -1;
569         }
570 
571         aa->n_arrays = of_read_number(prop++, 1);
572         aa->array_sz = of_read_number(prop++, 1);
573 
574         of_node_put(memory);
575 
576         /* Now that we know the number of arrays and size of each array,
577          * revalidate the size of the property read in.
578          */
579         if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
580                 return -1;
581 
582         aa->arrays = prop;
583         return 0;
584 }
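
/*
 * Illustrative property contents (values invented): with N = 2 arrays
 * of size M = 5,
 *
 *   ibm,associativity-lookup-arrays = <2 5  0 0 0 0 0  0 0 0 1 1>;
 *
 * the property holds 2 + 2*5 cells, and an LMB whose aa_index is 1
 * selects the second 5-cell array.
 */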
585 
586 static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb)
587 {
588         struct assoc_arrays aa = { .arrays = NULL };
589         int default_nid = NUMA_NO_NODE;
590         int nid = default_nid;
591         int rc, index;
592 
593         if ((primary_domain_index < 0) || !numa_enabled)
594                 return default_nid;
595 
596         rc = of_get_assoc_arrays(&aa);
597         if (rc)
598                 return default_nid;
599 
600         if (primary_domain_index <= aa.array_sz &&
601             !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
602                 const __be32 *associativity;
603 
604                 index = lmb->aa_index * aa.array_sz;
605                 associativity = &aa.arrays[index];
606                 nid = __associativity_to_nid(associativity, aa.array_sz);
607                 if (nid > 0 && affinity_form == FORM1_AFFINITY) {
608                         /*
609                          * Associativity entries in the lookup array do not
610                          * have the array length as their first element.
611                          */
612                         __initialize_form1_numa_distance(associativity, aa.array_sz);
613                 }
614         }
615         return nid;
616 }
617 
618 /*
619  * This is like of_node_to_nid_single() for memory represented in the
620  * ibm,dynamic-reconfiguration-memory node.
621  */
622 int of_drconf_to_nid_single(struct drmem_lmb *lmb)
623 {
624         struct assoc_arrays aa = { .arrays = NULL };
625         int default_nid = NUMA_NO_NODE;
626         int nid = default_nid;
627         int rc, index;
628 
629         if ((primary_domain_index < 0) || !numa_enabled)
630                 return default_nid;
631 
632         rc = of_get_assoc_arrays(&aa);
633         if (rc)
634                 return default_nid;
635 
636         if (primary_domain_index <= aa.array_sz &&
637             !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
638                 const __be32 *associativity;
639 
640                 index = lmb->aa_index * aa.array_sz;
641                 associativity = &aa.arrays[index];
642                 nid = __associativity_to_nid(associativity, aa.array_sz);
643         }
644         return nid;
645 }
646 
647 #ifdef CONFIG_PPC_SPLPAR
648 
649 static int __vphn_get_associativity(long lcpu, __be32 *associativity)
650 {
651         long rc, hwid;
652 
653         /*
654          * On a shared LPAR, the device tree will not have node
655          * associativity. At this time the lppaca, or its __old_status
656          * field, may not be updated, so the kernel cannot detect whether
657          * it is on a shared LPAR. Hence request an explicit associativity
658          * irrespective of whether the LPAR is shared or dedicated, and
659          * use the device tree property as a fallback. cpu_to_phys_id is
660          * only valid between smp_setup_cpu_maps() and smp_setup_pacas().
661          */
662         if (firmware_has_feature(FW_FEATURE_VPHN)) {
663                 if (cpu_to_phys_id)
664                         hwid = cpu_to_phys_id[lcpu];
665                 else
666                         hwid = get_hard_smp_processor_id(lcpu);
667 
668                 rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
669                 if (rc == H_SUCCESS)
670                         return 0;
671         }
672 
673         return -1;
674 }
675 
676 static int vphn_get_nid(long lcpu)
677 {
678         __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
679 
680 
681         if (!__vphn_get_associativity(lcpu, associativity))
682                 return associativity_to_nid(associativity);
683 
684         return NUMA_NO_NODE;
685 
686 }
687 #else
688 
689 static int __vphn_get_associativity(long lcpu, __be32 *associativity)
690 {
691         return -1;
692 }
693 
694 static int vphn_get_nid(long unused)
695 {
696         return NUMA_NO_NODE;
697 }
698 #endif  /* CONFIG_PPC_SPLPAR */
699 
700 /*
701  * Figure out to which domain a cpu belongs and stick it there.
702  * Return the id of the domain used.
703  */
704 static int numa_setup_cpu(unsigned long lcpu)
705 {
706         struct device_node *cpu;
707         int fcpu = cpu_first_thread_sibling(lcpu);
708         int nid = NUMA_NO_NODE;
709 
710         if (!cpu_present(lcpu)) {
711                 set_cpu_numa_node(lcpu, first_online_node);
712                 return first_online_node;
713         }
714 
715         /*
716          * If a valid cpu-to-node mapping is already available, use it
717          * directly instead of querying the firmware, since it represents
718          * the most recent mapping notified to us by the platform (e.g.
719          * VPHN). The cpu_to_node binding is the same for all threads in
720          * a core, so if a valid mapping is already available for the
721          * first thread in the core, use it.
722          */
723         nid = numa_cpu_lookup_table[fcpu];
724         if (nid >= 0) {
725                 map_cpu_to_node(lcpu, nid);
726                 return nid;
727         }
728 
729         nid = vphn_get_nid(lcpu);
730         if (nid != NUMA_NO_NODE)
731                 goto out_present;
732 
733         cpu = of_get_cpu_node(lcpu, NULL);
734 
735         if (!cpu) {
736                 WARN_ON(1);
737                 if (cpu_present(lcpu))
738                         goto out_present;
739                 else
740                         goto out;
741         }
742 
743         nid = of_node_to_nid_single(cpu);
744         of_node_put(cpu);
745 
746 out_present:
747         if (nid < 0 || !node_possible(nid))
748                 nid = first_online_node;
749 
750         /*
751          * Update for the first thread of the core. All threads of a core
752          * have to be part of the same node. This not only avoids querying
753          * for every other thread in the core, but also avoids a case where
754          * a virtual node associativity change causes subsequent threads of
755          * a core to be associated with different nids. However, if the
756          * first thread is already online, expect it to have a valid mapping.
757          */
758         if (fcpu != lcpu) {
759                 WARN_ON(cpu_online(fcpu));
760                 map_cpu_to_node(fcpu, nid);
761         }
762 
763         map_cpu_to_node(lcpu, nid);
764 out:
765         return nid;
766 }
767 
768 static void verify_cpu_node_mapping(int cpu, int node)
769 {
770         int base, sibling, i;
771 
772         /* Verify that all the threads in the core belong to the same node */
773         base = cpu_first_thread_sibling(cpu);
774 
775         for (i = 0; i < threads_per_core; i++) {
776                 sibling = base + i;
777 
778                 if (sibling == cpu || cpu_is_offline(sibling))
779                         continue;
780 
781                 if (cpu_to_node(sibling) != node) {
782                         WARN(1, "CPU thread siblings %d and %d don't belong"
783                                 " to the same node!\n", cpu, sibling);
784                         break;
785                 }
786         }
787 }
788 
789 /* Must run before sched domains notifier. */
790 static int ppc_numa_cpu_prepare(unsigned int cpu)
791 {
792         int nid;
793 
794         nid = numa_setup_cpu(cpu);
795         verify_cpu_node_mapping(cpu, nid);
796         return 0;
797 }
798 
799 static int ppc_numa_cpu_dead(unsigned int cpu)
800 {
801         return 0;
802 }
803 
804 /*
805  * Check and possibly modify a memory region to enforce the memory limit.
806  *
807  * Returns the size the region should have to enforce the memory limit.
808  * This will either be the original value of size, a truncated value,
809  * or zero. If the returned value of size is 0 the region should be
810  * discarded as it lies wholly above the memory limit.
811  */
812 static unsigned long __init numa_enforce_memory_limit(unsigned long start,
813                                                       unsigned long size)
814 {
815         /*
816          * We use memblock_end_of_DRAM() in here instead of memory_limit because
817          * we've already adjusted it for the limit and it takes care of
818          * having memory holes below the limit.  Also, in the case of
819          * iommu_is_off, memory_limit is not set but is implicitly enforced.
820          */
821 
822         if (start + size <= memblock_end_of_DRAM())
823                 return size;
824 
825         if (start >= memblock_end_of_DRAM())
826                 return 0;
827 
828         return memblock_end_of_DRAM() - start;
829 }
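
/*
 * For example, if memblock_end_of_DRAM() is 0x40000000 (1 GiB), a
 * region (start 0x3c000000, size 0x8000000) is truncated to
 * 0x4000000 bytes, while a region starting at or above 0x40000000
 * returns 0 and is discarded.
 */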
830 
831 /*
832  * Reads the counter for a given entry in
833  * linux,drconf-usable-memory property
834  */
835 static inline int __init read_usm_ranges(const __be32 **usm)
836 {
837         /*
838          * For each LMB in ibm,dynamic-memory, the corresponding
839          * entry in the linux,drconf-usable-memory property contains
840          * a counter followed by that many (base, size) tuples.
841          * Read the counter from linux,drconf-usable-memory.
842          */
843         return read_n_cells(n_mem_size_cells, usm);
844 }
845 
846 /*
847  * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
848  * node.  This assumes n_mem_{addr,size}_cells have been set.
849  */
850 static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
851                                         const __be32 **usm,
852                                         void *data)
853 {
854         unsigned int ranges, is_kexec_kdump = 0;
855         unsigned long base, size, sz;
856         int nid;
857 
858         /*
859          * Skip this block if the reserved bit is set in flags (0x80)
860          * or if the block is not assigned to this partition (0x8)
861          */
862         if ((lmb->flags & DRCONF_MEM_RESERVED)
863             || !(lmb->flags & DRCONF_MEM_ASSIGNED))
864                 return 0;
865 
866         if (*usm)
867                 is_kexec_kdump = 1;
868 
869         base = lmb->base_addr;
870         size = drmem_lmb_size();
871         ranges = 1;
872 
873         if (is_kexec_kdump) {
874                 ranges = read_usm_ranges(usm);
875                 if (!ranges) /* there are no (base, size) tuples */
876                         return 0;
877         }
878 
879         do {
880                 if (is_kexec_kdump) {
881                         base = read_n_cells(n_mem_addr_cells, usm);
882                         size = read_n_cells(n_mem_size_cells, usm);
883                 }
884 
885                 nid = get_nid_and_numa_distance(lmb);
886                 fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
887                                           &nid);
888                 node_set_online(nid);
889                 sz = numa_enforce_memory_limit(base, size);
890                 if (sz)
891                         memblock_set_node(base, sz, &memblock.memory, nid);
892         } while (--ranges);
893 
894         return 0;
895 }
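
/*
 * Illustrative linux,drconf-usable-memory entry for one LMB (values
 * invented; n_mem_addr_cells == n_mem_size_cells == 2, so the counter
 * itself also occupies two cells):
 *
 *   <0x0 0x2  0x0 0x10000000 0x0 0x8000000  0x0 0x20000000 0x0 0x4000000>
 *
 * i.e. a count of 2 followed by two (base, size) tuples; only those
 * two sub-ranges of the LMB are registered with memblock.
 */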
896 
897 static int __init parse_numa_properties(void)
898 {
899         struct device_node *memory, *pci;
900         int default_nid = 0;
901         unsigned long i;
902         const __be32 *associativity;
903 
904         if (numa_enabled == 0) {
905                 pr_warn("disabled by user\n");
906                 return -1;
907         }
908 
909         primary_domain_index = find_primary_domain_index();
910 
911         if (primary_domain_index < 0) {
912                 /*
913                  * If we fail to parse primary_domain_index from the device
914                  * tree, mark NUMA as disabled and boot with NUMA disabled.
915                  */
916                 numa_enabled = false;
917                 return primary_domain_index;
918         }
919 
920         pr_debug("associativity depth for CPU/Memory: %d\n", primary_domain_index);
921 
922         /*
923          * If it is FORM2 initialize the distance table here.
924          */
925         if (affinity_form == FORM2_AFFINITY)
926                 initialize_form2_numa_distance_lookup_table();
927 
928         /*
929          * Even though we connect cpus to numa domains later in SMP
930          * init, we need to know the node ids now. This is because
931          * each node to be onlined must have NODE_DATA etc. backing it.
932          */
933         for_each_present_cpu(i) {
934                 __be32 vphn_assoc[VPHN_ASSOC_BUFSIZE];
935                 struct device_node *cpu;
936                 int nid = NUMA_NO_NODE;
937 
938                 memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32));
939 
940                 if (__vphn_get_associativity(i, vphn_assoc) == 0) {
941                         nid = associativity_to_nid(vphn_assoc);
942                         initialize_form1_numa_distance(vphn_assoc);
943                 } else {
944 
945                         /*
946                          * Don't fall back to default_nid yet -- we will plug
947                          * cpus into nodes once the memory scan has discovered
948                          * the topology.
949                          */
950                         cpu = of_get_cpu_node(i, NULL);
951                         BUG_ON(!cpu);
952 
953                         associativity = of_get_associativity(cpu);
954                         if (associativity) {
955                                 nid = associativity_to_nid(associativity);
956                                 initialize_form1_numa_distance(associativity);
957                         }
958                         of_node_put(cpu);
959                 }
960 
961                 /* node_set_online() is undefined behaviour if 'nid' is negative */
962                 if (likely(nid >= 0))
963                         node_set_online(nid);
964         }
965 
966         get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
967 
968         for_each_node_by_type(memory, "memory") {
969                 unsigned long start;
970                 unsigned long size;
971                 int nid;
972                 int ranges;
973                 const __be32 *memcell_buf;
974                 unsigned int len;
975 
976                 memcell_buf = of_get_property(memory,
977                         "linux,usable-memory", &len);
978                 if (!memcell_buf || len <= 0)
979                         memcell_buf = of_get_property(memory, "reg", &len);
980                 if (!memcell_buf || len <= 0)
981                         continue;
982 
983                 /* number of (base, size) ranges encoded in the cells */
984                 ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
985 new_range:
986                 /* these are order-sensitive, and modify the buffer pointer */
987                 start = read_n_cells(n_mem_addr_cells, &memcell_buf);
988                 size = read_n_cells(n_mem_size_cells, &memcell_buf);
989 
990                 /*
991                  * Assumption: either all memory nodes or none will
992                  * have associativity properties.  If none, then
993                  * everything goes to default_nid.
994                  */
995                 associativity = of_get_associativity(memory);
996                 if (associativity) {
997                         nid = associativity_to_nid(associativity);
998                         initialize_form1_numa_distance(associativity);
999                 } else
1000                         nid = default_nid;
1001 
1002                 fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
1003                 node_set_online(nid);
1004 
1005                 size = numa_enforce_memory_limit(start, size);
1006                 if (size)
1007                         memblock_set_node(start, size, &memblock.memory, nid);
1008 
1009                 if (--ranges)
1010                         goto new_range;
1011         }
1012 
1013         for_each_node_by_name(pci, "pci") {
1014                 int nid = NUMA_NO_NODE;
1015 
1016                 associativity = of_get_associativity(pci);
1017                 if (associativity) {
1018                         nid = associativity_to_nid(associativity);
1019                         initialize_form1_numa_distance(associativity);
1020                 }
1021                 if (likely(nid >= 0) && !node_online(nid))
1022                         node_set_online(nid);
1023         }
1024 
1025         /*
1026          * Now do the same thing for each MEMBLOCK listed in the
1027          * ibm,dynamic-memory property in the
1028          * ibm,dynamic-reconfiguration-memory node.
1029          */
1030         memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1031         if (memory) {
1032                 walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb);
1033                 of_node_put(memory);
1034         }
1035 
1036         return 0;
1037 }
1038 
1039 static void __init setup_nonnuma(void)
1040 {
1041         unsigned long top_of_ram = memblock_end_of_DRAM();
1042         unsigned long total_ram = memblock_phys_mem_size();
1043         unsigned long start_pfn, end_pfn;
1044         unsigned int nid = 0;
1045         int i;
1046 
1047         pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram);
1048         pr_debug("Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20);
1049 
1050         for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
1051                 fake_numa_create_new_node(end_pfn, &nid);
1052                 memblock_set_node(PFN_PHYS(start_pfn),
1053                                   PFN_PHYS(end_pfn - start_pfn),
1054                                   &memblock.memory, nid);
1055                 node_set_online(nid);
1056         }
1057 }
1058 
1059 void __init dump_numa_cpu_topology(void)
1060 {
1061         unsigned int node;
1062         unsigned int cpu, count;
1063 
1064         if (!numa_enabled)
1065                 return;
1066 
1067         for_each_online_node(node) {
1068                 pr_info("Node %d CPUs:", node);
1069 
1070                 count = 0;
1071                 /*
1072                  * If we used a CPU iterator here we would miss printing
1073                  * the holes in the cpumap.
1074                  */
1075                 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
1076                         if (cpumask_test_cpu(cpu,
1077                                         node_to_cpumask_map[node])) {
1078                                 if (count == 0)
1079                                         pr_cont(" %u", cpu);
1080                                 ++count;
1081                         } else {
1082                                 if (count > 1)
1083                                         pr_cont("-%u", cpu - 1);
1084                                 count = 0;
1085                         }
1086                 }
1087 
1088                 if (count > 1)
1089                         pr_cont("-%u", nr_cpu_ids - 1);
1090                 pr_cont("\n");
1091         }
1092 }
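
/*
 * Sample output for a hypothetical topology where node 0's cpumask
 * covers cpus 0-7 and 16-23:
 *
 *   Node 0 CPUs: 0-7 16-23
 */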
1093 
1094 /* Initialize NODE_DATA for a node on the local memory */
1095 static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
1096 {
1097         u64 spanned_pages = end_pfn - start_pfn;
1098         const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
1099         u64 nd_pa;
1100         void *nd;
1101         int tnid;
1102 
1103         nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
1104         if (!nd_pa)
1105                 panic("Cannot allocate %zu bytes for node %d data\n",
1106                       nd_size, nid);
1107 
1108         nd = __va(nd_pa);
1109 
1110         /* report and initialize */
1111         pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
1112                 nd_pa, nd_pa + nd_size - 1);
1113         tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
1114         if (tnid != nid)
1115                 pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);
1116 
1117         node_data[nid] = nd;
1118         memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
1119         NODE_DATA(nid)->node_id = nid;
1120         NODE_DATA(nid)->node_start_pfn = start_pfn;
1121         NODE_DATA(nid)->node_spanned_pages = spanned_pages;
1122 }
1123 
1124 static void __init find_possible_nodes(void)
1125 {
1126         struct device_node *rtas, *root;
1127         const __be32 *domains = NULL;
1128         int prop_length, max_nodes;
1129         u32 i;
1130 
1131         if (!numa_enabled)
1132                 return;
1133 
1134         rtas = of_find_node_by_path("/rtas");
1135         if (!rtas)
1136                 return;
1137 
1138         /*
1139          * ibm,current-associativity-domains is a fairly recent property. If
1140          * it doesn't exist, then fall back on ibm,max-associativity-domains.
1141          * "Current" denotes what the platform can support compared to "max",
1142          * which denotes what the hypervisor can support.
1143          *
1144          * If the LPAR is migratable, new nodes might be activated after an
1145          * LPM, so we should consider the max number in that case.
1146          */
1147         root = of_find_node_by_path("/");
1148         if (!of_get_property(root, "ibm,migratable-partition", NULL))
1149                 domains = of_get_property(rtas,
1150                                           "ibm,current-associativity-domains",
1151                                           &prop_length);
1152         of_node_put(root);
1153         if (!domains) {
1154                 domains = of_get_property(rtas, "ibm,max-associativity-domains",
1155                                         &prop_length);
1156                 if (!domains)
1157                         goto out;
1158         }
1159 
1160         max_nodes = of_read_number(&domains[primary_domain_index], 1);
1161         pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);
1162 
1163         for (i = 0; i < max_nodes; i++) {
1164                 if (!node_possible(i))
1165                         node_set(i, node_possible_map);
1166         }
1167 
1168         prop_length /= sizeof(int);
1169         if (prop_length > primary_domain_index + 2)
1170                 coregroup_enabled = 1;
1171 
1172 out:
1173         of_node_put(rtas);
1174 }
1175 
1176 void __init mem_topology_setup(void)
1177 {
1178         int cpu;
1179 
1180         max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1181         min_low_pfn = MEMORY_START >> PAGE_SHIFT;
1182 
1183         /*
1184          * Linux/mm assumes node 0 to be online at boot. However this is not
1185          * true on PowerPC, where node 0 is just like any other node: it
1186          * could be a cpuless, memoryless node. So force node 0 to be offline
1187          * for now. This prevents a cpuless, memoryless node 0 from showing
1188          * up unnecessarily as online. If a node has cpus or memory that need
1189          * to be online, the node will be marked online anyway.
1190          */
1191         node_set_offline(0);
1192 
1193         if (parse_numa_properties())
1194                 setup_nonnuma();
1195 
1196         /*
1197          * Modify the set of possible NUMA nodes to reflect information
1198          * available about the set of online nodes, and the set of nodes
1199          * that we expect to make use of for this platform's affinity
1200          * calculations.
1201          */
1202         nodes_and(node_possible_map, node_possible_map, node_online_map);
1203 
1204         find_possible_nodes();
1205 
1206         setup_node_to_cpumask_map();
1207 
1208         reset_numa_cpu_lookup_table();
1209 
1210         for_each_possible_cpu(cpu) {
1211                 /*
1212                  * Powerpc with CONFIG_NUMA always used to have a node 0,
1213                  * even if it was memoryless or cpuless. For all cpus that
1214                  * are possible but not present, cpu_to_node() would point
1215                  * to node 0. To remove a cpuless, memoryless dummy node,
1216                  * powerpc needs to make sure cpu_to_node() for all possible
1217                  * but not present cpus is set to a proper node.
1218                  */
1219                 numa_setup_cpu(cpu);
1220         }
1221 }
1222 
1223 void __init initmem_init(void)
1224 {
1225         int nid;
1226 
1227         memblock_dump_all();
1228 
1229         for_each_online_node(nid) {
1230                 unsigned long start_pfn, end_pfn;
1231 
1232                 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1233                 setup_node_data(nid, start_pfn, end_pfn);
1234         }
1235 
1236         sparse_init();
1237 
1238         /*
1239          * We need the numa_cpu_lookup_table to be accurate for all CPUs,
1240          * even before we online them, so that we can use cpu_to_{node,mem}
1241          * early in boot, cf. smp_prepare_cpus().
1242          * _nocalls() + manual invocation is used because cpuhp is not yet
1243          * initialized for the boot CPU.
1244          */
1245         cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
1246                                   ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
1247 }
1248 
1249 static int __init early_numa(char *p)
1250 {
1251         if (!p)
1252                 return 0;
1253 
1254         if (strstr(p, "off"))
1255                 numa_enabled = 0;
1256 
1257         p = strstr(p, "fake=");
1258         if (p)
1259                 cmdline = p + strlen("fake=");
1260 
1261         return 0;
1262 }
1263 early_param("numa", early_numa);
1264 
1265 #ifdef CONFIG_MEMORY_HOTPLUG
1266 /*
1267  * Find the node associated with a hot added memory section for
1268  * memory represented in the device tree by the property
1269  * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
1270  */
1271 static int hot_add_drconf_scn_to_nid(unsigned long scn_addr)
1272 {
1273         struct drmem_lmb *lmb;
1274         unsigned long lmb_size;
1275         int nid = NUMA_NO_NODE;
1276 
1277         lmb_size = drmem_lmb_size();
1278 
1279         for_each_drmem_lmb(lmb) {
1280                 /* skip this block if it is reserved or not assigned to
1281                  * this partition */
1282                 if ((lmb->flags & DRCONF_MEM_RESERVED)
1283                     || !(lmb->flags & DRCONF_MEM_ASSIGNED))
1284                         continue;
1285 
1286                 if ((scn_addr < lmb->base_addr)
1287                     || (scn_addr >= (lmb->base_addr + lmb_size)))
1288                         continue;
1289 
1290                 nid = of_drconf_to_nid_single(lmb);
1291                 break;
1292         }
1293 
1294         return nid;
1295 }
1296 
1297 /*
1298  * Find the node associated with a hot added memory section for memory
1299  * represented in the device tree as a node (i.e. memory@XXXX) for
1300  * each memblock.
1301  */
1302 static int hot_add_node_scn_to_nid(unsigned long scn_addr)
1303 {
1304         struct device_node *memory;
1305         int nid = NUMA_NO_NODE;
1306 
1307         for_each_node_by_type(memory, "memory") {
1308                 int i = 0;
1309 
1310                 while (1) {
1311                         struct resource res;
1312 
1313                         if (of_address_to_resource(memory, i++, &res))
1314                                 break;
1315 
1316                         if ((scn_addr < res.start) || (scn_addr > res.end))
1317                                 continue;
1318 
1319                         nid = of_node_to_nid_single(memory);
1320                         break;
1321                 }
1322 
1323                 if (nid >= 0)
1324                         break;
1325         }
1326 
1327         of_node_put(memory);
1328 
1329         return nid;
1330 }
1331 
1332 /*
1333  * Find the node associated with a hot added memory section.  Section
1334  * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
1335  * sections are fully contained within a single MEMBLOCK.
1336  */
1337 int hot_add_scn_to_nid(unsigned long scn_addr)
1338 {
1339         struct device_node *memory = NULL;
1340         int nid;
1341 
1342         if (!numa_enabled)
1343                 return first_online_node;
1344 
1345         memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1346         if (memory) {
1347                 nid = hot_add_drconf_scn_to_nid(scn_addr);
1348                 of_node_put(memory);
1349         } else {
1350                 nid = hot_add_node_scn_to_nid(scn_addr);
1351         }
1352 
1353         if (nid < 0 || !node_possible(nid))
1354                 nid = first_online_node;
1355 
1356         return nid;
1357 }
1358 
1359 static u64 hot_add_drconf_memory_max(void)
1360 {
1361         struct device_node *memory = NULL;
1362         struct device_node *dn = NULL;
1363         const __be64 *lrdr = NULL;
1364 
1365         dn = of_find_node_by_path("/rtas");
1366         if (dn) {
1367                 lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
1368                 of_node_put(dn);
1369                 if (lrdr)
1370                         return be64_to_cpup(lrdr);
1371         }
1372 
1373         memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1374         if (memory) {
1375                 of_node_put(memory);
1376                 return drmem_lmb_memory_max();
1377         }
1378         return 0;
1379 }
1380 
1381 /*
1382  * memory_hotplug_max - return max address of memory that may be added
1383  *
1384  * This is currently only used on systems that support drconfig memory
1385  * hotplug.
1386  */
1387 u64 memory_hotplug_max(void)
1388 {
1389         return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
1390 }
1391 #endif /* CONFIG_MEMORY_HOTPLUG */
1392 
1393 /* Virtual Processor Home Node (VPHN) support */
1394 #ifdef CONFIG_PPC_SPLPAR
1395 static int topology_inited;
1396 
1397 /*
1398  * Retrieve the new associativity information for a virtual processor's
1399  * home node.
1400  */
1401 static long vphn_get_associativity(unsigned long cpu,
1402                                         __be32 *associativity)
1403 {
1404         long rc;
1405 
1406         rc = hcall_vphn(get_hard_smp_processor_id(cpu),
1407                                 VPHN_FLAG_VCPU, associativity);
1408 
1409         switch (rc) {
1410         case H_SUCCESS:
1411                 pr_debug("VPHN hcall succeeded. Reset polling...\n");
1412                 goto out;
1413 
1414         case H_FUNCTION:
1415                 pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
1416                 break;
1417         case H_HARDWARE:
1418                 pr_err_ratelimited("hcall_vphn() experienced a hardware fault "
1419                         "preventing VPHN. Disabling polling...\n");
1420                 break;
1421         case H_PARAMETER:
1422                 pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. "
1423                         "Disabling polling...\n");
1424                 break;
1425         default:
1426                 pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n"
1427                         , rc);
1428                 break;
1429         }
1430 out:
1431         return rc;
1432 }
1433 
1434 void find_and_update_cpu_nid(int cpu)
1435 {
1436         __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1437         int new_nid;
1438 
1439         /* Use associativity from first thread for all siblings */
1440         if (vphn_get_associativity(cpu, associativity))
1441                 return;
1442 
1443         /* Do not have previous associativity, so find it now. */
1444         new_nid = associativity_to_nid(associativity);
1445 
1446         if (new_nid < 0 || !node_possible(new_nid))
1447                 new_nid = first_online_node;
1448         else
1449                 // Associate node <-> cpu, so cpu_up() calls
1450                 // try_online_node() on the right node.
1451                 set_cpu_numa_node(cpu, new_nid);
1452 
1453         pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid);
1454 }
1455 
1456 int cpu_to_coregroup_id(int cpu)
1457 {
1458         __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1459         int index;
1460 
1461         if (cpu < 0 || cpu >= nr_cpu_ids)
1462                 return -1;
1463 
1464         if (!coregroup_enabled)
1465                 goto out;
1466 
1467         if (!firmware_has_feature(FW_FEATURE_VPHN))
1468                 goto out;
1469 
1470         if (vphn_get_associativity(cpu, associativity))
1471                 goto out;
1472 
1473         index = of_read_number(associativity, 1);
1474         if (index > primary_domain_index + 1)
1475                 return of_read_number(&associativity[index - 1], 1);
1476 
1477 out:
1478         return cpu_to_core_id(cpu);
1479 }
1480 
1481 static int topology_update_init(void)
1482 {
1483         topology_inited = 1;
1484         return 0;
1485 }
1486 device_initcall(topology_update_init);
1487 #endif /* CONFIG_PPC_SPLPAR */
1488 
