Linux/arch/x86/kernel/cpu/topology.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU/APIC topology
 *
 * The APIC IDs describe the system topology in multiple domain levels.
 * The CPUID topology parser provides the information which part of the
 * APIC ID is associated to the individual levels:
 *
 * [PACKAGE][DIEGRP][DIE][TILE][MODULE][CORE][THREAD]
 *
 * The root space contains the package (socket) IDs.
 *
 * Levels which are not enumerated consume 0 bits of space, but conceptually
 * they are always represented. If e.g. only the CORE and THREAD levels are
 * enumerated, then DIE, MODULE and TILE have the same physical ID as the
 * PACKAGE.
 *
 * If SMT is not supported, then the THREAD domain is still used. It then
 * has the same physical ID as the CORE domain and is the only child of
 * the core domain.
 *
 * This allows a unified view on the system independent of the enumerated
 * domain levels without requiring any conditionals in the code.
 */
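
/*
 * Worked example (illustrative only, with hypothetical shift widths rather
 * than values enumerated from real hardware): assume the THREAD level
 * occupies 1 bit and the CORE level the next 4 bits. An APIC ID of 0x2b
 * (0b101011) then decomposes into THREAD = 1 (bit 0), CORE = 0b0101 = 5
 * (bits 1-4) and PACKAGE = 1 (bits 5 and up). DIE, TILE, MODULE and DIEGRP
 * are not enumerated in this example, occupy zero bits and therefore share
 * the PACKAGE ID.
 */
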
#define pr_fmt(fmt) "CPU topo: " fmt
#include <linux/cpu.h>

#include <xen/xen.h>

#include <asm/apic.h>
#include <asm/hypervisor.h>
#include <asm/io_apic.h>
#include <asm/mpspec.h>
#include <asm/smp.h>

#include "cpu.h"

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, CPU_ACPIID_INVALID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);

/* Bitmap of physically present CPUs. */
DECLARE_BITMAP(phys_cpu_present_map, MAX_LOCAL_APIC) __read_mostly;

/* Used for CPU number allocation and parallel CPU bringup */
u32 cpuid_to_apicid[] __ro_after_init = { [0 ... NR_CPUS - 1] = BAD_APICID, };

/* Bitmaps to mark registered APICs at each topology domain */
static struct { DECLARE_BITMAP(map, MAX_LOCAL_APIC); } apic_maps[TOPO_MAX_DOMAIN] __ro_after_init;

/*
 * Keep track of assigned, disabled and rejected CPUs. The assigned count
 * is preset to 1 as CPU #0 is reserved for the boot CPU.
 */
static struct {
	unsigned int		nr_assigned_cpus;
	unsigned int		nr_disabled_cpus;
	unsigned int		nr_rejected_cpus;
	u32			boot_cpu_apic_id;
	u32			real_bsp_apic_id;
} topo_info __ro_after_init = {
	.nr_assigned_cpus	= 1,
	.boot_cpu_apic_id	= BAD_APICID,
	.real_bsp_apic_id	= BAD_APICID,
};

#define domain_weight(_dom)	bitmap_weight(apic_maps[_dom].map, MAX_LOCAL_APIC)

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == (u64)cpuid_to_apicid[cpu];
}

#ifdef CONFIG_SMP
static void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid)
{
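	/* The primary thread is the one with the SMT bits of its APIC ID all clear */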
	if (!(apicid & (__max_threads_per_core - 1)))
		cpumask_set_cpu(cpu, &__cpu_primary_thread_mask);
}
#else
static inline void cpu_mark_primary_thread(unsigned int cpu, unsigned int apicid) { }
#endif

/*
 * Convert the APIC ID to a domain level ID by masking out the low bits
 * below the domain level @dom.
 */
static inline u32 topo_apicid(u32 apicid, enum x86_topology_domains dom)
{
	if (dom == TOPO_SMT_DOMAIN)
		return apicid;
	return apicid & (UINT_MAX << x86_topo_system.dom_shifts[dom - 1]);
}
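
/*
 * Illustrative example (hypothetical shift value, not taken from real
 * enumeration): with x86_topo_system.dom_shifts[TOPO_SMT_DOMAIN] == 1,
 * topo_apicid(0x2b, TOPO_CORE_DOMAIN) masks out the single SMT bit and
 * returns 0x2a, the CORE level ID shared by both threads of that core.
 */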

static int topo_lookup_cpuid(u32 apic_id)
{
	int i;

	/* CPU# to APICID mapping is persistent once it is established */
	for (i = 0; i < topo_info.nr_assigned_cpus; i++) {
		if (cpuid_to_apicid[i] == apic_id)
			return i;
	}
	return -ENODEV;
}

static __init int topo_get_cpunr(u32 apic_id)
{
	int cpu = topo_lookup_cpuid(apic_id);

	if (cpu >= 0)
		return cpu;

	return topo_info.nr_assigned_cpus++;
}

static void topo_set_cpuids(unsigned int cpu, u32 apic_id, u32 acpi_id)
{
#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
	early_per_cpu(x86_cpu_to_apicid, cpu) = apic_id;
	early_per_cpu(x86_cpu_to_acpiid, cpu) = acpi_id;
#endif
	set_cpu_present(cpu, true);
}

static __init bool check_for_real_bsp(u32 apic_id)
{
	bool is_bsp = false, has_apic_base = boot_cpu_data.x86 >= 6;
	u64 msr;

	/*
	 * There is no really good way to detect whether this is a kdump
	 * kernel, but except on the Voyager SMP monstrosity, which is no
	 * longer supported, the real BSP APIC ID is the first one which is
	 * enumerated by firmware. That allows detecting whether the boot
	 * CPU is the real BSP. If it is not, then do not register the APIC
	 * because sending INIT to the real BSP would reset the whole
	 * system.
	 *
	 * The first APIC ID which is enumerated by firmware is detectable
	 * because the boot CPU APIC ID is registered before that without
	 * invoking this code.
	 */
	if (topo_info.real_bsp_apic_id != BAD_APICID)
		return false;

	/*
	 * Check whether the enumeration order is broken by evaluating the
	 * BSP bit in the APICBASE MSR. If the CPU does not have the
	 * APICBASE MSR then the BSP detection is not possible and the
	 * kernel must rely on the firmware enumeration order.
	 */
	if (has_apic_base) {
		rdmsrl(MSR_IA32_APICBASE, msr);
		is_bsp = !!(msr & MSR_IA32_APICBASE_BSP);
	}

	if (apic_id == topo_info.boot_cpu_apic_id) {
		/*
		 * If the boot CPU has the APIC BSP bit set then the
		 * firmware enumeration agrees with it. If the CPU does not
		 * have the APICBASE MSR then the only choice is to trust
		 * the enumeration order.
		 */
		if (is_bsp || !has_apic_base) {
			topo_info.real_bsp_apic_id = apic_id;
			return false;
		}
		/*
		 * If the boot APIC is enumerated first, but the APICBASE
		 * MSR does not have the BSP bit set, then there is no way
		 * to discover the real BSP here. Assume a crash kernel and
		 * limit the number of CPUs to 1 as an INIT to the real BSP
		 * would reset the machine.
		 */
		pr_warn("Enumerated BSP APIC %x is not marked in APICBASE MSR\n", apic_id);
		pr_warn("Assuming crash kernel. Limiting to one CPU to prevent machine INIT\n");
		set_nr_cpu_ids(1);
		goto fwbug;
	}

	pr_warn("Boot CPU APIC ID not the first enumerated APIC ID: %x != %x\n",
		topo_info.boot_cpu_apic_id, apic_id);

	if (is_bsp) {
		/*
		 * The boot CPU has the APIC BSP bit set. Use it and complain
		 * about the broken firmware enumeration.
		 */
		topo_info.real_bsp_apic_id = topo_info.boot_cpu_apic_id;
		goto fwbug;
	}

	pr_warn("Crash kernel detected. Disabling real BSP to prevent machine INIT\n");

	topo_info.real_bsp_apic_id = apic_id;
	return true;

fwbug:
	pr_warn(FW_BUG "APIC enumeration order not specification compliant\n");
	return false;
}

static unsigned int topo_unit_count(u32 lvlid, enum x86_topology_domains at_level,
				    unsigned long *map)
{
	unsigned int id, end, cnt = 0;

	/* Calculate the exclusive end */
	end = lvlid + (1U << x86_topo_system.dom_shifts[at_level]);

	/* Unfortunately there is no bitmap_weight_range() */
	for (id = find_next_bit(map, end, lvlid); id < end; id = find_next_bit(map, end, ++id))
		cnt++;
	return cnt;
}

static __init void topo_register_apic(u32 apic_id, u32 acpi_id, bool present)
{
	int cpu, dom;

	if (present) {
		set_bit(apic_id, phys_cpu_present_map);

		/*
		 * Double registration is valid in case of the boot CPU
		 * APIC because that is registered before the enumeration
		 * of the APICs via firmware parsers or VM guest
		 * mechanisms.
		 */
		if (apic_id == topo_info.boot_cpu_apic_id)
			cpu = 0;
		else
			cpu = topo_get_cpunr(apic_id);

		cpuid_to_apicid[cpu] = apic_id;
		topo_set_cpuids(cpu, apic_id, acpi_id);
	} else {
		u32 pkgid = topo_apicid(apic_id, TOPO_PKG_DOMAIN);

		/*
		 * Check for present APICs in the same package when running
		 * on bare metal. Allow the bogosity in a guest.
		 */
		if (hypervisor_is_type(X86_HYPER_NATIVE) &&
		    topo_unit_count(pkgid, TOPO_PKG_DOMAIN, phys_cpu_present_map)) {
			pr_info_once("Ignoring hot-pluggable APIC ID %x in present package.\n",
				     apic_id);
			topo_info.nr_rejected_cpus++;
			return;
		}

		topo_info.nr_disabled_cpus++;
	}

	/*
	 * Register present and possible CPUs in the domain
	 * maps. cpu_possible_map will be updated in
	 * topology_init_possible_cpus() after enumeration is done.
	 */
	for (dom = TOPO_SMT_DOMAIN; dom < TOPO_MAX_DOMAIN; dom++)
		set_bit(topo_apicid(apic_id, dom), apic_maps[dom].map);
}

/**
 * topology_register_apic - Register an APIC in early topology maps
 * @apic_id:	The APIC ID to set up
 * @acpi_id:	The ACPI ID associated to the APIC
 * @present:	True if the corresponding CPU is present
 */
void __init topology_register_apic(u32 apic_id, u32 acpi_id, bool present)
{
	if (apic_id >= MAX_LOCAL_APIC) {
		pr_err_once("APIC ID %x exceeds kernel limit of: %x\n", apic_id, MAX_LOCAL_APIC - 1);
		topo_info.nr_rejected_cpus++;
		return;
	}

	if (check_for_real_bsp(apic_id)) {
		topo_info.nr_rejected_cpus++;
		return;
	}

	/* CPU numbers exhausted? */
	if (apic_id != topo_info.boot_cpu_apic_id && topo_info.nr_assigned_cpus >= nr_cpu_ids) {
		pr_warn_once("CPU limit of %d reached. Ignoring further CPUs\n", nr_cpu_ids);
		topo_info.nr_rejected_cpus++;
		return;
	}

	topo_register_apic(apic_id, acpi_id, present);
}

/**
 * topology_register_boot_apic - Register the boot CPU APIC
 * @apic_id:	The APIC ID to set up
 *
 * Separate so that CPU #0 can be assigned to the boot CPU.
 */
void __init topology_register_boot_apic(u32 apic_id)
{
	WARN_ON_ONCE(topo_info.boot_cpu_apic_id != BAD_APICID);

	topo_info.boot_cpu_apic_id = apic_id;
	topo_register_apic(apic_id, CPU_ACPIID_INVALID, true);
}

/**
 * topology_get_logical_id - Retrieve the logical ID at a given topology domain level
 * @apicid:		The APIC ID for which to lookup the logical ID
 * @at_level:		The topology domain level to use
 *
 * @apicid must be a full APIC ID, not the normalized variant. It's valid for
 * all bits below the domain level specified by @at_level to be clear. So both
 * real APIC IDs and backshifted normalized APIC IDs work correctly.
 *
 * Returns:
 *  - >= 0:	The requested logical ID
 *  - -ERANGE:	@apicid is out of range
 *  - -ENODEV:	@apicid is not registered
 */
int topology_get_logical_id(u32 apicid, enum x86_topology_domains at_level)
{
	/* Remove the bits below @at_level to get the proper level ID of @apicid */
	unsigned int lvlid = topo_apicid(apicid, at_level);

	if (lvlid >= MAX_LOCAL_APIC)
		return -ERANGE;
	if (!test_bit(lvlid, apic_maps[at_level].map))
		return -ENODEV;
	/* Get the number of set bits before @lvlid. */
	return bitmap_weight(apic_maps[at_level].map, lvlid);
}
EXPORT_SYMBOL_GPL(topology_get_logical_id);
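
/*
 * Usage sketch (illustrative, not part of this file): a caller holding a
 * full APIC ID can translate it into a zero-based logical package number:
 *
 *	int pkg = topology_get_logical_id(apicid, TOPO_PKG_DOMAIN);
 *
 *	if (pkg < 0)
 *		return pkg;
 *
 * where a negative value is -ERANGE or -ENODEV as documented above.
 */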
/**
 * topology_unit_count - Retrieve the count of specified units at a given topology domain level
 * @apicid:		The APIC ID which specifies the search range
 * @which_units:	The domain level specifying the units to count
 * @at_level:		The domain level at which @which_units have to be counted
 *
 * This returns the number of possible units according to the enumerated
 * information.
 *
 * E.g. topology_unit_count(apicid, TOPO_CORE_DOMAIN, TOPO_PKG_DOMAIN)
 * counts the number of possible cores in the package to which @apicid
 * belongs.
 *
 * @at_level must obviously be greater than @which_units to produce useful
 * results. If @at_level is equal to @which_units the result is
 * unsurprisingly 1. If @at_level is less than @which_units the result is
 * by definition undefined and the function returns 0.
 */
unsigned int topology_unit_count(u32 apicid, enum x86_topology_domains which_units,
				 enum x86_topology_domains at_level)
{
	/* Remove the bits below @at_level to get the proper level ID of @apicid */
	unsigned int lvlid = topo_apicid(apicid, at_level);

	if (lvlid >= MAX_LOCAL_APIC)
		return 0;
	if (!test_bit(lvlid, apic_maps[at_level].map))
		return 0;
	if (which_units > at_level)
		return 0;
	if (which_units == at_level)
		return 1;
	return topo_unit_count(lvlid, at_level, apic_maps[which_units].map);
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU
/**
 * topology_hotplug_apic - Handle a physical hotplugged APIC after boot
 * @apic_id:	The APIC ID to set up
 * @acpi_id:	The ACPI ID associated to the APIC
 */
int topology_hotplug_apic(u32 apic_id, u32 acpi_id)
{
	int cpu;

	if (apic_id >= MAX_LOCAL_APIC)
		return -EINVAL;

	/* Reject if the APIC ID was not registered during enumeration. */
	if (!test_bit(apic_id, apic_maps[TOPO_SMT_DOMAIN].map))
		return -ENODEV;

	cpu = topo_lookup_cpuid(apic_id);
	if (cpu < 0)
		return -ENOSPC;

	set_bit(apic_id, phys_cpu_present_map);
	topo_set_cpuids(cpu, apic_id, acpi_id);
	cpu_mark_primary_thread(cpu, apic_id);
	return cpu;
}

/**
 * topology_hotunplug_apic - Remove a physical hotplugged APIC after boot
 * @cpu:	The CPU number for which the APIC ID is removed
 */
void topology_hotunplug_apic(unsigned int cpu)
{
	u32 apic_id = cpuid_to_apicid[cpu];

	if (apic_id == BAD_APICID)
		return;

	per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	clear_bit(apic_id, phys_cpu_present_map);
	set_cpu_present(cpu, false);
}
#endif

#ifdef CONFIG_X86_LOCAL_APIC
static unsigned int max_possible_cpus __initdata = NR_CPUS;

/**
 * topology_apply_cmdline_limits_early - Apply topology command line limits early
 *
 * Ensure that command line limits are in effect before firmware parsing
 * takes place.
 */
void __init topology_apply_cmdline_limits_early(void)
{
	unsigned int possible = nr_cpu_ids;

	/* 'maxcpus=0' 'nosmp' 'nolapic' 'disableapic' 'noapic' */
	if (!setup_max_cpus || ioapic_is_disabled || apic_is_disabled)
		possible = 1;

	/* 'possible_cpus=N' */
	possible = min_t(unsigned int, max_possible_cpus, possible);

	if (possible < nr_cpu_ids) {
		pr_info("Limiting to %u possible CPUs\n", possible);
		set_nr_cpu_ids(possible);
	}
}
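
/*
 * Illustrative note: booting with "possible_cpus=4" on a machine whose
 * firmware enumerates more CPUs caps nr_cpu_ids at 4 here, before the
 * firmware tables are parsed, so registrations beyond that limit are later
 * rejected in topology_register_apic().
 */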

static __init bool restrict_to_up(void)
{
	if (!smp_found_config || ioapic_is_disabled)
		return true;
	/*
	 * XEN PV is special as it does not advertise the local APIC
	 * properly, but provides a fake topology for it so that the
	 * infrastructure works. So don't apply the restrictions vs. APIC
	 * here.
	 */
	if (xen_pv_domain())
		return false;

	return apic_is_disabled;
}

void __init topology_init_possible_cpus(void)
{
	unsigned int assigned = topo_info.nr_assigned_cpus;
	unsigned int disabled = topo_info.nr_disabled_cpus;
	unsigned int cnta, cntb, cpu, allowed = 1;
	unsigned int total = assigned + disabled;
	u32 apicid, firstid;

	/*
	 * If there was no APIC registered, then fake one so that the
	 * topology bitmap is populated. That ensures that the code below
	 * is valid and the various query interfaces can be used
	 * unconditionally. This does not affect the actual APIC code in
	 * any way because either the local APIC address has not been
	 * registered or the local APIC was disabled on the command line.
	 */
	if (topo_info.boot_cpu_apic_id == BAD_APICID)
		topology_register_boot_apic(0);

	if (!restrict_to_up()) {
		if (WARN_ON_ONCE(assigned > nr_cpu_ids)) {
			disabled += assigned - nr_cpu_ids;
			assigned = nr_cpu_ids;
		}
		allowed = min_t(unsigned int, total, nr_cpu_ids);
	}

	if (total > allowed)
		pr_warn("%u possible CPUs exceed the limit of %u\n", total, allowed);

	assigned = min_t(unsigned int, allowed, assigned);
	disabled = allowed - assigned;

	topo_info.nr_assigned_cpus = assigned;
	topo_info.nr_disabled_cpus = disabled;

	total_cpus = allowed;
	set_nr_cpu_ids(allowed);

	cnta = domain_weight(TOPO_PKG_DOMAIN);
	cntb = domain_weight(TOPO_DIE_DOMAIN);
	__max_logical_packages = cnta;
	__max_dies_per_package = 1U << (get_count_order(cntb) - get_count_order(cnta));
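	/* Worked example: 2 packages and 8 dies in total give 1U << (3 - 1) == 4 dies per package */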

	pr_info("Max. logical packages: %3u\n", cnta);
	pr_info("Max. logical dies:     %3u\n", cntb);
	pr_info("Max. dies per package: %3u\n", __max_dies_per_package);

	cnta = domain_weight(TOPO_CORE_DOMAIN);
	cntb = domain_weight(TOPO_SMT_DOMAIN);
	/*
	 * Can't use the order delta here as order(cnta) can be equal to
	 * order(cntb) even if cnta != cntb.
	 */
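	/*
	 * Worked example: a hybrid system with 6 cores and 8 threads has
	 * get_count_order(6) == get_count_order(8) == 3, so an order delta
	 * would claim 1 thread per core, while DIV_ROUND_UP(8, 6) yields
	 * the correct maximum of 2.
	 */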
	__max_threads_per_core = DIV_ROUND_UP(cntb, cnta);
	pr_info("Max. threads per core: %3u\n", __max_threads_per_core);

	firstid = find_first_bit(apic_maps[TOPO_SMT_DOMAIN].map, MAX_LOCAL_APIC);
	__num_cores_per_package = topology_unit_count(firstid, TOPO_CORE_DOMAIN, TOPO_PKG_DOMAIN);
	pr_info("Num. cores per package:   %3u\n", __num_cores_per_package);
	__num_threads_per_package = topology_unit_count(firstid, TOPO_SMT_DOMAIN, TOPO_PKG_DOMAIN);
	pr_info("Num. threads per package: %3u\n", __num_threads_per_package);

	pr_info("Allowing %u present CPUs plus %u hotplug CPUs\n", assigned, disabled);
	if (topo_info.nr_rejected_cpus)
		pr_info("Rejected CPUs %u\n", topo_info.nr_rejected_cpus);

	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));

	/* Assign CPU numbers to non-present CPUs */
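	/* i.e. APIC IDs set in the SMT domain map but not in phys_cpu_present_map */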
	for (apicid = 0; disabled; disabled--, apicid++) {
		apicid = find_next_andnot_bit(apic_maps[TOPO_SMT_DOMAIN].map, phys_cpu_present_map,
					      MAX_LOCAL_APIC, apicid);
		if (apicid >= MAX_LOCAL_APIC)
			break;
		cpuid_to_apicid[topo_info.nr_assigned_cpus++] = apicid;
	}

	for (cpu = 0; cpu < allowed; cpu++) {
		apicid = cpuid_to_apicid[cpu];

		set_cpu_possible(cpu, true);

		if (apicid == BAD_APICID)
			continue;

		cpu_mark_primary_thread(cpu, apicid);
		set_cpu_present(cpu, test_bit(apicid, phys_cpu_present_map));
	}
}

/*
 * Late SMP disable after sizing CPU masks when APIC/IOAPIC setup failed.
 */
void __init topology_reset_possible_cpus_up(void)
{
	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));

	bitmap_zero(phys_cpu_present_map, MAX_LOCAL_APIC);
	if (topo_info.boot_cpu_apic_id != BAD_APICID)
		set_bit(topo_info.boot_cpu_apic_id, phys_cpu_present_map);
}

static int __init setup_possible_cpus(char *str)
{
	get_option(&str, &max_possible_cpus);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
#endif

