TOMOYO Linux Cross Reference
Linux/kernel/cpu.c

Diff markup

Differences between /kernel/cpu.c (Version linux-6.12-rc7) and /kernel/cpu.c (Version linux-3.10.108)
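
Reader's note: the most visible change in the hunks below is the CPU-hotplug read-side locking. linux-3.10.108 serializes readers against hotplug with get_online_cpus()/put_online_cpus(), built on the refcounted mutex in the local cpu_hotplug structure, while linux-6.12-rc7 uses the per-CPU rwsem cpu_hotplug_lock behind cpus_read_lock()/cpus_read_unlock(). The sketch below is a hypothetical out-of-tree caller, not part of kernel/cpu.c, showing how the same critical section is written against each version; the two halves target different kernels and would not build together.

        /* Hypothetical caller sketch -- not part of kernel/cpu.c. */
        #include <linux/cpu.h>
        #include <linux/cpumask.h>
        #include <linux/printk.h>

        static void walk_online_cpus_v3_10(void)
        {
                unsigned int cpu;

                get_online_cpus();      /* bumps cpu_hotplug.refcount */
                for_each_online_cpu(cpu)
                        pr_info("cpu%u online\n", cpu);
                put_online_cpus();      /* drops it; wakes a waiting writer */
        }

        static void walk_online_cpus_v6_12(void)
        {
                unsigned int cpu;

                cpus_read_lock();       /* percpu_down_read(&cpu_hotplug_lock) */
                for_each_online_cpu(cpu)
                        pr_info("cpu%u online\n", cpu);
                cpus_read_unlock();     /* percpu_up_read(&cpu_hotplug_lock) */
        }
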


  1 /* CPU control.                                     1 /* CPU control.
  2  * (C) 2001, 2002, 2003, 2004 Rusty Russell         2  * (C) 2001, 2002, 2003, 2004 Rusty Russell
  3  *                                                  3  *
  4  * This code is licenced under the GPL.             4  * This code is licenced under the GPL.
  5  */                                                 5  */
  6 #include <linux/sched/mm.h>                    << 
  7 #include <linux/proc_fs.h>                          6 #include <linux/proc_fs.h>
  8 #include <linux/smp.h>                              7 #include <linux/smp.h>
  9 #include <linux/init.h>                             8 #include <linux/init.h>
 10 #include <linux/notifier.h>                         9 #include <linux/notifier.h>
 11 #include <linux/sched/signal.h>                !!  10 #include <linux/sched.h>
 12 #include <linux/sched/hotplug.h>               << 
 13 #include <linux/sched/isolation.h>             << 
 14 #include <linux/sched/task.h>                  << 
 15 #include <linux/sched/smt.h>                   << 
 16 #include <linux/unistd.h>                          11 #include <linux/unistd.h>
 17 #include <linux/cpu.h>                             12 #include <linux/cpu.h>
 18 #include <linux/oom.h>                             13 #include <linux/oom.h>
 19 #include <linux/rcupdate.h>                        14 #include <linux/rcupdate.h>
 20 #include <linux/delay.h>                       << 
 21 #include <linux/export.h>                          15 #include <linux/export.h>
 22 #include <linux/bug.h>                             16 #include <linux/bug.h>
 23 #include <linux/kthread.h>                         17 #include <linux/kthread.h>
 24 #include <linux/stop_machine.h>                    18 #include <linux/stop_machine.h>
 25 #include <linux/mutex.h>                           19 #include <linux/mutex.h>
 26 #include <linux/gfp.h>                             20 #include <linux/gfp.h>
 27 #include <linux/suspend.h>                         21 #include <linux/suspend.h>
 28 #include <linux/lockdep.h>                     << 
 29 #include <linux/tick.h>                        << 
 30 #include <linux/irq.h>                         << 
 31 #include <linux/nmi.h>                         << 
 32 #include <linux/smpboot.h>                     << 
 33 #include <linux/relay.h>                       << 
 34 #include <linux/slab.h>                        << 
 35 #include <linux/scs.h>                         << 
 36 #include <linux/percpu-rwsem.h>                << 
 37 #include <linux/cpuset.h>                      << 
 38 #include <linux/random.h>                      << 
 39 #include <linux/cc_platform.h>                 << 
 40                                                << 
 41 #include <trace/events/power.h>                << 
 42 #define CREATE_TRACE_POINTS                    << 
 43 #include <trace/events/cpuhp.h>                << 
 44                                                    22 
 45 #include "smpboot.h"                               23 #include "smpboot.h"
 46                                                    24 
 47 /**                                            << 
 48  * struct cpuhp_cpu_state - Per cpu hotplug st << 
 49  * @state:      The current cpu state          << 
 50  * @target:     The target state               << 
 51  * @fail:       Current CPU hotplug callback s << 
 52  * @thread:     Pointer to the hotplug thread  << 
 53  * @should_run: Thread should execute          << 
 54  * @rollback:   Perform a rollback             << 
 55  * @single:     Single callback invocation     << 
 56  * @bringup:    Single callback bringup or tea << 
 57  * @node:       Remote CPU node; for multi-ins << 
 58  *              single entry callback for inst << 
 59  * @last:       For multi-instance rollback, r << 
 60  * @cb_state:   The state for a single callbac << 
 61  * @result:     Result of the operation        << 
 62  * @ap_sync_state:      State for AP synchroni << 
 63  * @done_up:    Signal completion to the issue << 
 64  * @done_down:  Signal completion to the issue << 
 65  */                                            << 
 66 struct cpuhp_cpu_state {                       << 
 67         enum cpuhp_state        state;         << 
 68         enum cpuhp_state        target;        << 
 69         enum cpuhp_state        fail;          << 
 70 #ifdef CONFIG_SMP                                  25 #ifdef CONFIG_SMP
 71         struct task_struct      *thread;       << 
 72         bool                    should_run;    << 
 73         bool                    rollback;      << 
 74         bool                    single;        << 
 75         bool                    bringup;       << 
 76         struct hlist_node       *node;         << 
 77         struct hlist_node       *last;         << 
 78         enum cpuhp_state        cb_state;      << 
 79         int                     result;        << 
 80         atomic_t                ap_sync_state; << 
 81         struct completion       done_up;       << 
 82         struct completion       done_down;     << 
 83 #endif                                         << 
 84 };                                             << 
 85                                                << 
 86 static DEFINE_PER_CPU(struct cpuhp_cpu_state,  << 
 87         .fail = CPUHP_INVALID,                 << 
 88 };                                             << 
 89                                                << 
 90 #ifdef CONFIG_SMP                              << 
 91 cpumask_t cpus_booted_once_mask;               << 
 92 #endif                                         << 
 93                                                << 
 94 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_ << 
 95 static struct lockdep_map cpuhp_state_up_map = << 
 96         STATIC_LOCKDEP_MAP_INIT("cpuhp_state-u << 
 97 static struct lockdep_map cpuhp_state_down_map << 
 98         STATIC_LOCKDEP_MAP_INIT("cpuhp_state-d << 
 99                                                << 
100                                                << 
101 static inline void cpuhp_lock_acquire(bool bri << 
102 {                                              << 
103         lock_map_acquire(bringup ? &cpuhp_stat << 
104 }                                              << 
105                                                << 
106 static inline void cpuhp_lock_release(bool bri << 
107 {                                              << 
108         lock_map_release(bringup ? &cpuhp_stat << 
109 }                                              << 
110 #else                                          << 
111                                                << 
112 static inline void cpuhp_lock_acquire(bool bri << 
113 static inline void cpuhp_lock_release(bool bri << 
114                                                << 
115 #endif                                         << 
116                                                << 
117 /**                                            << 
118  * struct cpuhp_step - Hotplug state machine s << 
119  * @name:       Name of the step               << 
120  * @startup:    Startup function of the step   << 
121  * @teardown:   Teardown function of the step  << 
122  * @cant_stop:  Bringup/teardown can't be stop << 
123  * @multi_instance:     State has multiple ins << 
124  */                                            << 
125 struct cpuhp_step {                            << 
126         const char              *name;         << 
127         union {                                << 
128                 int             (*single)(unsi << 
129                 int             (*multi)(unsig << 
130                                          struc << 
131         } startup;                             << 
132         union {                                << 
133                 int             (*single)(unsi << 
134                 int             (*multi)(unsig << 
135                                          struc << 
136         } teardown;                            << 
137         /* private: */                         << 
138         struct hlist_head       list;          << 
139         /* public: */                          << 
140         bool                    cant_stop;     << 
141         bool                    multi_instance << 
142 };                                             << 
143                                                << 
144 static DEFINE_MUTEX(cpuhp_state_mutex);        << 
145 static struct cpuhp_step cpuhp_hp_states[];    << 
146                                                << 
147 static struct cpuhp_step *cpuhp_get_step(enum  << 
148 {                                              << 
149         return cpuhp_hp_states + state;        << 
150 }                                              << 
151                                                << 
152 static bool cpuhp_step_empty(bool bringup, str << 
153 {                                              << 
154         return bringup ? !step->startup.single << 
155 }                                              << 
156                                                << 
157 /**                                            << 
158  * cpuhp_invoke_callback - Invoke the callback << 
159  * @cpu:        The cpu for which the callback << 
160  * @state:      The state to do callbacks for  << 
161  * @bringup:    True if the bringup callback s << 
162  * @node:       For multi-instance, do a singl << 
163  * @lastp:      For multi-instance rollback, r << 
164  *                                             << 
165  * Called from cpu hotplug and from the state  << 
166  *                                             << 
167  * Return: %0 on success or a negative errno c << 
168  */                                            << 
169 static int cpuhp_invoke_callback(unsigned int  << 
170                                  bool bringup, << 
171                                  struct hlist_ << 
172 {                                              << 
173         struct cpuhp_cpu_state *st = per_cpu_p << 
174         struct cpuhp_step *step = cpuhp_get_st << 
175         int (*cbm)(unsigned int cpu, struct hl << 
176         int (*cb)(unsigned int cpu);           << 
177         int ret, cnt;                          << 
178                                                << 
179         if (st->fail == state) {               << 
180                 st->fail = CPUHP_INVALID;      << 
181                 return -EAGAIN;                << 
182         }                                      << 
183                                                << 
184         if (cpuhp_step_empty(bringup, step)) { << 
185                 WARN_ON_ONCE(1);               << 
186                 return 0;                      << 
187         }                                      << 
188                                                << 
189         if (!step->multi_instance) {           << 
190                 WARN_ON_ONCE(lastp && *lastp); << 
191                 cb = bringup ? step->startup.s << 
192                                                << 
193                 trace_cpuhp_enter(cpu, st->tar << 
194                 ret = cb(cpu);                 << 
195                 trace_cpuhp_exit(cpu, st->stat << 
196                 return ret;                    << 
197         }                                      << 
198         cbm = bringup ? step->startup.multi :  << 
199                                                << 
200         /* Single invocation for instance add/ << 
201         if (node) {                            << 
202                 WARN_ON_ONCE(lastp && *lastp); << 
203                 trace_cpuhp_multi_enter(cpu, s << 
204                 ret = cbm(cpu, node);          << 
205                 trace_cpuhp_exit(cpu, st->stat << 
206                 return ret;                    << 
207         }                                      << 
208                                                << 
209         /* State transition. Invoke on all ins << 
210         cnt = 0;                               << 
211         hlist_for_each(node, &step->list) {    << 
212                 if (lastp && node == *lastp)   << 
213                         break;                 << 
214                                                << 
215                 trace_cpuhp_multi_enter(cpu, s << 
216                 ret = cbm(cpu, node);          << 
217                 trace_cpuhp_exit(cpu, st->stat << 
218                 if (ret) {                     << 
219                         if (!lastp)            << 
220                                 goto err;      << 
221                                                << 
222                         *lastp = node;         << 
223                         return ret;            << 
224                 }                              << 
225                 cnt++;                         << 
226         }                                      << 
227         if (lastp)                             << 
228                 *lastp = NULL;                 << 
229         return 0;                              << 
230 err:                                           << 
231         /* Rollback the instances if one faile << 
232         cbm = !bringup ? step->startup.multi : << 
233         if (!cbm)                              << 
234                 return ret;                    << 
235                                                << 
236         hlist_for_each(node, &step->list) {    << 
237                 if (!cnt--)                    << 
238                         break;                 << 
239                                                << 
240                 trace_cpuhp_multi_enter(cpu, s << 
241                 ret = cbm(cpu, node);          << 
242                 trace_cpuhp_exit(cpu, st->stat << 
243                 /*                             << 
244                  * Rollback must not fail,     << 
245                  */                            << 
246                 WARN_ON_ONCE(ret);             << 
247         }                                      << 
248         return ret;                            << 
249 }                                              << 
250                                                << 
251 #ifdef CONFIG_SMP                              << 
252 static bool cpuhp_is_ap_state(enum cpuhp_state << 
253 {                                              << 
254         /*                                     << 
255          * The extra check for CPUHP_TEARDOWN_ << 
256          * purposes as that state is handled e << 
257          */                                    << 
258         return state > CPUHP_BRINGUP_CPU && st << 
259 }                                              << 
260                                                << 
261 static inline void wait_for_ap_thread(struct c << 
262 {                                              << 
263         struct completion *done = bringup ? &s << 
264         wait_for_completion(done);             << 
265 }                                              << 
266                                                << 
267 static inline void complete_ap_thread(struct c << 
268 {                                              << 
269         struct completion *done = bringup ? &s << 
270         complete(done);                        << 
271 }                                              << 
272                                                << 
273 /*                                             << 
274  * The former STARTING/DYING states, ran with  << 
275  */                                            << 
276 static bool cpuhp_is_atomic_state(enum cpuhp_s << 
277 {                                              << 
278         return CPUHP_AP_IDLE_DEAD <= state &&  << 
279 }                                              << 
280                                                << 
281 /* Synchronization state management */         << 
282 enum cpuhp_sync_state {                        << 
283         SYNC_STATE_DEAD,                       << 
284         SYNC_STATE_KICKED,                     << 
285         SYNC_STATE_SHOULD_DIE,                 << 
286         SYNC_STATE_ALIVE,                      << 
287         SYNC_STATE_SHOULD_ONLINE,              << 
288         SYNC_STATE_ONLINE,                     << 
289 };                                             << 
290                                                << 
291 #ifdef CONFIG_HOTPLUG_CORE_SYNC                << 
292 /**                                            << 
293  * cpuhp_ap_update_sync_state - Update synchro << 
294  * @state:      The synchronization state to s << 
295  *                                             << 
296  * No synchronization point. Just update of th << 
297  * a full barrier so that the AP changes are v << 
298  */                                            << 
299 static inline void cpuhp_ap_update_sync_state( << 
300 {                                              << 
301         atomic_t *st = this_cpu_ptr(&cpuhp_sta << 
302                                                << 
303         (void)atomic_xchg(st, state);          << 
304 }                                              << 
305                                                << 
306 void __weak arch_cpuhp_sync_state_poll(void) { << 
307                                                << 
308 static bool cpuhp_wait_for_sync_state(unsigned << 
309                                       enum cpu << 
310 {                                              << 
311         atomic_t *st = per_cpu_ptr(&cpuhp_stat << 
312         ktime_t now, end, start = ktime_get(); << 
313         int sync;                              << 
314                                                << 
315         end = start + 10ULL * NSEC_PER_SEC;    << 
316                                                << 
317         sync = atomic_read(st);                << 
318         while (1) {                            << 
319                 if (sync == state) {           << 
320                         if (!atomic_try_cmpxch << 
321                                 continue;      << 
322                         return true;           << 
323                 }                              << 
324                                                << 
325                 now = ktime_get();             << 
326                 if (now > end) {               << 
327                         /* Timeout. Leave the  << 
328                         return false;          << 
329                 } else if (now - start < NSEC_ << 
330                         /* Poll for one millis << 
331                         arch_cpuhp_sync_state_ << 
332                 } else {                       << 
333                         usleep_range(USEC_PER_ << 
334                 }                              << 
335                 sync = atomic_read(st);        << 
336         }                                      << 
337         return true;                           << 
338 }                                              << 
339 #else  /* CONFIG_HOTPLUG_CORE_SYNC */          << 
340 static inline void cpuhp_ap_update_sync_state( << 
341 #endif /* !CONFIG_HOTPLUG_CORE_SYNC */         << 
342                                                << 
343 #ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD           << 
344 /**                                            << 
345  * cpuhp_ap_report_dead - Update synchronizati << 
346  *                                             << 
347  * No synchronization point. Just update of th << 
348  */                                            << 
349 void cpuhp_ap_report_dead(void)                << 
350 {                                              << 
351         cpuhp_ap_update_sync_state(SYNC_STATE_ << 
352 }                                              << 
353                                                << 
354 void __weak arch_cpuhp_cleanup_dead_cpu(unsign << 
355                                                << 
356 /*                                             << 
357  * Late CPU shutdown synchronization point. Ca << 
358  * because the AP cannot issue complete() at t << 
359  */                                            << 
360 static void cpuhp_bp_sync_dead(unsigned int cp << 
361 {                                              << 
362         atomic_t *st = per_cpu_ptr(&cpuhp_stat << 
363         int sync = atomic_read(st);            << 
364                                                << 
365         do {                                   << 
366                 /* CPU can have reported dead  << 
367                 if (sync == SYNC_STATE_DEAD)   << 
368                         break;                 << 
369         } while (!atomic_try_cmpxchg(st, &sync << 
370                                                << 
371         if (cpuhp_wait_for_sync_state(cpu, SYN << 
372                 /* CPU reached dead state. Inv << 
373                 arch_cpuhp_cleanup_dead_cpu(cp << 
374                 return;                        << 
375         }                                      << 
376                                                << 
377         /* No further action possible. Emit me << 
378         pr_err("CPU%u failed to report dead st << 
379 }                                              << 
380 #else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */      << 
381 static inline void cpuhp_bp_sync_dead(unsigned << 
382 #endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */    << 
383                                                << 
384 #ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL           << 
385 /**                                            << 
386  * cpuhp_ap_sync_alive - Synchronize AP with t << 
387  *                                             << 
388  * Updates the AP synchronization state to SYN << 
389  * for the BP to release it.                   << 
390  */                                            << 
391 void cpuhp_ap_sync_alive(void)                 << 
392 {                                              << 
393         atomic_t *st = this_cpu_ptr(&cpuhp_sta << 
394                                                << 
395         cpuhp_ap_update_sync_state(SYNC_STATE_ << 
396                                                << 
397         /* Wait for the control CPU to release << 
398         while (atomic_read(st) != SYNC_STATE_S << 
399                 cpu_relax();                   << 
400 }                                              << 
401                                                << 
402 static bool cpuhp_can_boot_ap(unsigned int cpu << 
403 {                                              << 
404         atomic_t *st = per_cpu_ptr(&cpuhp_stat << 
405         int sync = atomic_read(st);            << 
406                                                << 
407 again:                                         << 
408         switch (sync) {                        << 
409         case SYNC_STATE_DEAD:                  << 
410                 /* CPU is properly dead */     << 
411                 break;                         << 
412         case SYNC_STATE_KICKED:                << 
413                 /* CPU did not come up in prev << 
414                 break;                         << 
415         case SYNC_STATE_ALIVE:                 << 
416                 /* CPU is stuck cpuhp_ap_sync_ << 
417                 break;                         << 
418         default:                               << 
419                 /* CPU failed to report online << 
420                 return false;                  << 
421         }                                      << 
422                                                << 
423         /* Prepare for booting */              << 
424         if (!atomic_try_cmpxchg(st, &sync, SYN << 
425                 goto again;                    << 
426                                                << 
427         return true;                           << 
428 }                                              << 
429                                                << 
430 void __weak arch_cpuhp_cleanup_kick_cpu(unsign << 
431                                                << 
432 /*                                             << 
433  * Early CPU bringup synchronization point. Ca << 
434  * because the AP cannot issue complete() so e << 
435  */                                            << 
436 static int cpuhp_bp_sync_alive(unsigned int cp << 
437 {                                              << 
438         int ret = 0;                           << 
439                                                << 
440         if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SY << 
441                 return 0;                      << 
442                                                << 
443         if (!cpuhp_wait_for_sync_state(cpu, SY << 
444                 pr_err("CPU%u failed to report << 
445                 ret = -EIO;                    << 
446         }                                      << 
447                                                << 
448         /* Let the architecture cleanup the ki << 
449         arch_cpuhp_cleanup_kick_cpu(cpu);      << 
450         return ret;                            << 
451 }                                              << 
452 #else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */      << 
453 static inline int cpuhp_bp_sync_alive(unsigned << 
454 static inline bool cpuhp_can_boot_ap(unsigned  << 
455 #endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */    << 
456                                                << 
457 /* Serializes the updates to cpu_online_mask,      26 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
458 static DEFINE_MUTEX(cpu_add_remove_lock);          27 static DEFINE_MUTEX(cpu_add_remove_lock);
459 bool cpuhp_tasks_frozen;                       << 
460 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);         << 
461                                                    28 
462 /*                                                 29 /*
463  * The following two APIs (cpu_maps_update_beg !!  30  * The following two API's must be used when attempting
464  * attempting to serialize the updates to cpu_ !!  31  * to serialize the updates to cpu_online_mask, cpu_present_mask.
465  */                                                32  */
466 void cpu_maps_update_begin(void)                   33 void cpu_maps_update_begin(void)
467 {                                                  34 {
468         mutex_lock(&cpu_add_remove_lock);          35         mutex_lock(&cpu_add_remove_lock);
469 }                                                  36 }
470                                                    37 
471 void cpu_maps_update_done(void)                    38 void cpu_maps_update_done(void)
472 {                                                  39 {
473         mutex_unlock(&cpu_add_remove_lock);        40         mutex_unlock(&cpu_add_remove_lock);
474 }                                                  41 }
475                                                    42 
476 /*                                             !!  43 static RAW_NOTIFIER_HEAD(cpu_chain);
477  * If set, cpu_up and cpu_down will return -EB !!  44 
                                                   >>  45 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
478  * Should always be manipulated under cpu_add_     46  * Should always be manipulated under cpu_add_remove_lock
479  */                                                47  */
480 static int cpu_hotplug_disabled;                   48 static int cpu_hotplug_disabled;
481                                                    49 
482 #ifdef CONFIG_HOTPLUG_CPU                          50 #ifdef CONFIG_HOTPLUG_CPU
483                                                    51 
484 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);  !!  52 static struct {
485                                                !!  53         struct task_struct *active_writer;
486 static bool cpu_hotplug_offline_disabled __ro_ !!  54         struct mutex lock; /* Synchronizes accesses to refcount, */
487                                                !!  55         /*
488 void cpus_read_lock(void)                      !!  56          * Also blocks the new readers during
489 {                                              !!  57          * an ongoing cpu hotplug operation.
490         percpu_down_read(&cpu_hotplug_lock);   !!  58          */
491 }                                              !!  59         int refcount;
492 EXPORT_SYMBOL_GPL(cpus_read_lock);             !!  60 } cpu_hotplug = {
493                                                !!  61         .active_writer = NULL,
494 int cpus_read_trylock(void)                    !!  62         .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
495 {                                              !!  63         .refcount = 0,
496         return percpu_down_read_trylock(&cpu_h !!  64 };
497 }                                              << 
498 EXPORT_SYMBOL_GPL(cpus_read_trylock);          << 
499                                                << 
500 void cpus_read_unlock(void)                    << 
501 {                                              << 
502         percpu_up_read(&cpu_hotplug_lock);     << 
503 }                                              << 
504 EXPORT_SYMBOL_GPL(cpus_read_unlock);           << 
505                                                    65 
506 void cpus_write_lock(void)                     !!  66 void get_online_cpus(void)
507 {                                                  67 {
508         percpu_down_write(&cpu_hotplug_lock);  !!  68         might_sleep();
509 }                                              !!  69         if (cpu_hotplug.active_writer == current)
                                                   >>  70                 return;
                                                   >>  71         mutex_lock(&cpu_hotplug.lock);
                                                   >>  72         cpu_hotplug.refcount++;
                                                   >>  73         mutex_unlock(&cpu_hotplug.lock);
510                                                    74 
511 void cpus_write_unlock(void)                   << 
512 {                                              << 
513         percpu_up_write(&cpu_hotplug_lock);    << 
514 }                                                  75 }
                                                   >>  76 EXPORT_SYMBOL_GPL(get_online_cpus);
515                                                    77 
516 void lockdep_assert_cpus_held(void)            !!  78 void put_online_cpus(void)
517 {                                                  79 {
518         /*                                     !!  80         if (cpu_hotplug.active_writer == current)
519          * We can't have hotplug operations be << 
520          * and some init codepaths will knowin << 
521          * This is all valid, so mute lockdep  << 
522          * unheld locks.                       << 
523          */                                    << 
524         if (system_state < SYSTEM_RUNNING)     << 
525                 return;                            81                 return;
                                                   >>  82         mutex_lock(&cpu_hotplug.lock);
526                                                    83 
527         percpu_rwsem_assert_held(&cpu_hotplug_ !!  84         if (WARN_ON(!cpu_hotplug.refcount))
528 }                                              !!  85                 cpu_hotplug.refcount++; /* try to fix things up */
529                                                    86 
530 #ifdef CONFIG_LOCKDEP                          !!  87         if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
531 int lockdep_is_cpus_held(void)                 !!  88                 wake_up_process(cpu_hotplug.active_writer);
532 {                                              !!  89         mutex_unlock(&cpu_hotplug.lock);
533         return percpu_rwsem_is_held(&cpu_hotpl << 
534 }                                              << 
535 #endif                                         << 
536                                                    90 
537 static void lockdep_acquire_cpus_lock(void)    << 
538 {                                              << 
539         rwsem_acquire(&cpu_hotplug_lock.dep_ma << 
540 }                                                  91 }
                                                   >>  92 EXPORT_SYMBOL_GPL(put_online_cpus);
541                                                    93 
542 static void lockdep_release_cpus_lock(void)    !!  94 /*
                                                   >>  95  * This ensures that the hotplug operation can begin only when the
                                                   >>  96  * refcount goes to zero.
                                                   >>  97  *
                                                   >>  98  * Note that during a cpu-hotplug operation, the new readers, if any,
                                                   >>  99  * will be blocked by the cpu_hotplug.lock
                                                   >> 100  *
                                                   >> 101  * Since cpu_hotplug_begin() is always called after invoking
                                                   >> 102  * cpu_maps_update_begin(), we can be sure that only one writer is active.
                                                   >> 103  *
                                                   >> 104  * Note that theoretically, there is a possibility of a livelock:
                                                   >> 105  * - Refcount goes to zero, last reader wakes up the sleeping
                                                   >> 106  *   writer.
                                                   >> 107  * - Last reader unlocks the cpu_hotplug.lock.
                                                   >> 108  * - A new reader arrives at this moment, bumps up the refcount.
                                                   >> 109  * - The writer acquires the cpu_hotplug.lock finds the refcount
                                                   >> 110  *   non zero and goes to sleep again.
                                                   >> 111  *
                                                   >> 112  * However, this is very difficult to achieve in practice since
                                                   >> 113  * get_online_cpus() not an api which is called all that often.
                                                   >> 114  *
                                                   >> 115  */
                                                   >> 116 static void cpu_hotplug_begin(void)
543 {                                                 117 {
544         rwsem_release(&cpu_hotplug_lock.dep_ma !! 118         cpu_hotplug.active_writer = current;
                                                   >> 119 
                                                   >> 120         for (;;) {
                                                   >> 121                 mutex_lock(&cpu_hotplug.lock);
                                                   >> 122                 if (likely(!cpu_hotplug.refcount))
                                                   >> 123                         break;
                                                   >> 124                 __set_current_state(TASK_UNINTERRUPTIBLE);
                                                   >> 125                 mutex_unlock(&cpu_hotplug.lock);
                                                   >> 126                 schedule();
                                                   >> 127         }
545 }                                                 128 }
546                                                   129 
547 /* Declare CPU offlining not supported */      !! 130 static void cpu_hotplug_done(void)
548 void cpu_hotplug_disable_offlining(void)       << 
549 {                                                 131 {
550         cpu_maps_update_begin();               !! 132         cpu_hotplug.active_writer = NULL;
551         cpu_hotplug_offline_disabled = true;   !! 133         mutex_unlock(&cpu_hotplug.lock);
552         cpu_maps_update_done();                << 
553 }                                                 134 }
554                                                   135 
555 /*                                                136 /*
556  * Wait for currently running CPU hotplug oper    137  * Wait for currently running CPU hotplug operations to complete (if any) and
557  * disable future CPU hotplug (from sysfs). Th    138  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
558  * the 'cpu_hotplug_disabled' flag. The same l    139  * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
559  * hotplug path before performing hotplug oper    140  * hotplug path before performing hotplug operations. So acquiring that lock
560  * guarantees mutual exclusion from any curren    141  * guarantees mutual exclusion from any currently running hotplug operations.
561  */                                               142  */
562 void cpu_hotplug_disable(void)                    143 void cpu_hotplug_disable(void)
563 {                                                 144 {
564         cpu_maps_update_begin();                  145         cpu_maps_update_begin();
565         cpu_hotplug_disabled++;                !! 146         cpu_hotplug_disabled = 1;
566         cpu_maps_update_done();                   147         cpu_maps_update_done();
567 }                                                 148 }
568 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);        << 
569                                                << 
570 static void __cpu_hotplug_enable(void)         << 
571 {                                              << 
572         if (WARN_ONCE(!cpu_hotplug_disabled, " << 
573                 return;                        << 
574         cpu_hotplug_disabled--;                << 
575 }                                              << 
576                                                   149 
577 void cpu_hotplug_enable(void)                     150 void cpu_hotplug_enable(void)
578 {                                                 151 {
579         cpu_maps_update_begin();                  152         cpu_maps_update_begin();
580         __cpu_hotplug_enable();                !! 153         cpu_hotplug_disabled = 0;
581         cpu_maps_update_done();                   154         cpu_maps_update_done();
582 }                                                 155 }
583 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);         << 
584                                                << 
585 #else                                          << 
586                                                << 
587 static void lockdep_acquire_cpus_lock(void)    << 
588 {                                              << 
589 }                                              << 
590                                                << 
591 static void lockdep_release_cpus_lock(void)    << 
592 {                                              << 
593 }                                              << 
594                                                << 
595 #endif  /* CONFIG_HOTPLUG_CPU */               << 
596                                                << 
597 /*                                             << 
598  * Architectures that need SMT-specific errata << 
599  * should override this.                       << 
600  */                                            << 
601 void __weak arch_smt_update(void) { }          << 
602                                                << 
603 #ifdef CONFIG_HOTPLUG_SMT                      << 
604                                                << 
605 enum cpuhp_smt_control cpu_smt_control __read_ << 
606 static unsigned int cpu_smt_max_threads __ro_a << 
607 unsigned int cpu_smt_num_threads __read_mostly << 
608                                                << 
609 void __init cpu_smt_disable(bool force)        << 
610 {                                              << 
611         if (!cpu_smt_possible())               << 
612                 return;                        << 
613                                                << 
614         if (force) {                           << 
615                 pr_info("SMT: Force disabled\n << 
616                 cpu_smt_control = CPU_SMT_FORC << 
617         } else {                               << 
618                 pr_info("SMT: disabled\n");    << 
619                 cpu_smt_control = CPU_SMT_DISA << 
620         }                                      << 
621         cpu_smt_num_threads = 1;               << 
622 }                                              << 
623                                                << 
624 /*                                             << 
625  * The decision whether SMT is supported can o << 
626  * CPU identification. Called from architectur << 
627  */                                            << 
628 void __init cpu_smt_set_num_threads(unsigned i << 
629                                     unsigned i << 
630 {                                              << 
631         WARN_ON(!num_threads || (num_threads > << 
632                                                << 
633         if (max_threads == 1)                  << 
634                 cpu_smt_control = CPU_SMT_NOT_ << 
635                                                << 
636         cpu_smt_max_threads = max_threads;     << 
637                                                << 
638         /*                                     << 
639          * If SMT has been disabled via the ke << 
640          * not supported, set cpu_smt_num_thre << 
641          * If enabled, take the architecture r << 
642          * to bring up into account.           << 
643          */                                    << 
644         if (cpu_smt_control != CPU_SMT_ENABLED << 
645                 cpu_smt_num_threads = 1;       << 
646         else if (num_threads < cpu_smt_num_thr << 
647                 cpu_smt_num_threads = num_thre << 
648 }                                              << 
649                                                << 
650 static int __init smt_cmdline_disable(char *st << 
651 {                                              << 
652         cpu_smt_disable(str && !strcmp(str, "f << 
653         return 0;                              << 
654 }                                              << 
655 early_param("nosmt", smt_cmdline_disable);     << 
656                                                << 
657 /*                                             << 
658  * For Archicture supporting partial SMT state << 
659  * Otherwise this has already been checked thr << 
660  * setting the SMT level.                      << 
661  */                                            << 
662 static inline bool cpu_smt_thread_allowed(unsi << 
663 {                                              << 
664 #ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC          << 
665         return topology_smt_thread_allowed(cpu << 
666 #else                                          << 
667         return true;                           << 
668 #endif                                         << 
669 }                                              << 
670                                                << 
671 static inline bool cpu_bootable(unsigned int c << 
672 {                                              << 
673         if (cpu_smt_control == CPU_SMT_ENABLED << 
674                 return true;                   << 
675                                                << 
676         /* All CPUs are bootable if controls a << 
677         if (cpu_smt_control == CPU_SMT_NOT_IMP << 
678                 return true;                   << 
679                                                << 
680         /* All CPUs are bootable if CPU is not << 
681         if (cpu_smt_control == CPU_SMT_NOT_SUP << 
682                 return true;                   << 
683                                                << 
684         if (topology_is_primary_thread(cpu))   << 
685                 return true;                   << 
686                                                << 
687         /*                                     << 
688          * On x86 it's required to boot all lo << 
689          * that the init code can get a chance << 
690          * CPU. Otherwise, a broadcasted MCE o << 
691          * core will shutdown the machine.     << 
692          */                                    << 
693         return !cpumask_test_cpu(cpu, &cpus_bo << 
694 }                                              << 
695                                                << 
696 /* Returns true if SMT is supported and not fo << 
697 bool cpu_smt_possible(void)                    << 
698 {                                              << 
699         return cpu_smt_control != CPU_SMT_FORC << 
700                 cpu_smt_control != CPU_SMT_NOT << 
701 }                                              << 
702 EXPORT_SYMBOL_GPL(cpu_smt_possible);           << 
703                                                << 
704 #else                                          << 
705 static inline bool cpu_bootable(unsigned int c << 
706 #endif                                         << 
707                                                << 
708 static inline enum cpuhp_state                 << 
709 cpuhp_set_state(int cpu, struct cpuhp_cpu_stat << 
710 {                                              << 
711         enum cpuhp_state prev_state = st->stat << 
712         bool bringup = st->state < target;     << 
713                                                << 
714         st->rollback = false;                  << 
715         st->last = NULL;                       << 
716                                                << 
717         st->target = target;                   << 
718         st->single = false;                    << 
719         st->bringup = bringup;                 << 
720         if (cpu_dying(cpu) != !bringup)        << 
721                 set_cpu_dying(cpu, !bringup);  << 
722                                                << 
723         return prev_state;                     << 
724 }                                              << 
725                                                << 
726 static inline void                             << 
727 cpuhp_reset_state(int cpu, struct cpuhp_cpu_st << 
728                   enum cpuhp_state prev_state) << 
729 {                                              << 
730         bool bringup = !st->bringup;           << 
731                                                << 
732         st->target = prev_state;               << 
733                                                << 
734         /*                                     << 
735          * Already rolling back. No need inver << 
736          * the current state.                  << 
737          */                                    << 
738         if (st->rollback)                      << 
739                 return;                        << 
740                                                << 
741         st->rollback = true;                   << 
742                                                   156 
743         /*                                     !! 157 #else /* #if CONFIG_HOTPLUG_CPU */
744          * If we have st->last we need to undo !! 158 static void cpu_hotplug_begin(void) {}
745          * state first. Otherwise start undo a !! 159 static void cpu_hotplug_done(void) {}
746          */                                    !! 160 #endif  /* #else #if CONFIG_HOTPLUG_CPU */
747         if (!st->last) {                       << 
748                 if (st->bringup)               << 
749                         st->state--;           << 
750                 else                           << 
751                         st->state++;           << 
752         }                                      << 
753                                                   161 
754         st->bringup = bringup;                 !! 162 /* Need to know about CPUs going up/down? */
755         if (cpu_dying(cpu) != !bringup)        !! 163 int __ref register_cpu_notifier(struct notifier_block *nb)
756                 set_cpu_dying(cpu, !bringup);  << 
757 }                                              << 
758                                                << 
759 /* Regular hotplug invocation of the AP hotplu << 
760 static void __cpuhp_kick_ap(struct cpuhp_cpu_s << 
761 {                                                 164 {
762         if (!st->single && st->state == st->ta << 
763                 return;                        << 
764                                                << 
765         st->result = 0;                        << 
766         /*                                     << 
767          * Make sure the above stores are visi << 
768          * true. Paired with the mb() above in << 
769          */                                    << 
770         smp_mb();                              << 
771         st->should_run = true;                 << 
772         wake_up_process(st->thread);           << 
773         wait_for_ap_thread(st, st->bringup);   << 
774 }                                              << 
775                                                << 
776 static int cpuhp_kick_ap(int cpu, struct cpuhp << 
777                          enum cpuhp_state targ << 
778 {                                              << 
779         enum cpuhp_state prev_state;           << 
780         int ret;                                  165         int ret;
781                                                !! 166         cpu_maps_update_begin();
782         prev_state = cpuhp_set_state(cpu, st,  !! 167         ret = raw_notifier_chain_register(&cpu_chain, nb);
783         __cpuhp_kick_ap(st);                   !! 168         cpu_maps_update_done();
784         if ((ret = st->result)) {              << 
785                 cpuhp_reset_state(cpu, st, pre << 
786                 __cpuhp_kick_ap(st);           << 
787         }                                      << 
788                                                << 
789         return ret;                            << 
790 }                                              << 
791                                                << 
792 static int bringup_wait_for_ap_online(unsigned << 
793 {                                              << 
794         struct cpuhp_cpu_state *st = per_cpu_p << 
795                                                << 
796         /* Wait for the CPU to reach CPUHP_AP_ << 
797         wait_for_ap_thread(st, true);          << 
798         if (WARN_ON_ONCE((!cpu_online(cpu))))  << 
799                 return -ECANCELED;             << 
800                                                << 
801         /* Unpark the hotplug thread of the ta << 
802         kthread_unpark(st->thread);            << 
803                                                << 
804         /*                                     << 
805          * SMT soft disabling on X86 requires  << 
806          * BIOS 'wait for SIPI' state in order << 
807          * CPU marked itself as booted_once in << 
808          * cpu_bootable() check will now retur << 
809          * primary sibling.                    << 
810          */                                    << 
811         if (!cpu_bootable(cpu))                << 
812                 return -ECANCELED;             << 
813         return 0;                              << 
814 }                                              << 
815                                                << 
816 #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP            << 
817 static int cpuhp_kick_ap_alive(unsigned int cp << 
818 {                                              << 
819         if (!cpuhp_can_boot_ap(cpu))           << 
820                 return -EAGAIN;                << 
821                                                << 
822         return arch_cpuhp_kick_ap_alive(cpu, i << 
823 }                                              << 
824                                                << 
825 static int cpuhp_bringup_ap(unsigned int cpu)  << 
826 {                                              << 
827         struct cpuhp_cpu_state *st = per_cpu_p << 
828         int ret;                               << 
829                                                << 
830         /*                                     << 
831          * Some architectures have to walk the << 
832          * setup the vector space for the cpu  << 
833          * Prevent irq alloc/free across the b << 
834          */                                    << 
835         irq_lock_sparse();                     << 
836                                                << 
837         ret = cpuhp_bp_sync_alive(cpu);        << 
838         if (ret)                               << 
839                 goto out_unlock;               << 
840                                                << 
841         ret = bringup_wait_for_ap_online(cpu); << 
842         if (ret)                               << 
843                 goto out_unlock;               << 
844                                                << 
845         irq_unlock_sparse();                   << 
846                                                << 
847         if (st->target <= CPUHP_AP_ONLINE_IDLE << 
848                 return 0;                      << 
849                                                << 
850         return cpuhp_kick_ap(cpu, st, st->targ << 
851                                                << 
852 out_unlock:                                    << 
853         irq_unlock_sparse();                   << 
854         return ret;                            << 
855 }                                              << 
856 #else                                          << 
857 static int bringup_cpu(unsigned int cpu)  <<
858 {  <<
859         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  <<
860         struct task_struct *idle = idle_thread_get(cpu);  <<
861         int ret;  <<
862   <<
863         if (!cpuhp_can_boot_ap(cpu))  <<
864                 return -EAGAIN;  <<
865   <<
866         /*  <<
867          * Some architectures have to walk the irq descriptors to  <<
868          * setup the vector space for the cpu which comes online.  <<
869          *  <<
870          * Prevent irq alloc/free across the bringup by acquiring the  <<
871          * sparse irq lock. Hold it until the upcoming CPU completes the  <<
872          * startup in cpuhp_online_idle() which allows to avoid  <<
873          * intermediate synchronization points in the architecture code.  <<
874          */  <<
875         irq_lock_sparse();  <<
876   <<
877         ret = __cpu_up(cpu, idle);  <<
878         if (ret)  <<
879                 goto out_unlock;  <<
880   <<
881         ret = cpuhp_bp_sync_alive(cpu);  <<
882         if (ret)  <<
883                 goto out_unlock;  <<
884   <<
885         ret = bringup_wait_for_ap_online(cpu);  <<
886         if (ret)  <<
887                 goto out_unlock;  <<
888   <<
889         irq_unlock_sparse();  <<
890   <<
891         if (st->target <= CPUHP_AP_ONLINE_IDLE)  <<
892                 return 0;  <<
893   <<
894         return cpuhp_kick_ap(cpu, st, st->target);  <<
895   <<
896 out_unlock:  <<
897         irq_unlock_sparse();  <<
898         return ret;  <<
899 }  <<
900 #endif                                         << 
901                                                << 
902 static int finish_cpu(unsigned int cpu)        << 
903 {                                              << 
904         struct task_struct *idle = idle_thread_get(cpu);  <<
905         struct mm_struct *mm = idle->active_mm;  <<
906   <<
907         /*  <<
908          * idle_task_exit() will have switched to &init_mm, now  <<
909          * clean up any remaining active_mm state.  <<
910          */                                    << 
911         if (mm != &init_mm)                    << 
912                 idle->active_mm = &init_mm;    << 
913         mmdrop_lazy_tlb(mm);                   << 
914         return 0;                              << 
915 }                                              << 
916                                                << 
917 /*                                             << 
918  * Hotplug state machine related functions     << 
919  */                                            << 
920                                                << 
921 /*  <<
922  * Get the next state to run. Empty ones will be skipped. Returns true if a  <<
923  * state must be run.  <<
924  *  <<
925  * st->state will be modified ahead of time, to match state_to_run, as if it  <<
926  * has already ran.  <<
927  */  <<
928 static bool cpuhp_next_state(bool bringup,  <<
929                              enum cpuhp_state *state_to_run,  <<
930                              struct cpuhp_cpu_state *st,  <<
931                              enum cpuhp_state target)  <<
932 {  <<
933         do {  <<
934                 if (bringup) {  <<
935                         if (st->state >= target)  <<
936                                 return false;  <<
937   <<
938                         *state_to_run = ++st->state;  <<
939                 } else {  <<
940                         if (st->state <= target)  <<
941                                 return false;  <<
942   <<
943                         *state_to_run = st->state--;  <<
944                 }  <<
945   <<
946                 if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))  <<
947                         break;  <<
948         } while (true);  <<
949   <<
950         return true;  <<
951 }  <<
952                                                << 
953 static int __cpuhp_invoke_callback_range(bool bringup,  <<
954                                          unsigned int cpu,  <<
955                                          struct cpuhp_cpu_state *st,  <<
956                                          enum cpuhp_state target,  <<
957                                          bool nofail)  <<
958 {  <<
959         enum cpuhp_state state;  <<
960         int ret = 0;  <<
961   <<
962         while (cpuhp_next_state(bringup, &state, st, target)) {  <<
963                 int err;  <<
964   <<
965                 err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);  <<
966                 if (!err)  <<
967                         continue;  <<
968   <<
969                 if (nofail) {  <<
970                         pr_warn("CPU %u %s state %s (%d) failed (%d)\n",  <<
971                                 cpu, bringup ? "UP" : "DOWN",  <<
972                                 cpuhp_get_step(st->state)->name,  <<
973                                 st->state, err);  <<
974                         ret = -1;  <<
975                 } else {  <<
976                         ret = err;  <<
977                         break;  <<
978                 }  <<
979         }  <<
980   <<
981         return ret;  <<
982 }  <<
983                                                << 
984 static inline int cpuhp_invoke_callback_range(bool bringup,  <<
985                                                unsigned int cpu,  <<
986                                                struct cpuhp_cpu_state *st,  <<
987                                                enum cpuhp_state target)  <<
988 {  <<
989         return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);  <<
990 }  <<
991   <<
992 static inline void cpuhp_invoke_callback_range_nofail(bool bringup,  <<
993                                                        unsigned int cpu,  <<
994                                                        struct cpuhp_cpu_state *st,  <<
995                                                        enum cpuhp_state target)  <<
996 {  <<
997         __cpuhp_invoke_callback_range(bringup, cpu, st, target, true);  <<
998 }  <<
999                                                << 
1000 static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)  <<
1001 {  <<
1002         if (IS_ENABLED(CONFIG_HOTPLUG_CPU))  <<
1003                 return true;  <<
1004         /*  <<
1005          * When CPU hotplug is disabled, then taking the CPU down is not  <<
1006          * possible because takedown_cpu() and the architecture and  <<
1007          * subsystem specific mechanisms are not available. So the CPU  <<
1008          * which would be completely unplugged again needs to stay around  <<
1009          * in the current state.  <<
1010          */  <<
1011         return st->state <= CPUHP_BRINGUP_CPU;  <<
1012 }  <<
1013                                               << 
1014 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,  <<
1015                               enum cpuhp_state target)  <<
1016 {  <<
1017         enum cpuhp_state prev_state = st->state;  <<
1018         int ret = 0;  <<
1019   <<
1020         ret = cpuhp_invoke_callback_range(true, cpu, st, target);  <<
1021         if (ret) {  <<
1022                 pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",  <<
1023                          ret, cpu, cpuhp_get_step(st->state)->name,  <<
1024                          st->state);  <<
1025   <<
1026                 cpuhp_reset_state(cpu, st, prev_state);  <<
1027                 if (can_rollback_cpu(st))  <<
1028                         WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,  <<
1029                                                             prev_state));  <<
1030         }  <<
1031         return ret;                              169         return ret;
1032 }                                                170 }
1033                                                  171 
1034 /*                                            !! 172 static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
1035  * The cpu hotplug threads manage the bringup and teardown of the cpus  !! 173                         int *nr_calls)
1036  */                                           << 
1037 static int cpuhp_should_run(unsigned int cpu) << 
1038 {                                             << 
1039         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);  <<
1040                                               << 
1041         return st->should_run;                << 
1042 }                                             << 
1043                                               << 
1044 /*  <<
1045  * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke  <<
1046  * callbacks when a state gets [un]installed at runtime.  <<
1047  *  <<
1048  * Each invocation of this function by the smpboot thread does a single AP  <<
1049  * state callback.  <<
1050  *  <<
1051  * It has 3 modes of operation:  <<
1052  *  - single: runs st->cb_state  <<
1053  *  - up:     runs ++st->state, while st->state < st->target  <<
1054  *  - down:   runs st->state--, while st->state > st->target  <<
1055  *  <<
1056  * When complete or on error, should_run is cleared and the completion is fired.  <<
1057  */  <<
1058 static void cpuhp_thread_fun(unsigned int cpu)  <<
1059 {  <<
1060         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);  <<
1061         bool bringup = st->bringup;  <<
1062         enum cpuhp_state state;  <<
1063   <<
1064         if (WARN_ON_ONCE(!st->should_run))  <<
1065                 return;  <<
1066   <<
1067         /*  <<
1068          * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures  <<
1069          * that if we see ->should_run we also see the rest of the state.  <<
1070          */  <<
1071         smp_mb();  <<
1072   <<
1073         /*  <<
1074          * The BP holds the hotplug lock, but we're now running on the AP,  <<
1075          * ensure that anybody asserting the lock is held, actually knows  <<
1076          * it so.  <<
1077          */  <<
1078         lockdep_acquire_cpus_lock();  <<
1079         cpuhp_lock_acquire(bringup);  <<
1080   <<
1081         if (st->single) {  <<
1082                 state = st->cb_state;  <<
1083                 st->should_run = false;  <<
1084         } else {  <<
1085                 st->should_run = cpuhp_next_state(bringup, &state, st, st->target);  <<
1086                 if (!st->should_run)  <<
1087                         goto end;  <<
1088         }  <<
1089   <<
1090         WARN_ON_ONCE(!cpuhp_is_ap_state(state));  <<
1091   <<
1092         if (cpuhp_is_atomic_state(state)) {  <<
1093                 local_irq_disable();  <<
1094                 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);  <<
1095                 local_irq_enable();  <<
1096   <<
1097                 /*  <<
1098                  * STARTING/DYING must not fail!  <<
1099                  */  <<
1100                 WARN_ON_ONCE(st->result);  <<
1101         } else {  <<
1102                 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);  <<
1103         }  <<
1104   <<
1105         if (st->result) {  <<
1106                 /*  <<
1107                  * If we fail on a rollback, we're up a creek without no  <<
1108                  * paddle, no way forward, no way back. We loose the lock and  <<
1109                  * playing.  <<
1110                  */  <<
1111                 WARN_ON_ONCE(st->rollback);  <<
1112                 st->should_run = false;  <<
1113         }  <<
1114   <<
1115 end:  <<
1116         cpuhp_lock_release(bringup);  <<
1117         lockdep_release_cpus_lock();  <<
1118   <<
1119         if (!st->should_run)  <<
1120                 complete_ap_thread(st, bringup);  <<
1121 }  <<
1122                                               << 
1123 /* Invoke a single callback on a remote cpu */  <<
1124 static int  <<
1125 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,  <<
1126                          struct hlist_node *node)  <<
1127 {                                                174 {
1128         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  <<
1129         int ret;                                 175         int ret;
1130                                                  176 
1131         if (!cpu_online(cpu))                 !! 177         ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
1132                 return 0;                     !! 178                                         nr_calls);
1133                                               << 
1134         cpuhp_lock_acquire(false);            << 
1135         cpuhp_lock_release(false);            << 
1136                                               << 
1137         cpuhp_lock_acquire(true);             << 
1138         cpuhp_lock_release(true);             << 
1139                                               << 
1140         /*                                    << 
1141          * If we are up and running, use the hotplug thread. For early calls  <<
1142          * we invoke the thread function directly.  <<
1143          */                                   << 
1144         if (!st->thread)                      << 
1145                 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);  <<
1146                                               << 
1147         st->rollback = false;                 << 
1148         st->last = NULL;                      << 
1149                                                  179 
1150         st->node = node;                      !! 180         return notifier_to_errno(ret);
1151         st->bringup = bringup;                << 
1152         st->cb_state = state;                 << 
1153         st->single = true;                    << 
1154                                               << 
1155         __cpuhp_kick_ap(st);                  << 
1156                                               << 
1157         /*                                    << 
1158          * If we failed and did a partial, do a rollback.  <<
1159          */                                   << 
1160         if ((ret = st->result) && st->last) { << 
1161                 st->rollback = true;          << 
1162                 st->bringup = !bringup;       << 
1163                                               << 
1164                 __cpuhp_kick_ap(st);          << 
1165         }                                     << 
1166                                               << 
1167         /*                                    << 
1168          * Clean up the leftovers so the next hotplug operation wont use stale  <<
1169          * data.                              << 
1170          */                                   << 
1171         st->node = st->last = NULL;           << 
1172         return ret;                           << 
1173 }                                                181 }
1174                                                  182 
1175 static int cpuhp_kick_ap_work(unsigned int cpu)  !! 183 static int cpu_notify(unsigned long val, void *v)
1176 {                                                184 {
1177         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  !! 185         return __cpu_notify(val, v, -1, NULL);
1178         enum cpuhp_state prev_state = st->state;  <<
1179         int ret;                              << 
1180                                               << 
1181         cpuhp_lock_acquire(false);            << 
1182         cpuhp_lock_release(false);            << 
1183                                               << 
1184         cpuhp_lock_acquire(true);             << 
1185         cpuhp_lock_release(true);             << 
1186                                               << 
1187         trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);  <<
1188         ret = cpuhp_kick_ap(cpu, st, st->target);  <<
1189         trace_cpuhp_exit(cpu, st->state, prev_state, ret);  <<
1190                                               << 
1191         return ret;                           << 
1192 }                                                186 }
1193                                                  187 
1194 static struct smp_hotplug_thread cpuhp_threads = {  !! 188 static void cpu_notify_nofail(unsigned long val, void *v)
1195         .store                  = &cpuhp_state.thread,  <<
1196         .thread_should_run      = cpuhp_should_run,  <<
1197         .thread_fn              = cpuhp_thread_fun,  <<
1198         .thread_comm            = "cpuhp/%u", << 
1199         .selfparking            = true,       << 
1200 };                                            << 
1201                                               << 
1202 static __init void cpuhp_init_state(void)     << 
1203 {                                                189 {
1204         struct cpuhp_cpu_state *st;           !! 190         BUG_ON(cpu_notify(val, v));
1205         int cpu;                              << 
1206                                               << 
1207         for_each_possible_cpu(cpu) {          << 
1208                 st = per_cpu_ptr(&cpuhp_state, cpu);  <<
1209                 init_completion(&st->done_up);  <<
1210                 init_completion(&st->done_down);  <<
1211         }                                     << 
1212 }                                                191 }
                                                   >> 192 EXPORT_SYMBOL(register_cpu_notifier);
1213                                                  193 
1214 void __init cpuhp_threads_init(void)          !! 194 void __ref unregister_cpu_notifier(struct notifier_block *nb)
1215 {                                                195 {
1216         cpuhp_init_state();                   !! 196         cpu_maps_update_begin();
1217         BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));  !! 197         raw_notifier_chain_unregister(&cpu_chain, nb);
1218         kthread_unpark(this_cpu_read(cpuhp_state.thread));  !! 198         cpu_maps_update_done();
1219 }                                                199 }
                                                   >> 200 EXPORT_SYMBOL(unregister_cpu_notifier);
1220                                                  201 
1221 #ifdef CONFIG_HOTPLUG_CPU                        202 #ifdef CONFIG_HOTPLUG_CPU
1222 #ifndef arch_clear_mm_cpumask_cpu             << 
1223 #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))  <<
1224 #endif                                        << 
1225                                               << 
1226 /**                                              203 /**
1227  * clear_tasks_mm_cpumask - Safely clear task    204  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
1228  * @cpu: a CPU id                                205  * @cpu: a CPU id
1229  *                                               206  *
1230  * This function walks all processes, finds a    207  * This function walks all processes, finds a valid mm struct for each one and
1231  * then clears a corresponding bit in mm's cp    208  * then clears a corresponding bit in mm's cpumask.  While this all sounds
1232  * trivial, there are various non-obvious cor    209  * trivial, there are various non-obvious corner cases, which this function
1233  * tries to solve in a safe manner.              210  * tries to solve in a safe manner.
1234  *                                               211  *
1235  * Also note that the function uses a somewha    212  * Also note that the function uses a somewhat relaxed locking scheme, so it may
1236  * be called only for an already offlined CPU    213  * be called only for an already offlined CPU.
1237  */                                              214  */
1238 void clear_tasks_mm_cpumask(int cpu)             215 void clear_tasks_mm_cpumask(int cpu)
1239 {                                                216 {
1240         struct task_struct *p;                   217         struct task_struct *p;
1241                                                  218 
1242         /*                                       219         /*
1243          * This function is called after the     220          * This function is called after the cpu is taken down and marked
1244          * offline, so its not like new tasks    221          * offline, so its not like new tasks will ever get this cpu set in
1245          * their mm mask. -- Peter Zijlstra      222          * their mm mask. -- Peter Zijlstra
1246          * Thus, we may use rcu_read_lock() h    223          * Thus, we may use rcu_read_lock() here, instead of grabbing
1247          * full-fledged tasklist_lock.           224          * full-fledged tasklist_lock.
1248          */                                      225          */
1249         WARN_ON(cpu_online(cpu));                226         WARN_ON(cpu_online(cpu));
1250         rcu_read_lock();                         227         rcu_read_lock();
1251         for_each_process(p) {                    228         for_each_process(p) {
1252                 struct task_struct *t;           229                 struct task_struct *t;
1253                                                  230 
1254                 /*                               231                 /*
1255                  * Main thread might exit, bu    232                  * Main thread might exit, but other threads may still have
1256                  * a valid mm. Find one.         233                  * a valid mm. Find one.
1257                  */                              234                  */
1258                 t = find_lock_task_mm(p);        235                 t = find_lock_task_mm(p);
1259                 if (!t)                          236                 if (!t)
1260                         continue;                237                         continue;
1261                 arch_clear_mm_cpumask_cpu(cpu, t->mm);  !! 238                 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
1262                 task_unlock(t);                  239                 task_unlock(t);
1263         }                                        240         }
1264         rcu_read_unlock();                       241         rcu_read_unlock();
1265 }                                                242 }
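The kernel-doc above stresses that clear_tasks_mm_cpumask() may only run for a CPU that is already marked offline. Below is a minimal sketch of the usual call site, loosely modelled on how several architectures order their __cpu_disable() path; the function name is invented for illustration, it assumes kernel context, and it is not code from kernel/cpu.c:

/* Illustrative sketch only -- not part of kernel/cpu.c. */
int example_arch_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        set_cpu_online(cpu, false);        /* drop out of the online mask first */
        irq_migrate_all_off_this_cpu();    /* move interrupts away from this CPU */

        /* Only now is it safe to clear this CPU from every mm's cpumask. */
        clear_tasks_mm_cpumask(cpu);

        return 0;
}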
1266                                                  243 
                                                   >> 244 static inline void check_for_tasks(int cpu)
                                                   >> 245 {
                                                   >> 246         struct task_struct *p;
                                                   >> 247         cputime_t utime, stime;
                                                   >> 248 
                                                   >> 249         write_lock_irq(&tasklist_lock);
                                                   >> 250         for_each_process(p) {
                                                   >> 251                 task_cputime(p, &utime, &stime);
                                                   >> 252                 if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
                                                   >> 253                     (utime || stime))
                                                   >> 254                         printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                                   >> 255                                 "(state = %ld, flags = %x)\n",
                                                   >> 256                                 p->comm, task_pid_nr(p), cpu,
                                                   >> 257                                 p->state, p->flags);
                                                   >> 258         }
                                                   >> 259         write_unlock_irq(&tasklist_lock);
                                                   >> 260 }
                                                   >> 261 
                                                   >> 262 struct take_cpu_down_param {
                                                   >> 263         unsigned long mod;
                                                   >> 264         void *hcpu;
                                                   >> 265 };
                                                   >> 266 
1267 /* Take this CPU down. */                        267 /* Take this CPU down. */
1268 static int take_cpu_down(void *_param)        !! 268 static int __ref take_cpu_down(void *_param)
1269 {                                                269 {
1270         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);  !! 270         struct take_cpu_down_param *param = _param;
1271         enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);  !! 271         int err;
1272         int err, cpu = smp_processor_id();    << 
1273                                                  272 
1274         /* Ensure this CPU doesn't handle any    273         /* Ensure this CPU doesn't handle any more interrupts. */
1275         err = __cpu_disable();                   274         err = __cpu_disable();
1276         if (err < 0)                             275         if (err < 0)
1277                 return err;                      276                 return err;
1278                                                  277 
1279         /*                                    !! 278         cpu_notify(CPU_DYING | param->mod, param->hcpu);
1280          * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going  <<
1281          * down, that the current state is CPUHP_TEARDOWN_CPU - 1.  <<
1282          */  <<
1283         WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));  <<
1284                                               << 
1285         /*                                    << 
1286          * Invoke the former CPU_DYING callbacks. DYING must not fail!  <<
1287          */  <<
1288         cpuhp_invoke_callback_range_nofail(false, cpu, st, target);  <<
1289                                               << 
1290         /* Park the stopper thread */            279         /* Park the stopper thread */
1291         stop_machine_park(cpu);               !! 280         kthread_park(current);
1292         return 0;                                281         return 0;
1293 }                                                282 }
1294                                                  283 
1295 static int takedown_cpu(unsigned int cpu)     !! 284 /* Requires cpu_add_remove_lock to be held */
                                                   >> 285 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
1296 {                                                286 {
1297         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  !! 287         int err, nr_calls = 0;
1298         int err;                              !! 288         void *hcpu = (void *)(long)cpu;
                                                   >> 289         unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
                                                   >> 290         struct take_cpu_down_param tcd_param = {
                                                   >> 291                 .mod = mod,
                                                   >> 292                 .hcpu = hcpu,
                                                   >> 293         };
1299                                                  294 
1300         /* Park the smpboot threads */        !! 295         if (num_online_cpus() == 1)
1301         kthread_park(st->thread);             !! 296                 return -EBUSY;
1302                                                  297 
1303         /*                                    !! 298         if (!cpu_online(cpu))
1304          * Prevent irq alloc/free while the dying cpu reorganizes the  !! 299                 return -EINVAL;
1305          * interrupt affinities.              << 
1306          */                                   << 
1307         irq_lock_sparse();                    << 
1308                                                  300 
1309         /*                                    !! 301         cpu_hotplug_begin();
1310          * So now all preempt/rcu users must observe !cpu_active().  !! 302 
1311          */  !! 303         err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
1312         err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));  <<
1313         if (err) {                               304         if (err) {
1314                 /* CPU refused to die */      !! 305                 nr_calls--;
1315                 irq_unlock_sparse();          !! 306                 __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
1316                 /* Unpark the hotplug thread so we can rollback there */  !! 307                 printk("%s: attempt to take down CPU %u failed\n",
1317                 kthread_unpark(st->thread);   !! 308                                 __func__, cpu);
1318                 return err;                   !! 309                 goto out_release;
                                                   >> 310         }
                                                   >> 311         smpboot_park_threads(cpu);
                                                   >> 312 
                                                   >> 313         err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
                                                   >> 314         if (err) {
                                                   >> 315                 /* CPU didn't die: tell everyone.  Can't complain. */
                                                   >> 316                 smpboot_unpark_threads(cpu);
                                                   >> 317                 cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                                                   >> 318                 goto out_release;
1319         }                                        319         }
1320         BUG_ON(cpu_online(cpu));                 320         BUG_ON(cpu_online(cpu));
1321                                                  321 
1322         /*                                       322         /*
1323          * The teardown callback for CPUHP_AP !! 323          * The migration_call() CPU_DYING callback will have removed all
1324          * all runnable tasks from the CPU, t !! 324          * runnable tasks from the cpu, there's only the idle task left now
1325          * that the migration thread is done     325          * that the migration thread is done doing the stop_machine thing.
1326          *                                       326          *
1327          * Wait for the stop thread to go awa    327          * Wait for the stop thread to go away.
1328          */                                      328          */
1329         wait_for_ap_thread(st, false);        !! 329         while (!idle_cpu(cpu))
1330         BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);  !! 330                 cpu_relax();
1331                                               << 
1332         /* Interrupts are moved away from the dying cpu, reenable alloc/free */  <<
1333         irq_unlock_sparse();                  << 
1334                                                  331 
1335         hotplug_cpu__broadcast_tick_pull(cpu);  <<
1336         /* This actually kills the CPU. */       332         /* This actually kills the CPU. */
1337         __cpu_die(cpu);                          333         __cpu_die(cpu);
1338                                                  334 
1339         cpuhp_bp_sync_dead(cpu);              !! 335         /* CPU is completely dead: tell everyone.  Too late to complain. */
1340                                               !! 336         cpu_notify_nofail(CPU_DEAD | mod, hcpu);
1341         tick_cleanup_dead_cpu(cpu);           << 
1342                                               << 
1343         /*                                    << 
1344          * Callbacks must be re-integrated right away to the RCU state machine.  <<
1345          * Otherwise an RCU callback could block a further teardown function  <<
1346          * waiting for its completion.        << 
1347          */                                   << 
1348         rcutree_migrate_callbacks(cpu);       << 
1349                                               << 
1350         return 0;                             << 
1351 }                                             << 
1352                                               << 
1353 static void cpuhp_complete_idle_dead(void *arg)  <<
1354 {                                             << 
1355         struct cpuhp_cpu_state *st = arg;     << 
1356                                               << 
1357         complete_ap_thread(st, false);        << 
1358 }                                             << 
1359                                               << 
1360 void cpuhp_report_idle_dead(void)             << 
1361 {                                             << 
1362         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);  <<
1363   <<
1364         BUG_ON(st->state != CPUHP_AP_OFFLINE);  <<
1365         tick_assert_timekeeping_handover();  <<
1366         rcutree_report_cpu_dead();  <<
1367         st->state = CPUHP_AP_IDLE_DEAD;  <<
1368         /*  <<
1369          * We cannot call complete after rcutree_report_cpu_dead() so we delegate it  <<
1370          * to an online cpu.  <<
1371          */  <<
1372         smp_call_function_single(cpumask_first(cpu_online_mask),  <<
1373                                  cpuhp_complete_idle_dead, st, 0);  <<
1374 }                                             << 
1375                                               << 
1376 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,  <<
1377                                 enum cpuhp_state target)  <<
1378 {  <<
1379         enum cpuhp_state prev_state = st->state;  <<
1380         int ret = 0;  <<
1381   <<
1382         ret = cpuhp_invoke_callback_range(false, cpu, st, target);  <<
1383         if (ret) {  <<
1384                 pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",  <<
1385                          ret, cpu, cpuhp_get_step(st->state)->name,  <<
1386                          st->state);  <<
1387   <<
1388                 cpuhp_reset_state(cpu, st, prev_state);  <<
1389   <<
1390                 if (st->state < prev_state)  <<
1391                         WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,  <<
1392                                                             prev_state));  <<
1393         }  <<
1394   <<
1395         return ret;  <<
1396 }  <<
1397                                               << 
1398 /* Requires cpu_add_remove_lock to be held */ << 
1399 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,  <<
1400                            enum cpuhp_state target)  <<
1401 {  <<
1402         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  <<
1403         int prev_state, ret = 0;  <<
1404   <<
1405         if (num_online_cpus() == 1)  <<
1406                 return -EBUSY;  <<
1407   <<
1408         if (!cpu_present(cpu))  <<
1409                 return -EINVAL;  <<
1410   <<
1411         cpus_write_lock();  <<
1412   <<
1413         cpuhp_tasks_frozen = tasks_frozen;  <<
1414   <<
1415         prev_state = cpuhp_set_state(cpu, st, target);  <<
1416         /*  <<
1417          * If the current CPU state is in the range of the AP hotplug thread,  <<
1418          * then we need to kick the thread.  <<
1419          */  <<
1420         if (st->state > CPUHP_TEARDOWN_CPU) {  <<
1421                 st->target = max((int)target, CPUHP_TEARDOWN_CPU);  <<
1422                 ret = cpuhp_kick_ap_work(cpu);  <<
1423                 /*  <<
1424                  * The AP side has done the error rollback already. Just  <<
1425                  * return the error code..  <<
1426                  */  <<
1427                 if (ret)  <<
1428                         goto out;  <<
1429   <<
1430                 /*  <<
1431                  * We might have stopped still in the range of the AP hotplug  <<
1432                  * thread. Nothing to do anymore.  <<
1433                  */  <<
1434                 if (st->state > CPUHP_TEARDOWN_CPU)  <<
1435                         goto out;  <<
1436   <<
1437                 st->target = target;  <<
1438         }  <<
1439         /*  <<
1440          * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need  <<
1441          * to do the further cleanups.  <<
1442          */  <<
1443         ret = cpuhp_down_callbacks(cpu, st, target);  <<
1444         if (ret && st->state < prev_state) {  <<
1445                 if (st->state == CPUHP_TEARDOWN_CPU && can_rollback_cpu(st)) {  <<
1446                         cpuhp_reset_state(cpu, st, prev_state);  <<
1447                         __cpuhp_kick_ap(st);  <<
1448                 } else {  <<
1449                         WARN(1, "DEAD callback error for CPU%d", cpu);  <<
1450                 }  <<
1451         }  <<
1452                                                  337 
1453 out:                                          !! 338         check_for_tasks(cpu);
1454         cpus_write_unlock();                  << 
1455         /*                                    << 
1456          * Do post unplug cleanup. This is still protected against  <<
1457          * concurrent CPU hotplug via cpu_add_remove_lock.  <<
1458          */                                   << 
1459         lockup_detector_cleanup();            << 
1460         arch_smt_update();                    << 
1461         return ret;                           << 
1462 }                                             << 
1463                                               << 
1464 struct cpu_down_work {                        << 
1465         unsigned int            cpu;          << 
1466         enum cpuhp_state        target;       << 
1467 };                                            << 
1468                                               << 
1469 static long __cpu_down_maps_locked(void *arg) << 
1470 {                                             << 
1471         struct cpu_down_work *work = arg;     << 
1472                                                  339 
1473         return _cpu_down(work->cpu, 0, work->target);  !! 340 out_release:
1474 }                                             !! 341         cpu_hotplug_done();
1475                                               !! 342         if (!err)
1476 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)  !! 343                 cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
1477 {                                             << 
1478         struct cpu_down_work work = { .cpu = cpu, .target = target, };  <<
1479                                               << 
1480         /*                                    << 
1481          * If the platform does not support hotplug, report it explicitly to  <<
1482          * differentiate it from a transient offlining failure.  <<
1483          */                                   << 
1484         if (cpu_hotplug_offline_disabled)     << 
1485                 return -EOPNOTSUPP;           << 
1486         if (cpu_hotplug_disabled)             << 
1487                 return -EBUSY;                << 
1488                                               << 
1489         /*                                    << 
1490          * Ensure that the control task does not run on the to be offlined  <<
1491          * CPU to prevent a deadlock against cfs_b->period_timer.  <<
1492          * Also keep at least one housekeeping cpu onlined to avoid generating  <<
1493          * an empty sched_domain span.  <<
1494          */                                   << 
1495         for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {  <<
1496                 if (cpu != work.cpu)          << 
1497                         return work_on_cpu(cpu, __cpu_down_maps_locked, &work);  <<
1498         }                                     << 
1499         return -EBUSY;                        << 
1500 }                                             << 
1501                                               << 
1502 static int cpu_down(unsigned int cpu, enum cp << 
1503 {                                             << 
1504         int err;                              << 
1505                                               << 
1506         cpu_maps_update_begin();              << 
1507         err = cpu_down_maps_locked(cpu, targe << 
1508         cpu_maps_update_done();               << 
1509         return err;                              344         return err;
1510 }                                                345 }
1511                                                  346 
1512 /**  <<
1513  * cpu_device_down - Bring down a cpu device  <<
1514  * @dev: Pointer to the cpu device to offline  <<
1515  *  <<
1516  * This function is meant to be used by device core cpu subsystem only.  <<
1517  *  <<
1518  * Other subsystems should use remove_cpu() instead.  <<
1519  *  <<
1520  * Return: %0 on success or a negative errno code  <<
1521  */  <<
1522 int cpu_device_down(struct device *dev)  <<
1523 {  <<
1524         return cpu_down(dev->id, CPUHP_OFFLINE);  <<
1525 }  <<
1526                                               << 
1527 int remove_cpu(unsigned int cpu)              << 
1528 {                                                348 {
1529         int ret;                              !! 349         int err;
1530                                               << 
1531         lock_device_hotplug();                << 
1532         ret = device_offline(get_cpu_device(cpu));  <<
1533         unlock_device_hotplug();              << 
1534                                               << 
1535         return ret;                           << 
1536 }                                             << 
1537 EXPORT_SYMBOL_GPL(remove_cpu);                << 
1538                                               << 
1539 void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)  <<
1540 {                                             << 
1541         unsigned int cpu;                     << 
1542         int error;                            << 
1543                                                  350 
1544         cpu_maps_update_begin();                 351         cpu_maps_update_begin();
1545                                                  352 
1546         /*                                    !! 353         if (cpu_hotplug_disabled) {
1547          * Make certain the cpu I'm about to reboot on is online.  !! 353         if (cpu_hotplug_disabled) {
1548          *  !! 354                 err = -EBUSY;
1549          * This is inline to what migrate_to_reboot_cpu() already do.  !! 355                 goto out;
1550          */                                   << 
1551         if (!cpu_online(primary_cpu))         << 
1552                 primary_cpu = cpumask_first(cpu_online_mask);  <<
1553                                               << 
1554         for_each_online_cpu(cpu) {            << 
1555                 if (cpu == primary_cpu)       << 
1556                         continue;             << 
1557                                               << 
1558                 error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);  <<
1559                 if (error) {                  << 
1560                         pr_err("Failed to offline CPU%d - error=%d",  <<
1561                                 cpu, error);  << 
1562                         break;                << 
1563                 }                             << 
1564         }                                        356         }
1565                                                  357 
1566         /*                                    !! 358         err = _cpu_down(cpu, 0);
1567          * Ensure all but the reboot CPU are offline.  <<
1568          */                                   << 
1569         BUG_ON(num_online_cpus() > 1);        << 
1570                                               << 
1571         /*                                    << 
1572          * Make sure the CPUs won't be enabled by someone else after this  <<
1573          * point. Kexec will reboot to a new kernel shortly resetting  <<
1574          * everything along the way.  <<
1575          */                                   << 
1576         cpu_hotplug_disabled++;               << 
1577                                                  359 
                                                   >> 360 out:
1578         cpu_maps_update_done();                  361         cpu_maps_update_done();
                                                   >> 362         return err;
1579 }                                                363 }
1580                                               !! 364 EXPORT_SYMBOL(cpu_down);
1581 #else                                         << 
1582 #define takedown_cpu            NULL          << 
1583 #endif /*CONFIG_HOTPLUG_CPU*/                    365 #endif /*CONFIG_HOTPLUG_CPU*/
1584                                                  366 
1585 /**  <<
1586  * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU  <<
1587  * @cpu: cpu that just started  <<
1588  *  <<
1589  * It must be called by the arch code on the new cpu, before it enables  <<
1590  * interrupts and before the "boot" cpu returns from __cpu_up().  <<
1591  */  <<
1592 void notify_cpu_starting(unsigned int cpu)  <<
1593 {  <<
1594         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  <<
1595         enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);  <<
1596   <<
1597         rcutree_report_cpu_starting(cpu);  <<
1598         cpumask_set_cpu(cpu, &cpus_booted_once_mask);  <<
1599   <<
1600         /*  <<
1601          * STARTING must not fail!  <<
1602          */  <<
1603         cpuhp_invoke_callback_range_nofail(true, cpu, st, target);  <<
1604 }  <<
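To see where this hook sits in the bringup flow, here is a hedged sketch of a typical architecture secondary-CPU startup path. The function name is invented and the ordering only mirrors the common pattern (per-CPU setup, notify_cpu_starting(), set_cpu_online(), then the idle loop); it is not code from this file:

/* Illustrative sketch only -- not part of kernel/cpu.c. */
void example_secondary_start_kernel(void)
{
        unsigned int cpu = smp_processor_id();

        /* per-CPU hardware setup (MMU, timers, ...) has happened by now */

        notify_cpu_starting(cpu);       /* run the STARTING section callbacks */
        set_cpu_online(cpu, true);      /* the waiting "boot" CPU sees us online */

        local_irq_enable();

        /* enter the idle loop; cpuhp_online_idle() is reached from there */
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}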
1605                                               << 
1606 /*  <<
1607  * Called from the idle task. Wake up the controlling task which brings the  <<
1608  * hotplug thread of the upcoming CPU up and then delegates the rest of the  <<
1609  * online bringup to the hotplug thread.  <<
1610  */  <<
1611 void cpuhp_online_idle(enum cpuhp_state state)  <<
1612 {  <<
1613         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);  <<
1614   <<
1615         /* Happens for the boot cpu */  <<
1616         if (state != CPUHP_AP_ONLINE_IDLE)  <<
1617                 return;  <<
1618   <<
1619         cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);  <<
1620   <<
1621         /*  <<
1622          * Unpark the stopper thread before we start the idle loop (and start  <<
1623          * scheduling); this ensures the stopper task is always available.  <<
1624          */  <<
1625         stop_machine_unpark(smp_processor_id());  <<
1626   <<
1627         st->state = CPUHP_AP_ONLINE_IDLE;  <<
1628         complete_ap_thread(st, true);  <<
1629 }  <<
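For reference, cpuhp_online_idle() is reached from the generic idle entry point. A simplified sketch of that caller, paraphrased from memory of kernel/sched/idle.c and shown only to make the hand-off visible (details omitted):

/* Simplified, paraphrased sketch of the caller in kernel/sched/idle.c. */
void cpu_startup_entry(enum cpuhp_state state)
{
        arch_cpu_idle_prepare();
        cpuhp_online_idle(state);       /* wake the controlling hotplug thread */
        while (1)
                do_idle();
}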
1630                                               << 
1631 /* Requires cpu_add_remove_lock to be held */    367 /* Requires cpu_add_remove_lock to be held */
1632 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)  !! 368 static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
1633 {                                                369 {
1634         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  !! 370         int ret, nr_calls = 0;
                                                   >> 371         void *hcpu = (void *)(long)cpu;
                                                   >> 372         unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
1635         struct task_struct *idle;                373         struct task_struct *idle;
1636         int ret = 0;                          << 
1637                                                  374 
1638         cpus_write_lock();                    !! 375         cpu_hotplug_begin();
1639                                                  376 
1640         if (!cpu_present(cpu)) {              !! 377         if (cpu_online(cpu) || !cpu_present(cpu)) {
1641                 ret = -EINVAL;                   378                 ret = -EINVAL;
1642                 goto out;                        379                 goto out;
1643         }                                        380         }
1644                                                  381 
1645         /*                                    !! 382         idle = idle_thread_get(cpu);
1646          * The caller of cpu_up() might have raced with another  !! 383         if (IS_ERR(idle)) {
1647          * caller. Nothing to do.             !! 384                 ret = PTR_ERR(idle);
1648          */                                   << 
1649         if (st->state >= target)              << 
1650                 goto out;                        385                 goto out;
1651                                               << 
1652         if (st->state == CPUHP_OFFLINE) {     << 
1653                 /* Let it fail before we try to bring the cpu up */  <<
1654                 idle = idle_thread_get(cpu);  << 
1655                 if (IS_ERR(idle)) {           << 
1656                         ret = PTR_ERR(idle);  << 
1657                         goto out;             << 
1658                 }                             << 
1659                                               << 
1660                 /*                            << 
1661                  * Reset stale stack state from the last time this CPU was online.  <<
1662                  */                           << 
1663                 scs_task_reset(idle);         << 
1664                 kasan_unpoison_task_stack(idl << 
1665         }                                        386         }
1666                                                  387 
1667         cpuhp_tasks_frozen = tasks_frozen;    !! 388         ret = smpboot_create_threads(cpu);
                                                   >> 389         if (ret)
                                                   >> 390                 goto out;
1668                                                  391 
1669         cpuhp_set_state(cpu, st, target);     !! 392         ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
1670         /*                                    !! 393         if (ret) {
1671          * If the current CPU state is in the range of the AP hotplug thread,  !! 394                 nr_calls--;
1672          * then we need to kick the thread once more.  !! 395                 printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
1673          */                                   !! 396                                 __func__, cpu);
1674         if (st->state > CPUHP_BRINGUP_CPU) {  !! 397                 goto out_notify;
1675                 ret = cpuhp_kick_ap_work(cpu) << 
1676                 /*                            << 
1677                  * The AP side has done the error rollback already. Just  <<
1678                  * return the error code..    << 
1679                  */                           << 
1680                 if (ret)                      << 
1681                         goto out;             << 
1682         }                                        398         }
1683                                                  399 
1684         /*                                    !! 400         /* Arch-specific enabling code. */
1685          * Try to reach the target state. We max out on the BP at  !! 401         ret = __cpu_up(cpu, idle);
1686          * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is  !! 402         if (ret != 0)
1687          * responsible for bringing it up to the target state.  !! 403                 goto out_notify;
1688          */  !! 404         BUG_ON(!cpu_online(cpu));
1689         target = min((int)target, CPUHP_BRINGUP_CPU);  !! 405 
1690         ret = cpuhp_up_callbacks(cpu, st, target);  !! 406         /* Wake the per cpu threads */
                                                   >> 407         smpboot_unpark_threads(cpu);
                                                   >> 408 
                                                   >> 409         /* Now call notifier in preparation. */
                                                   >> 410         cpu_notify(CPU_ONLINE | mod, hcpu);
                                                   >> 411 
                                                   >> 412 out_notify:
                                                   >> 413         if (ret != 0)
                                                   >> 414                 __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
1691 out:                                             415 out:
1692         cpus_write_unlock();                  !! 416         cpu_hotplug_done();
1693         arch_smt_update();                    !! 417 
1694         return ret;                              418         return ret;
1695 }                                                419 }
1696                                                  420 
1697 static int cpu_up(unsigned int cpu, enum cpuhp_state target)  !! 421 int __cpuinit cpu_up(unsigned int cpu)
1698 {                                                422 {
1699         int err = 0;                             423         int err = 0;
1700                                                  424 
                                                   >> 425 #ifdef  CONFIG_MEMORY_HOTPLUG
                                                   >> 426         int nid;
                                                   >> 427         pg_data_t       *pgdat;
                                                   >> 428 #endif
                                                   >> 429 
1701         if (!cpu_possible(cpu)) {                430         if (!cpu_possible(cpu)) {
1702                 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",  !! 431                 printk(KERN_ERR "can't online cpu %d because it is not "
1703                        cpu);                  !! 432                         "configured as may-hotadd at boot time\n", cpu);
                                                   >> 433 #if defined(CONFIG_IA64)
                                                   >> 434                 printk(KERN_ERR "please check additional_cpus= boot "
                                                   >> 435                                 "parameter\n");
                                                   >> 436 #endif
1704                 return -EINVAL;                  437                 return -EINVAL;
1705         }                                        438         }
1706                                                  439 
1707         err = try_online_node(cpu_to_node(cpu !! 440 #ifdef  CONFIG_MEMORY_HOTPLUG
1708         if (err)                              !! 441         nid = cpu_to_node(cpu);
1709                 return err;                   !! 442         if (!node_online(nid)) {
1710                                               !! 443                 err = mem_online_node(nid);
1711         cpu_maps_update_begin();              !! 444                 if (err)
1712                                               !! 445                         return err;
1713         if (cpu_hotplug_disabled) {           << 
1714                 err = -EBUSY;                 << 
1715                 goto out;                     << 
1716         }                                        446         }
1717         if (!cpu_bootable(cpu)) {             << 
1718                 err = -EPERM;                 << 
1719                 goto out;                     << 
1720         }                                     << 
1721                                               << 
1722         err = _cpu_up(cpu, 0, target);        << 
1723 out:                                          << 
1724         cpu_maps_update_done();               << 
1725         return err;                           << 
1726 }                                             << 
1727                                               << 
1728 /**                                           << 
1729  * cpu_device_up - Bring up a cpu device      << 
1730  * @dev: Pointer to the cpu device to online  << 
1731  *                                            << 
1732  * This function is meant to be used by devic << 
1733  *                                            << 
1734  * Other subsystems should use add_cpu() inst << 
1735  *                                            << 
1736  * Return: %0 on success or a negative errno  << 
1737  */                                           << 
1738 int cpu_device_up(struct device *dev)         << 
1739 {                                             << 
1740         return cpu_up(dev->id, CPUHP_ONLINE); << 
1741 }                                             << 
1742                                               << 
1743 int add_cpu(unsigned int cpu)                 << 
1744 {                                             << 
1745         int ret;                              << 
1746                                               << 
1747         lock_device_hotplug();                << 
1748         ret = device_online(get_cpu_device(cp << 
1749         unlock_device_hotplug();              << 
1750                                               << 
1751         return ret;                           << 
1752 }                                             << 
1753 EXPORT_SYMBOL_GPL(add_cpu);                   << 
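
[Editor's note] As the cpu_device_up() comment above says, subsystems outside the device core are expected to go through add_cpu(). A minimal sketch of such a caller follows; the function name and the choice of CPU 2 are purely illustrative and not part of this file.

        #include <linux/cpu.h>
        #include <linux/printk.h>

        /* Hypothetical caller: online one specific CPU through the device core. */
        static int example_online_cpu2(void)
        {
                int ret = add_cpu(2);   /* takes the device hotplug lock internally */

                if (ret)
                        pr_warn("example: onlining CPU2 failed: %d\n", ret);
                return ret;
        }

Going through add_cpu() (and thus device_online()) keeps the CPU device's sysfs "online" attribute consistent with the actual hotplug state.
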
1754                                                  447 
1755 /**                                           !! 448         pgdat = NODE_DATA(nid);
1756  * bringup_hibernate_cpu - Bring up the CPU t !! 449         if (!pgdat) {
1757  * @sleep_cpu: The cpu we hibernated on and s !! 450                 printk(KERN_ERR
1758  *                                            !! 451                         "Can't online cpu %d due to NULL pgdat\n", cpu);
1759  * On some architectures like arm64, we can h !! 452                 return -ENOMEM;
1760  * wake up the CPU we hibernated on might be  << 
1761  * using maxcpus= for example.                << 
1762  *                                            << 
1763  * Return: %0 on success or a negative errno  << 
1764  */                                           << 
1765 int bringup_hibernate_cpu(unsigned int sleep_ << 
1766 {                                             << 
1767         int ret;                              << 
1768                                               << 
1769         if (!cpu_online(sleep_cpu)) {         << 
1770                 pr_info("Hibernated on a CPU  << 
1771                 ret = cpu_up(sleep_cpu, CPUHP << 
1772                 if (ret) {                    << 
1773                         pr_err("Failed to bri << 
1774                         return ret;           << 
1775                 }                             << 
1776         }                                        453         }
1777         return 0;                             << 
1778 }                                             << 
1779                                               << 
1780 static void __init cpuhp_bringup_mask(const s << 
1781                                       enum cp << 
1782 {                                             << 
1783         unsigned int cpu;                     << 
1784                                               << 
1785         for_each_cpu(cpu, mask) {             << 
1786                 struct cpuhp_cpu_state *st =  << 
1787                                                  454 
1788                 if (cpu_up(cpu, target) && ca !! 455         if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
1789                         /*                    !! 456                 mutex_lock(&zonelists_mutex);
1790                          * If this failed the !! 457                 build_all_zonelists(NULL, NULL);
1791                          * rolled back to CPU !! 458                 mutex_unlock(&zonelists_mutex);
1792                          * online. Clean it u << 
1793                          */                   << 
1794                         WARN_ON(cpuhp_invoke_ << 
1795                 }                             << 
1796                                               << 
1797                 if (!--ncpus)                 << 
1798                         break;                << 
1799         }                                        459         }
1800 }                                             << 
1801                                               << 
1802 #ifdef CONFIG_HOTPLUG_PARALLEL                << 
1803 static bool __cpuhp_parallel_bringup __ro_aft << 
1804                                               << 
1805 static int __init parallel_bringup_parse_para << 
1806 {                                             << 
1807         return kstrtobool(arg, &__cpuhp_paral << 
1808 }                                             << 
1809 early_param("cpuhp.parallel", parallel_bringu << 
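
[Editor's note] The early_param() above makes parallel bringup controllable from the kernel command line: booting with cpuhp.parallel=0 forces the fully serialized per-CPU path in bringup_nonboot_cpus(), while cpuhp.parallel=1 (the default on architectures that opt in via CONFIG_HOTPLUG_PARALLEL) allows the two-phase CPUHP_BP_KICK_AP/CPUHP_BRINGUP_CPU scheme described below.

        cpuhp.parallel=0        (parsed with kstrtobool(), so 0/1, y/n and on/off are accepted)
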
1810                                               << 
1811 #ifdef CONFIG_HOTPLUG_SMT                     << 
1812 static inline bool cpuhp_smt_aware(void)      << 
1813 {                                             << 
1814         return cpu_smt_max_threads > 1;       << 
1815 }                                             << 
1816                                               << 
1817 static inline const struct cpumask *cpuhp_get << 
1818 {                                             << 
1819         return cpu_primary_thread_mask;       << 
1820 }                                             << 
1821 #else                                         << 
1822 static inline bool cpuhp_smt_aware(void)      << 
1823 {                                             << 
1824         return false;                         << 
1825 }                                             << 
1826 static inline const struct cpumask *cpuhp_get << 
1827 {                                             << 
1828         return cpu_none_mask;                 << 
1829 }                                             << 
1830 #endif                                           460 #endif
1831                                                  461 
1832 bool __weak arch_cpuhp_init_parallel_bringup( !! 462         cpu_maps_update_begin();
1833 {                                             << 
1834         return true;                          << 
1835 }                                             << 
1836                                               << 
1837 /*                                            << 
1838  * On architectures which have enabled parall << 
1839  * prepare states for each of the to be onlin << 
1840  * sends the startup IPI to the APs. The APs  << 
1841  * bringup code in parallel and then wait for << 
1842  * them one by one for the final onlining pro << 
1843  *                                            << 
1844  * This avoids waiting for each AP to respond << 
1845  * CPUHP_BRINGUP_CPU.                         << 
1846  */                                           << 
1847 static bool __init cpuhp_bringup_cpus_paralle << 
1848 {                                             << 
1849         const struct cpumask *mask = cpu_pres << 
1850                                               << 
1851         if (__cpuhp_parallel_bringup)         << 
1852                 __cpuhp_parallel_bringup = ar << 
1853         if (!__cpuhp_parallel_bringup)        << 
1854                 return false;                 << 
1855                                               << 
1856         if (cpuhp_smt_aware()) {              << 
1857                 const struct cpumask *pmask = << 
1858                 static struct cpumask tmp_mas << 
1859                                                  463 
1860                 /*                            !! 464         if (cpu_hotplug_disabled) {
1861                  * X86 requires to prevent th !! 465                 err = -EBUSY;
1862                  * the primary thread does a  !! 466                 goto out;
1863                  * reasons. Bring the primary << 
1864                  */                           << 
1865                 cpumask_and(&tmp_mask, mask,  << 
1866                 cpuhp_bringup_mask(&tmp_mask, << 
1867                 cpuhp_bringup_mask(&tmp_mask, << 
1868                 /* Account for the online CPU << 
1869                 ncpus -= num_online_cpus();   << 
1870                 if (!ncpus)                   << 
1871                         return true;          << 
1872                 /* Create the mask for second << 
1873                 cpumask_andnot(&tmp_mask, mas << 
1874                 mask = &tmp_mask;             << 
1875         }                                        467         }
1876                                                  468 
1877         /* Bring the not-yet started CPUs up  !! 469         err = _cpu_up(cpu, 0);
1878         cpuhp_bringup_mask(mask, ncpus, CPUHP << 
1879         cpuhp_bringup_mask(mask, ncpus, CPUHP << 
1880         return true;                          << 
1881 }                                             << 
1882 #else                                         << 
1883 static inline bool cpuhp_bringup_cpus_paralle << 
1884 #endif /* CONFIG_HOTPLUG_PARALLEL */          << 
1885                                                  470 
1886 void __init bringup_nonboot_cpus(unsigned int !! 471 out:
1887 {                                             !! 472         cpu_maps_update_done();
1888         if (!max_cpus)                        !! 473         return err;
1889                 return;                       << 
1890                                               << 
1891         /* Try parallel bringup optimization  << 
1892         if (cpuhp_bringup_cpus_parallel(max_c << 
1893                 return;                       << 
1894                                               << 
1895         /* Full per CPU serialized bringup */ << 
1896         cpuhp_bringup_mask(cpu_present_mask,  << 
1897 }                                                474 }
                                                   >> 475 EXPORT_SYMBOL_GPL(cpu_up);
1898                                                  476 
1899 #ifdef CONFIG_PM_SLEEP_SMP                       477 #ifdef CONFIG_PM_SLEEP_SMP
1900 static cpumask_var_t frozen_cpus;                478 static cpumask_var_t frozen_cpus;
1901                                                  479 
1902 int freeze_secondary_cpus(int primary)        !! 480 int disable_nonboot_cpus(void)
1903 {                                                481 {
1904         int cpu, error = 0;                   !! 482         int cpu, first_cpu, error = 0;
1905                                                  483 
1906         cpu_maps_update_begin();                 484         cpu_maps_update_begin();
1907         if (primary == -1) {                  !! 485         first_cpu = cpumask_first(cpu_online_mask);
1908                 primary = cpumask_first(cpu_o << 
1909                 if (!housekeeping_cpu(primary << 
1910                         primary = housekeepin << 
1911         } else {                              << 
1912                 if (!cpu_online(primary))     << 
1913                         primary = cpumask_fir << 
1914         }                                     << 
1915                                               << 
1916         /*                                       486         /*
1917          * We take down all of the non-boot C    487          * We take down all of the non-boot CPUs in one shot to avoid races
1918          * with the userspace trying to use t    488          * with the userspace trying to use the CPU hotplug at the same time
1919          */                                      489          */
1920         cpumask_clear(frozen_cpus);              490         cpumask_clear(frozen_cpus);
1921                                                  491 
1922         pr_info("Disabling non-boot CPUs ...\ !! 492         printk("Disabling non-boot CPUs ...\n");
1923         for (cpu = nr_cpu_ids - 1; cpu >= 0;  !! 493         for_each_online_cpu(cpu) {
1924                 if (!cpu_online(cpu) || cpu = !! 494                 if (cpu == first_cpu)
1925                         continue;                495                         continue;
1926                                               !! 496                 error = _cpu_down(cpu, 1);
1927                 if (pm_wakeup_pending()) {    << 
1928                         pr_info("Wakeup pendi << 
1929                         error = -EBUSY;       << 
1930                         break;                << 
1931                 }                             << 
1932                                               << 
1933                 trace_suspend_resume(TPS("CPU << 
1934                 error = _cpu_down(cpu, 1, CPU << 
1935                 trace_suspend_resume(TPS("CPU << 
1936                 if (!error)                      497                 if (!error)
1937                         cpumask_set_cpu(cpu,     498                         cpumask_set_cpu(cpu, frozen_cpus);
1938                 else {                           499                 else {
1939                         pr_err("Error taking  !! 500                         printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                                   >> 501                                 cpu, error);
1940                         break;                   502                         break;
1941                 }                                503                 }
1942         }                                        504         }
1943                                                  505 
1944         if (!error)                           !! 506         if (!error) {
1945                 BUG_ON(num_online_cpus() > 1)    507                 BUG_ON(num_online_cpus() > 1);
1946         else                                  !! 508                 /* Make sure the CPUs won't be enabled by someone else */
1947                 pr_err("Non-boot CPUs are not !! 509                 cpu_hotplug_disabled = 1;
1948                                               !! 510         } else {
1949         /*                                    !! 511                 printk(KERN_ERR "Non-boot CPUs are not disabled\n");
1950          * Make sure the CPUs won't be enable !! 512         }
1951          * this even in case of failure as al << 
1952          * supposed to do thaw_secondary_cpus << 
1953          */                                   << 
1954         cpu_hotplug_disabled++;               << 
1955                                               << 
1956         cpu_maps_update_done();                  513         cpu_maps_update_done();
1957         return error;                            514         return error;
1958 }                                                515 }
1959                                                  516 
1960 void __weak arch_thaw_secondary_cpus_begin(vo !! 517 void __weak arch_enable_nonboot_cpus_begin(void)
1961 {                                                518 {
1962 }                                                519 }
1963                                                  520 
1964 void __weak arch_thaw_secondary_cpus_end(void !! 521 void __weak arch_enable_nonboot_cpus_end(void)
1965 {                                                522 {
1966 }                                                523 }
1967                                                  524 
1968 void thaw_secondary_cpus(void)                !! 525 void __ref enable_nonboot_cpus(void)
1969 {                                                526 {
1970         int cpu, error;                          527         int cpu, error;
1971                                                  528 
1972         /* Allow everyone to use the CPU hotp    529         /* Allow everyone to use the CPU hotplug again */
1973         cpu_maps_update_begin();                 530         cpu_maps_update_begin();
1974         __cpu_hotplug_enable();               !! 531         cpu_hotplug_disabled = 0;
1975         if (cpumask_empty(frozen_cpus))          532         if (cpumask_empty(frozen_cpus))
1976                 goto out;                        533                 goto out;
1977                                                  534 
1978         pr_info("Enabling non-boot CPUs ...\n !! 535         printk(KERN_INFO "Enabling non-boot CPUs ...\n");
1979                                                  536 
1980         arch_thaw_secondary_cpus_begin();     !! 537         arch_enable_nonboot_cpus_begin();
1981                                                  538 
1982         for_each_cpu(cpu, frozen_cpus) {         539         for_each_cpu(cpu, frozen_cpus) {
1983                 trace_suspend_resume(TPS("CPU !! 540                 error = _cpu_up(cpu, 1);
1984                 error = _cpu_up(cpu, 1, CPUHP << 
1985                 trace_suspend_resume(TPS("CPU << 
1986                 if (!error) {                    541                 if (!error) {
1987                         pr_info("CPU%d is up\ !! 542                         printk(KERN_INFO "CPU%d is up\n", cpu);
1988                         continue;                543                         continue;
1989                 }                                544                 }
1990                 pr_warn("Error taking CPU%d u !! 545                 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
1991         }                                        546         }
1992                                                  547 
1993         arch_thaw_secondary_cpus_end();       !! 548         arch_enable_nonboot_cpus_end();
1994                                                  549 
1995         cpumask_clear(frozen_cpus);              550         cpumask_clear(frozen_cpus);
1996 out:                                             551 out:
1997         cpu_maps_update_done();                  552         cpu_maps_update_done();
1998 }                                                553 }
1999                                                  554 
2000 static int __init alloc_frozen_cpus(void)        555 static int __init alloc_frozen_cpus(void)
2001 {                                                556 {
2002         if (!alloc_cpumask_var(&frozen_cpus,     557         if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
2003                 return -ENOMEM;                  558                 return -ENOMEM;
2004         return 0;                                559         return 0;
2005 }                                                560 }
2006 core_initcall(alloc_frozen_cpus);                561 core_initcall(alloc_frozen_cpus);
2007                                                  562 
2008 /*                                               563 /*
2009  * When callbacks for CPU hotplug notificatio    564  * When callbacks for CPU hotplug notifications are being executed, we must
2010  * ensure that the state of the system with r    565  * ensure that the state of the system with respect to the tasks being frozen
2011  * or not, as reported by the notification, r    566  * or not, as reported by the notification, remains unchanged *throughout the
2012  * duration* of the execution of the callback    567  * duration* of the execution of the callbacks.
2013  * Hence we need to prevent the freezer from     568  * Hence we need to prevent the freezer from racing with regular CPU hotplug.
2014  *                                               569  *
2015  * This synchronization is implemented by mut    570  * This synchronization is implemented by mutually excluding regular CPU
2016  * hotplug and Suspend/Hibernate call paths b    571  * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
2017  * Hibernate notifications.                      572  * Hibernate notifications.
2018  */                                              573  */
2019 static int                                       574 static int
2020 cpu_hotplug_pm_callback(struct notifier_block    575 cpu_hotplug_pm_callback(struct notifier_block *nb,
2021                         unsigned long action,    576                         unsigned long action, void *ptr)
2022 {                                                577 {
2023         switch (action) {                        578         switch (action) {
2024                                                  579 
2025         case PM_SUSPEND_PREPARE:                 580         case PM_SUSPEND_PREPARE:
2026         case PM_HIBERNATION_PREPARE:             581         case PM_HIBERNATION_PREPARE:
2027                 cpu_hotplug_disable();           582                 cpu_hotplug_disable();
2028                 break;                           583                 break;
2029                                                  584 
2030         case PM_POST_SUSPEND:                    585         case PM_POST_SUSPEND:
2031         case PM_POST_HIBERNATION:                586         case PM_POST_HIBERNATION:
2032                 cpu_hotplug_enable();            587                 cpu_hotplug_enable();
2033                 break;                           588                 break;
2034                                                  589 
2035         default:                                 590         default:
2036                 return NOTIFY_DONE;              591                 return NOTIFY_DONE;
2037         }                                        592         }
2038                                                  593 
2039         return NOTIFY_OK;                        594         return NOTIFY_OK;
2040 }                                                595 }
2041                                                  596 
2042                                                  597 
2043 static int __init cpu_hotplug_pm_sync_init(vo    598 static int __init cpu_hotplug_pm_sync_init(void)
2044 {                                                599 {
2045         /*                                       600         /*
2046          * cpu_hotplug_pm_callback has higher    601          * cpu_hotplug_pm_callback has higher priority than x86
2047          * bsp_pm_callback which depends on c    602          * bsp_pm_callback which depends on cpu_hotplug_pm_callback
2048          * to disable cpu hotplug to avoid cp    603          * to disable cpu hotplug to avoid cpu hotplug race.
2049          */                                      604          */
2050         pm_notifier(cpu_hotplug_pm_callback,     605         pm_notifier(cpu_hotplug_pm_callback, 0);
2051         return 0;                                606         return 0;
2052 }                                                607 }
2053 core_initcall(cpu_hotplug_pm_sync_init);         608 core_initcall(cpu_hotplug_pm_sync_init);
2054                                                  609 
2055 #endif /* CONFIG_PM_SLEEP_SMP */                 610 #endif /* CONFIG_PM_SLEEP_SMP */
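
[Editor's note] The suspend/hibernate notifier above is the canonical user of cpu_hotplug_disable()/cpu_hotplug_enable(). A minimal sketch of the same pattern, with a hypothetical caller, could look like this:

        #include <linux/cpu.h>

        /* Hypothetical: keep the set of online CPUs stable across a long
         * operation that cannot simply hold cpus_read_lock() throughout.
         */
        static void example_no_hotplug_section(void)
        {
                cpu_hotplug_disable();  /* bumps cpu_hotplug_disabled; cpu_up()/cpu_down() now return -EBUSY */

                /* ... work that must not race with CPU hotplug ... */

                cpu_hotplug_enable();   /* drops the count; disable/enable pairs nest like a refcount */
        }
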
2056                                                  611 
2057 int __boot_cpu_id;                            << 
2058                                               << 
2059 #endif /* CONFIG_SMP */                       << 
2060                                               << 
2061 /* Boot processor state steps */              << 
2062 static struct cpuhp_step cpuhp_hp_states[] =  << 
2063         [CPUHP_OFFLINE] = {                   << 
2064                 .name                   = "of << 
2065                 .startup.single         = NUL << 
2066                 .teardown.single        = NUL << 
2067         },                                    << 
2068 #ifdef CONFIG_SMP                             << 
2069         [CPUHP_CREATE_THREADS]= {             << 
2070                 .name                   = "th << 
2071                 .startup.single         = smp << 
2072                 .teardown.single        = NUL << 
2073                 .cant_stop              = tru << 
2074         },                                    << 
2075         [CPUHP_PERF_PREPARE] = {              << 
2076                 .name                   = "pe << 
2077                 .startup.single         = per << 
2078                 .teardown.single        = per << 
2079         },                                    << 
2080         [CPUHP_RANDOM_PREPARE] = {            << 
2081                 .name                   = "ra << 
2082                 .startup.single         = ran << 
2083                 .teardown.single        = NUL << 
2084         },                                    << 
2085         [CPUHP_WORKQUEUE_PREP] = {            << 
2086                 .name                   = "wo << 
2087                 .startup.single         = wor << 
2088                 .teardown.single        = NUL << 
2089         },                                    << 
2090         [CPUHP_HRTIMERS_PREPARE] = {          << 
2091                 .name                   = "hr << 
2092                 .startup.single         = hrt << 
2093                 .teardown.single        = NUL << 
2094         },                                    << 
2095         [CPUHP_SMPCFD_PREPARE] = {            << 
2096                 .name                   = "sm << 
2097                 .startup.single         = smp << 
2098                 .teardown.single        = smp << 
2099         },                                    << 
2100         [CPUHP_RELAY_PREPARE] = {             << 
2101                 .name                   = "re << 
2102                 .startup.single         = rel << 
2103                 .teardown.single        = NUL << 
2104         },                                    << 
2105         [CPUHP_RCUTREE_PREP] = {              << 
2106                 .name                   = "RC << 
2107                 .startup.single         = rcu << 
2108                 .teardown.single        = rcu << 
2109         },                                    << 
2110         /*                                    << 
2111          * On the tear-down path, timers_dead << 
2112          * before blk_mq_queue_reinit_notify( << 
2113          * otherwise a RCU stall occurs.      << 
2114          */                                   << 
2115         [CPUHP_TIMERS_PREPARE] = {            << 
2116                 .name                   = "ti << 
2117                 .startup.single         = tim << 
2118                 .teardown.single        = tim << 
2119         },                                    << 
2120                                               << 
2121 #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP           << 
2122         /*                                    << 
2123          * Kicks the AP alive. AP will wait i << 
2124          * the next step will release it.     << 
2125          */                                   << 
2126         [CPUHP_BP_KICK_AP] = {                << 
2127                 .name                   = "cp << 
2128                 .startup.single         = cpu << 
2129         },                                    << 
2130                                               << 
2131         /*                                    << 
2132          * Waits for the AP to reach cpuhp_ap << 
2133          * releases it for the complete bring << 
2134          */                                   << 
2135         [CPUHP_BRINGUP_CPU] = {               << 
2136                 .name                   = "cp << 
2137                 .startup.single         = cpu << 
2138                 .teardown.single        = fin << 
2139                 .cant_stop              = tru << 
2140         },                                    << 
2141 #else                                         << 
2142         /*                                    << 
2143          * All-in-one CPU bringup state which << 
2144          */                                   << 
2145         [CPUHP_BRINGUP_CPU] = {               << 
2146                 .name                   = "cp << 
2147                 .startup.single         = bri << 
2148                 .teardown.single        = fin << 
2149                 .cant_stop              = tru << 
2150         },                                    << 
2151 #endif                                        << 
2152         /* Final state before CPU kills itsel << 
2153         [CPUHP_AP_IDLE_DEAD] = {              << 
2154                 .name                   = "id << 
2155         },                                    << 
2156         /*                                    << 
2157          * Last state before CPU enters the i << 
2158          * for synchronization.               << 
2159          */                                   << 
2160         [CPUHP_AP_OFFLINE] = {                << 
2161                 .name                   = "ap << 
2162                 .cant_stop              = tru << 
2163         },                                    << 
2164         /* First state is scheduler control.  << 
2165         [CPUHP_AP_SCHED_STARTING] = {         << 
2166                 .name                   = "sc << 
2167                 .startup.single         = sch << 
2168                 .teardown.single        = sch << 
2169         },                                    << 
2170         [CPUHP_AP_RCUTREE_DYING] = {          << 
2171                 .name                   = "RC << 
2172                 .startup.single         = NUL << 
2173                 .teardown.single        = rcu << 
2174         },                                    << 
2175         [CPUHP_AP_SMPCFD_DYING] = {           << 
2176                 .name                   = "sm << 
2177                 .startup.single         = NUL << 
2178                 .teardown.single        = smp << 
2179         },                                    << 
2180         [CPUHP_AP_HRTIMERS_DYING] = {         << 
2181                 .name                   = "hr << 
2182                 .startup.single         = NUL << 
2183                 .teardown.single        = hrt << 
2184         },                                    << 
2185         [CPUHP_AP_TICK_DYING] = {             << 
2186                 .name                   = "ti << 
2187                 .startup.single         = NUL << 
2188                 .teardown.single        = tic << 
2189         },                                    << 
2190         /* Entry state on starting. Interrupt << 
2191          * state for synchronization */       << 
2192         [CPUHP_AP_ONLINE] = {                 << 
2193                 .name                   = "ap << 
2194         },                                    << 
2195         /*                                    << 
2196          * Handled on control processor until << 
2197          * this itself.                       << 
2198          */                                   << 
2199         [CPUHP_TEARDOWN_CPU] = {              << 
2200                 .name                   = "cp << 
2201                 .startup.single         = NUL << 
2202                 .teardown.single        = tak << 
2203                 .cant_stop              = tru << 
2204         },                                    << 
2205                                               << 
2206         [CPUHP_AP_SCHED_WAIT_EMPTY] = {       << 
2207                 .name                   = "sc << 
2208                 .startup.single         = NUL << 
2209                 .teardown.single        = sch << 
2210         },                                    << 
2211                                               << 
2212         /* Handle smpboot threads park/unpark << 
2213         [CPUHP_AP_SMPBOOT_THREADS] = {        << 
2214                 .name                   = "sm << 
2215                 .startup.single         = smp << 
2216                 .teardown.single        = smp << 
2217         },                                    << 
2218         [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {    << 
2219                 .name                   = "ir << 
2220                 .startup.single         = irq << 
2221                 .teardown.single        = NUL << 
2222         },                                    << 
2223         [CPUHP_AP_PERF_ONLINE] = {            << 
2224                 .name                   = "pe << 
2225                 .startup.single         = per << 
2226                 .teardown.single        = per << 
2227         },                                    << 
2228         [CPUHP_AP_WATCHDOG_ONLINE] = {        << 
2229                 .name                   = "lo << 
2230                 .startup.single         = loc << 
2231                 .teardown.single        = loc << 
2232         },                                    << 
2233         [CPUHP_AP_WORKQUEUE_ONLINE] = {       << 
2234                 .name                   = "wo << 
2235                 .startup.single         = wor << 
2236                 .teardown.single        = wor << 
2237         },                                    << 
2238         [CPUHP_AP_RANDOM_ONLINE] = {          << 
2239                 .name                   = "ra << 
2240                 .startup.single         = ran << 
2241                 .teardown.single        = NUL << 
2242         },                                    << 
2243         [CPUHP_AP_RCUTREE_ONLINE] = {         << 
2244                 .name                   = "RC << 
2245                 .startup.single         = rcu << 
2246                 .teardown.single        = rcu << 
2247         },                                    << 
2248 #endif                                        << 
2249         /*                                    << 
2250          * The dynamically registered state s << 
2251          */                                   << 
2252                                               << 
2253 #ifdef CONFIG_SMP                             << 
2254         /* Last state is scheduler control se << 
2255         [CPUHP_AP_ACTIVE] = {                 << 
2256                 .name                   = "sc << 
2257                 .startup.single         = sch << 
2258                 .teardown.single        = sch << 
2259         },                                    << 
2260 #endif                                        << 
2261                                               << 
2262         /* CPU is fully up and running. */    << 
2263         [CPUHP_ONLINE] = {                    << 
2264                 .name                   = "on << 
2265                 .startup.single         = NUL << 
2266                 .teardown.single        = NUL << 
2267         },                                    << 
2268 };                                            << 
2269                                               << 
2270 /* Sanity check for callbacks */              << 
2271 static int cpuhp_cb_check(enum cpuhp_state st << 
2272 {                                             << 
2273         if (state <= CPUHP_OFFLINE || state > << 
2274                 return -EINVAL;               << 
2275         return 0;                             << 
2276 }                                             << 
2277                                               << 
2278 /*                                            << 
2279  * Returns a free for dynamic slot assignment << 
2280  * are protected by the cpuhp_slot_states mut << 
2281  * by having no name assigned.                << 
2282  */                                           << 
2283 static int cpuhp_reserve_state(enum cpuhp_sta << 
2284 {                                             << 
2285         enum cpuhp_state i, end;              << 
2286         struct cpuhp_step *step;              << 
2287                                               << 
2288         switch (state) {                      << 
2289         case CPUHP_AP_ONLINE_DYN:             << 
2290                 step = cpuhp_hp_states + CPUH << 
2291                 end = CPUHP_AP_ONLINE_DYN_END << 
2292                 break;                        << 
2293         case CPUHP_BP_PREPARE_DYN:            << 
2294                 step = cpuhp_hp_states + CPUH << 
2295                 end = CPUHP_BP_PREPARE_DYN_EN << 
2296                 break;                        << 
2297         default:                              << 
2298                 return -EINVAL;               << 
2299         }                                     << 
2300                                               << 
2301         for (i = state; i <= end; i++, step++ << 
2302                 if (!step->name)              << 
2303                         return i;             << 
2304         }                                     << 
2305         WARN(1, "No more dynamic states avail << 
2306         return -ENOSPC;                       << 
2307 }                                             << 
2308                                               << 
2309 static int cpuhp_store_callbacks(enum cpuhp_s << 
2310                                  int (*startu << 
2311                                  int (*teardo << 
2312                                  bool multi_i << 
2313 {                                             << 
2314         /* (Un)Install the callbacks for furt << 
2315         struct cpuhp_step *sp;                << 
2316         int ret = 0;                          << 
2317                                               << 
2318         /*                                    << 
2319          * If name is NULL, then the state ge << 
2320          *                                    << 
2321          * CPUHP_AP_ONLINE_DYN and CPUHP_BP_P << 
2322          * the first allocation from these dy << 
2323          * would trigger a new allocation and << 
2324          * empty) state, leaving the callback << 
2325          * dangling, which causes wreckage on << 
2326          */                                   << 
2327         if (name && (state == CPUHP_AP_ONLINE << 
2328                      state == CPUHP_BP_PREPAR << 
2329                 ret = cpuhp_reserve_state(sta << 
2330                 if (ret < 0)                  << 
2331                         return ret;           << 
2332                 state = ret;                  << 
2333         }                                     << 
2334         sp = cpuhp_get_step(state);           << 
2335         if (name && sp->name)                 << 
2336                 return -EBUSY;                << 
2337                                               << 
2338         sp->startup.single = startup;         << 
2339         sp->teardown.single = teardown;       << 
2340         sp->name = name;                      << 
2341         sp->multi_instance = multi_instance;  << 
2342         INIT_HLIST_HEAD(&sp->list);           << 
2343         return ret;                           << 
2344 }                                             << 
2345                                               << 
2346 static void *cpuhp_get_teardown_cb(enum cpuhp << 
2347 {                                             << 
2348         return cpuhp_get_step(state)->teardow << 
2349 }                                             << 
2350                                               << 
2351 /*                                            << 
2352  * Call the startup/teardown function for a s << 
2353  * on the current CPU.                        << 
2354  */                                           << 
2355 static int cpuhp_issue_call(int cpu, enum cpu << 
2356                             struct hlist_node << 
2357 {                                             << 
2358         struct cpuhp_step *sp = cpuhp_get_ste << 
2359         int ret;                              << 
2360                                               << 
2361         /*                                    << 
2362          * If there's nothing to do, we're done << 
2363          * Relies on the union for multi_inst << 
2364          */                                   << 
2365         if (cpuhp_step_empty(bringup, sp))    << 
2366                 return 0;                     << 
2367         /*                                    << 
2368          * The non AP bound callbacks can fai << 
2369          * e.g. module removal we crash for n << 
2370          */                                   << 
2371 #ifdef CONFIG_SMP                             << 
2372         if (cpuhp_is_ap_state(state))         << 
2373                 ret = cpuhp_invoke_ap_callbac << 
2374         else                                  << 
2375                 ret = cpuhp_invoke_callback(c << 
2376 #else                                         << 
2377         ret = cpuhp_invoke_callback(cpu, stat << 
2378 #endif                                        << 
2379         BUG_ON(ret && !bringup);              << 
2380         return ret;                           << 
2381 }                                             << 
2382                                               << 
2383 /*                                            << 
2384  * Called from __cpuhp_setup_state on a recov << 
2385  *                                            << 
2386  * Note: The teardown callbacks for rollback  << 
2387  */                                           << 
2388 static void cpuhp_rollback_install(int failed << 
2389                                    struct hli << 
2390 {                                             << 
2391         int cpu;                              << 
2392                                               << 
2393         /* Roll back the already executed ste << 
2394         for_each_present_cpu(cpu) {           << 
2395                 struct cpuhp_cpu_state *st =  << 
2396                 int cpustate = st->state;     << 
2397                                               << 
2398                 if (cpu >= failedcpu)         << 
2399                         break;                << 
2400                                               << 
2401                 /* Did we invoke the startup  << 
2402                 if (cpustate >= state)        << 
2403                         cpuhp_issue_call(cpu, << 
2404         }                                     << 
2405 }                                             << 
2406                                               << 
2407 int __cpuhp_state_add_instance_cpuslocked(enu << 
2408                                           str << 
2409                                           boo << 
2410 {                                             << 
2411         struct cpuhp_step *sp;                << 
2412         int cpu;                              << 
2413         int ret;                              << 
2414                                               << 
2415         lockdep_assert_cpus_held();           << 
2416                                               << 
2417         sp = cpuhp_get_step(state);           << 
2418         if (sp->multi_instance == false)      << 
2419                 return -EINVAL;               << 
2420                                               << 
2421         mutex_lock(&cpuhp_state_mutex);       << 
2422                                               << 
2423         if (!invoke || !sp->startup.multi)    << 
2424                 goto add_node;                << 
2425                                               << 
2426         /*                                    << 
2427          * Try to call the startup callback f << 
2428          * depending on the hotplug state of  << 
2429          */                                   << 
2430         for_each_present_cpu(cpu) {           << 
2431                 struct cpuhp_cpu_state *st =  << 
2432                 int cpustate = st->state;     << 
2433                                               << 
2434                 if (cpustate < state)         << 
2435                         continue;             << 
2436                                               << 
2437                 ret = cpuhp_issue_call(cpu, s << 
2438                 if (ret) {                    << 
2439                         if (sp->teardown.mult << 
2440                                 cpuhp_rollbac << 
2441                         goto unlock;          << 
2442                 }                             << 
2443         }                                     << 
2444 add_node:                                     << 
2445         ret = 0;                              << 
2446         hlist_add_head(node, &sp->list);      << 
2447 unlock:                                       << 
2448         mutex_unlock(&cpuhp_state_mutex);     << 
2449         return ret;                           << 
2450 }                                             << 
2451                                               << 
2452 int __cpuhp_state_add_instance(enum cpuhp_sta << 
2453                                bool invoke)   << 
2454 {                                             << 
2455         int ret;                              << 
2456                                               << 
2457         cpus_read_lock();                     << 
2458         ret = __cpuhp_state_add_instance_cpus << 
2459         cpus_read_unlock();                   << 
2460         return ret;                           << 
2461 }                                             << 
2462 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance) << 
2463                                               << 
2464 /**                                           << 
2465  * __cpuhp_setup_state_cpuslocked - Setup the << 
2466  * @state:              The state to setup    << 
2467  * @name:               Name of the step      << 
2468  * @invoke:             If true, the startup  << 
2469  *                      cpu state >= @state   << 
2470  * @startup:            startup callback func << 
2471  * @teardown:           teardown callback fun << 
2472  * @multi_instance:     State is set up for m << 
2473  *                      added afterwards.     << 
2474  *                                            << 
2475  * The caller needs to hold cpus read locked  << 
2476  * Return:                                    << 
2477  *   On success:                              << 
2478  *      Positive state number if @state is CP << 
2479  *      0 for all other states                << 
2480  *   On failure: proper (negative) error code << 
2481  */                                           << 
2482 int __cpuhp_setup_state_cpuslocked(enum cpuhp << 
2483                                    const char << 
2484                                    int (*star << 
2485                                    int (*tear << 
2486                                    bool multi << 
2487 {                                             << 
2488         int cpu, ret = 0;                     << 
2489         bool dynstate;                        << 
2490                                               << 
2491         lockdep_assert_cpus_held();           << 
2492                                               << 
2493         if (cpuhp_cb_check(state) || !name)   << 
2494                 return -EINVAL;               << 
2495                                               << 
2496         mutex_lock(&cpuhp_state_mutex);       << 
2497                                               << 
2498         ret = cpuhp_store_callbacks(state, na << 
2499                                     multi_ins << 
2500                                               << 
2501         dynstate = state == CPUHP_AP_ONLINE_D << 
2502         if (ret > 0 && dynstate) {            << 
2503                 state = ret;                  << 
2504                 ret = 0;                      << 
2505         }                                     << 
2506                                               << 
2507         if (ret || !invoke || !startup)       << 
2508                 goto out;                     << 
2509                                               << 
2510         /*                                    << 
2511          * Try to call the startup callback f << 
2512          * depending on the hotplug state of  << 
2513          */                                   << 
2514         for_each_present_cpu(cpu) {           << 
2515                 struct cpuhp_cpu_state *st =  << 
2516                 int cpustate = st->state;     << 
2517                                               << 
2518                 if (cpustate < state)         << 
2519                         continue;             << 
2520                                               << 
2521                 ret = cpuhp_issue_call(cpu, s << 
2522                 if (ret) {                    << 
2523                         if (teardown)         << 
2524                                 cpuhp_rollbac << 
2525                         cpuhp_store_callbacks << 
2526                         goto out;             << 
2527                 }                             << 
2528         }                                     << 
2529 out:                                          << 
2530         mutex_unlock(&cpuhp_state_mutex);     << 
2531         /*                                    << 
2532          * If the requested state is CPUHP_AP << 
2533          * return the dynamically allocated s << 
2534          */                                   << 
2535         if (!ret && dynstate)                 << 
2536                 return state;                 << 
2537         return ret;                           << 
2538 }                                             << 
2539 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked) << 
2540                                               << 
2541 int __cpuhp_setup_state(enum cpuhp_state stat << 
2542                         const char *name, boo << 
2543                         int (*startup)(unsign << 
2544                         int (*teardown)(unsig << 
2545                         bool multi_instance)  << 
2546 {                                             << 
2547         int ret;                              << 
2548                                               << 
2549         cpus_read_lock();                     << 
2550         ret = __cpuhp_setup_state_cpuslocked( << 
2551                                               << 
2552         cpus_read_unlock();                   << 
2553         return ret;                           << 
2554 }                                             << 
2555 EXPORT_SYMBOL(__cpuhp_setup_state);           << 
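
[Editor's note] Most users reach __cpuhp_setup_state() through the cpuhp_setup_state() wrapper from <linux/cpuhotplug.h>. A minimal sketch with hypothetical names, requesting a dynamic state so that the positive return value documented above is the allocated slot:

        #include <linux/cpuhotplug.h>

        static enum cpuhp_state example_hp_state;       /* hypothetical */

        static int example_cpu_online(unsigned int cpu)
        {
                /* runs for each CPU that reaches the state, now and on later onlining */
                return 0;
        }

        static int example_cpu_offline(unsigned int cpu)
        {
                /* runs when a CPU drops back below the state */
                return 0;
        }

        static int example_hp_init(void)
        {
                int ret;

                ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                        example_cpu_online, example_cpu_offline);
                if (ret < 0)
                        return ret;
                example_hp_state = ret; /* remember the dynamically allocated state */
                return 0;
        }

Because this wrapper passes invoke=true, the online callback is run for every CPU already at or above the state before the call returns; cpuhp_setup_state_nocalls() skips that.
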
2556                                               << 
2557 int __cpuhp_state_remove_instance(enum cpuhp_ << 
2558                                   struct hlis << 
2559 {                                             << 
2560         struct cpuhp_step *sp = cpuhp_get_ste << 
2561         int cpu;                              << 
2562                                               << 
2563         BUG_ON(cpuhp_cb_check(state));        << 
2564                                               << 
2565         if (!sp->multi_instance)              << 
2566                 return -EINVAL;               << 
2567                                               << 
2568         cpus_read_lock();                     << 
2569         mutex_lock(&cpuhp_state_mutex);       << 
2570                                               << 
2571         if (!invoke || !cpuhp_get_teardown_cb << 
2572                 goto remove;                  << 
2573         /*                                    << 
2574          * Call the teardown callback for eac << 
2575          * on the hotplug state of the cpu. T << 
2576          * allowed to fail currently!         << 
2577          */                                   << 
2578         for_each_present_cpu(cpu) {           << 
2579                 struct cpuhp_cpu_state *st =  << 
2580                 int cpustate = st->state;     << 
2581                                               << 
2582                 if (cpustate >= state)        << 
2583                         cpuhp_issue_call(cpu, << 
2584         }                                     << 
2585                                               << 
2586 remove:                                       << 
2587         hlist_del(node);                      << 
2588         mutex_unlock(&cpuhp_state_mutex);     << 
2589         cpus_read_unlock();                   << 
2590                                               << 
2591         return 0;                             << 
2592 }                                             << 
2593 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instan << 
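
[Editor's note] The multi-instance entry points above (__cpuhp_state_add_instance()/__cpuhp_state_remove_instance()) let one registered state carry a list of per-device instances. A hedged sketch, with hypothetical structure and names, of how a driver typically pairs them with cpuhp_setup_state_multi():

        #include <linux/cpuhotplug.h>
        #include <linux/list.h>
        #include <linux/printk.h>

        struct example_instance {
                struct hlist_node node;         /* linked into the state's instance list */
                /* ... per-device data ... */
        };

        static enum cpuhp_state example_multi_state;    /* hypothetical */

        static int example_instance_online(unsigned int cpu, struct hlist_node *node)
        {
                struct example_instance *inst =
                        hlist_entry(node, struct example_instance, node);

                /* bring this particular instance up on @cpu */
                pr_debug("example: instance %p online on CPU%u\n", inst, cpu);
                return 0;
        }

        static int example_register_instance(struct example_instance *inst)
        {
                /* one-time setup done elsewhere:
                 *   example_multi_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                 *                              "example:online:multi",
                 *                              example_instance_online, NULL);
                 */
                return cpuhp_state_add_instance(example_multi_state, &inst->node);
        }

Removal mirrors this: cpuhp_state_remove_instance(example_multi_state, &inst->node) invokes the teardown callback (if one was registered) on all CPUs at or above the state and unlinks the node.
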
2594                                               << 
2595 /**                                              612 /**
2596  * __cpuhp_remove_state_cpuslocked - Remove t !! 613  * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
2597  * @state:      The state to remove           !! 614  * @cpu: cpu that just started
2598  * @invoke:     If true, the teardown functio << 
2599  *              cpu state >= @state           << 
2600  *                                               615  *
2601  * The caller needs to hold cpus read locked  !! 616  * This function calls the cpu_chain notifiers with CPU_STARTING.
2602  * The teardown callback is currently not all !! 617  * It must be called by the arch code on the new cpu, before the new cpu
2603  * about module removal!                      !! 618  * enables interrupts and before the "boot" cpu returns from __cpu_up().
2604  */                                              619  */
2605 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)     !! 620 void __cpuinit notify_cpu_starting(unsigned int cpu)
2606 {                                                                              << 
2607         struct cpuhp_step *sp = cpuhp_get_step(state);                         << 
2608         int cpu;                                                               << 
2609                                                                                << 
2610         BUG_ON(cpuhp_cb_check(state));                                         << 
2611                                                                                << 
2612         lockdep_assert_cpus_held();                                            << 
2613                                                                                << 
2614         mutex_lock(&cpuhp_state_mutex);                                        << 
2615         if (sp->multi_instance) {                                              << 
2616                 WARN(!hlist_empty(&sp->list),                                  << 
2617                      "Error: Removing state %d which has instances left.\n",   << 
2618                      state);                                                   << 
2619                 goto remove;                                                   << 
2620         }                                                                      << 
2621                                                                                << 
2622         if (!invoke || !cpuhp_get_teardown_cb(state))                          << 
2623                 goto remove;                                                   << 
2624                                                                                << 
2625         /*                                                                     << 
2626          * Call the teardown callback for each present cpu depending          << 
2627          * on the hotplug state of the cpu. This function is not              << 
2628          * allowed to fail currently!                                          << 
2629          */                                                                    << 
2630         for_each_present_cpu(cpu) {                                            << 
2631                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);  << 
2632                 int cpustate = st->state;                                      << 
2633                                                                                << 
2634                 if (cpustate >= state)                                         << 
2635                         cpuhp_issue_call(cpu, state, false, NULL);             << 
2636         }                                                                      << 
2637 remove:                                                                        << 
2638         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);                 << 
2639         mutex_unlock(&cpuhp_state_mutex);                                      << 
2640 }                                                                              << 
2641 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);                                << 
2642                                                                                << 
2643 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)                 << 
2644 {                                                                              << 
2645         cpus_read_lock();                                                      << 
2646         __cpuhp_remove_state_cpuslocked(state, invoke);                        << 
2647         cpus_read_unlock();                                                    << 
2648 }                                                                              << 
2649 EXPORT_SYMBOL(__cpuhp_remove_state);                                           << 
2650                                               << 
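/*
 * Editor's note: illustrative sketch, not part of kernel/cpu.c.  The common
 * single-instance counterpart of the removal code above: cpuhp_setup_state()
 * and cpuhp_remove_state() are the real wrappers around
 * __cpuhp_setup_state()/__cpuhp_remove_state(); the my_* names are invented.
 */
#include <linux/cpuhotplug.h>

static enum cpuhp_state my_state;

static int my_online(unsigned int cpu)
{
	return 0;	/* prepare per-CPU resources */
}

static int my_offline(unsigned int cpu)
{
	return 0;	/* release them; teardown is not allowed to fail */
}

static int __init my_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				my_online, my_offline);
	if (ret < 0)
		return ret;
	my_state = ret;	/* dynamic states return the allocated state number */
	return 0;
}

static void __exit my_driver_exit(void)
{
	/* Invokes my_offline() on all online CPUs, then frees the state. */
	cpuhp_remove_state(my_state);
}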
2651 #ifdef CONFIG_HOTPLUG_SMT                     << 
2652 static void cpuhp_offline_cpu_device(unsigned int cpu)                        << 
2653 {                                                                              << 
2654         struct device *dev = get_cpu_device(cpu);                              << 
2655                                                                                << 
2656         dev->offline = true;                                                   << 
2657         /* Tell user space about the state change */                           << 
2658         kobject_uevent(&dev->kobj, KOBJ_OFFLINE);                              << 
2659 }                                                                              << 
2660                                                                                << 
2661 static void cpuhp_online_cpu_device(unsigned int cpu)                          << 
2662 {                                                                              << 
2663         struct device *dev = get_cpu_device(cpu);                              << 
2664                                                                                << 
2665         dev->offline = false;                                                  << 
2666         /* Tell user space about the state change */                           << 
2667         kobject_uevent(&dev->kobj, KOBJ_ONLINE);                               << 
2668 }                                                                              << 
2669                                               << 
2670 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)                         << 
2671 {                                                                              << 
2672         int cpu, ret = 0;                                                      << 
2673                                                                                << 
2674         cpu_maps_update_begin();                                               << 
2675         for_each_online_cpu(cpu) {                                             << 
2676                 if (topology_is_primary_thread(cpu))                           << 
2677                         continue;                                              << 
2678                 /*                                                             << 
2679                  * Disable can be called with CPU_SMT_ENABLED when changing   << 
2680                  * from a higher to lower number of SMT threads per core.     << 
2681                  */                                                            << 
2682                 if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu)) << 
2683                         continue;                                              << 
2684                 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);                << 
2685                 if (ret)                                                       << 
2686                         break;                                                 << 
2687                 /*                                                             << 
2688                  * As this needs to hold the cpu maps lock it's impossible    << 
2689                  * to call device_offline() because that ends up calling      << 
2690                  * cpu_down() which takes cpu maps lock. cpu maps lock        << 
2691                  * needs to be held as this might race against in kernel      << 
2692                  * abusers of the hotplug machinery (thermal management).     << 
2693                  *                                                             << 
2694                  * So nothing would update device:offline state. That would   << 
2695                  * leave the sysfs entry stale and prevent onlining after     << 
2696                  * smt control has been changed to 'off' again. This is       << 
2697                  * called under the sysfs hotplug lock, so it is properly     << 
2698                  * serialized against the regular offline usage.              << 
2699                  */                                                            << 
2700                 cpuhp_offline_cpu_device(cpu);                                 << 
2701         }                                                                      << 
2702         if (!ret)                                                              << 
2703                 cpu_smt_control = ctrlval;                                     << 
2704         cpu_maps_update_done();                                                << 
2705         return ret;                                                            << 
2706 }                                                                              << 
2707                                               << 
2708 /* Check if the core a CPU belongs to is online */                            << 
2709 #if !defined(topology_is_core_online)                                         << 
2710 static inline bool topology_is_core_online(unsigned int cpu)                  << 
2711 {                                                                              << 
2712         return true;                                                           << 
2713 }                                                                              << 
2714 #endif                                                                         << 
2715                                                                                << 
2716 int cpuhp_smt_enable(void)                                                     << 
2717 {                                                                              << 
2718         int cpu, ret = 0;                                                      << 
2719                                                                                << 
2720         cpu_maps_update_begin();                                               << 
2721         cpu_smt_control = CPU_SMT_ENABLED;                                     << 
2722         for_each_present_cpu(cpu) {                                            << 
2723                 /* Skip online CPUs and CPUs on offline nodes */               << 
2724                 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))         << 
2725                         continue;                                              << 
2726                 if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu)) << 
2727                         continue;                                              << 
2728                 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);                           << 
2729                 if (ret)                                                       << 
2730                         break;                                                 << 
2731                 /* See comment in cpuhp_smt_disable() */                       << 
2732                 cpuhp_online_cpu_device(cpu);                                  << 
2733         }                                                                      << 
2734         cpu_maps_update_done();                                                << 
2735         return ret;                                                            << 
2736 }                                                                              << 
2737 #endif                                                                         << 
2738                                               << 
2739 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)                      << 
2740 static ssize_t state_show(struct device *dev,                                 << 
2741                           struct device_attribute *attr, char *buf)           << 
2742 {                                                                              << 
2743         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);       << 
2744                                                                                << 
2745         return sprintf(buf, "%d\n", st->state);                                << 
2746 }                                                                              << 
2747 static DEVICE_ATTR_RO(state);                                                  << 
2748                                                                                << 
2749 static ssize_t target_store(struct device *dev, struct device_attribute *attr, << 
2750                             const char *buf, size_t count)                     << 
2751 {                                                                              << 
2752         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);       << 
2753         struct cpuhp_step *sp;                                                 << 
2754         int target, ret;                                                       << 
2755                                                                                << 
2756         ret = kstrtoint(buf, 10, &target);                                     << 
2757         if (ret)                                                               << 
2758                 return ret;                                                    << 
2759                                                                                << 
2760 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL                                        << 
2761         if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)                   << 
2762                 return -EINVAL;                                                << 
2763 #else                                                                          << 
2764         if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)                 << 
2765                 return -EINVAL;                                                << 
2766 #endif                                                                         << 
2767                                                                                << 
2768         ret = lock_device_hotplug_sysfs();                                     << 
2769         if (ret)                                                               << 
2770                 return ret;                                                    << 
2771                                                                                << 
2772         mutex_lock(&cpuhp_state_mutex);                                        << 
2773         sp = cpuhp_get_step(target);                                           << 
2774         ret = !sp->name || sp->cant_stop ? -EINVAL : 0;                        << 
2775         mutex_unlock(&cpuhp_state_mutex);                                      << 
2776         if (ret)                                                               << 
2777                 goto out;                                                      << 
2778                                                                                << 
2779         if (st->state < target)                                                << 
2780                 ret = cpu_up(dev->id, target);                                 << 
2781         else if (st->state > target)                                           << 
2782                 ret = cpu_down(dev->id, target);                               << 
2783         else if (WARN_ON(st->target != target))                                << 
2784                 st->target = target;                                           << 
2785 out:                                                                           << 
2786         unlock_device_hotplug();                                               << 
2787         return ret ? ret : count;                                              << 
2788 }                                                                              << 
2789                                                                                << 
2790 static ssize_t target_show(struct device *dev,                                 << 
2791                            struct device_attribute *attr, char *buf)           << 
2792 {                                                                              << 
2793         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);       << 
2794                                                                                << 
2795         return sprintf(buf, "%d\n", st->target);                               << 
2796 }                                                                              << 
2797 static DEVICE_ATTR_RW(target);                                                 << 
2798                                               << 
2799 static ssize_t fail_store(struct device *dev, struct device_attribute *attr,  << 
2800                           const char *buf, size_t count)                       << 
2801 {                                                                              << 
2802         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);       << 
2803         struct cpuhp_step *sp;                                                 << 
2804         int fail, ret;                                                         << 
2805                                                                                << 
2806         ret = kstrtoint(buf, 10, &fail);                                       << 
2807         if (ret)                                                               << 
2808                 return ret;                                                    << 
2809                                                                                << 
2810         if (fail == CPUHP_INVALID) {                                           << 
2811                 st->fail = fail;                                               << 
2812                 return count;                                                  << 
2813         }                                                                      << 
2814                                                                                << 
2815         if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)                       << 
2816                 return -EINVAL;                                                << 
2817                                                                                << 
2818         /*                                                                     << 
2819          * Cannot fail STARTING/DYING callbacks.                               << 
2820          */                                                                    << 
2821         if (cpuhp_is_atomic_state(fail))                                       << 
2822                 return -EINVAL;                                                << 
2823                                                                                << 
2824         /*                                                                     << 
2825          * DEAD callbacks cannot fail...                                       << 
2826          * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter     << 
2827          * triggering STARTING callbacks, a failure in this state would       << 
2828          * hinder rollback.                                                    << 
2829          */                                                                    << 
2830         if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)        << 
2831                 return -EINVAL;                                                << 
2832                                                                                << 
2833         /*                                                                     << 
2834          * Cannot fail anything that doesn't have callbacks.                   << 
2835          */                                                                    << 
2836         mutex_lock(&cpuhp_state_mutex);                                        << 
2837         sp = cpuhp_get_step(fail);                                             << 
2838         if (!sp->startup.single && !sp->teardown.single)                       << 
2839                 ret = -EINVAL;                                                 << 
2840         mutex_unlock(&cpuhp_state_mutex);                                      << 
2841         if (ret)                                                               << 
2842                 return ret;                                                    << 
2843                                                                                << 
2844         st->fail = fail;                                                       << 
2845                                                                                << 
2846         return count;                                                          << 
2847 }                                                                              << 
2848                                                                                << 
2849 static ssize_t fail_show(struct device *dev,                                   << 
2850                          struct device_attribute *attr, char *buf)             << 
2851 {                                                                              << 
2852         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);       << 
2853                                                                                << 
2854         return sprintf(buf, "%d\n", st->fail);                                 << 
2855 }                                                                              << 
2856                                                                                << 
2857 static DEVICE_ATTR_RW(fail);                                                   << 
2858                                               << 
2859 static struct attribute *cpuhp_cpu_attrs[] = {                                 << 
2860         &dev_attr_state.attr,                                                  << 
2861         &dev_attr_target.attr,                                                 << 
2862         &dev_attr_fail.attr,                                                   << 
2863         NULL                                                                   << 
2864 };                                                                             << 
2865                                                                                << 
2866 static const struct attribute_group cpuhp_cpu_attr_group = {                   << 
2867         .attrs = cpuhp_cpu_attrs,                                              << 
2868         .name = "hotplug",                                                     << 
2869         NULL                                                                   << 
2870 };                                                                             << 
2871                                                                                << 
2872 static ssize_t states_show(struct device *dev,                                 << 
2873                                  struct device_attribute *attr, char *buf)     << 
2874 {                                                                              << 
2875         ssize_t cur, res = 0;                                                  << 
2876         int i;                                                                 << 
2877                                                                                << 
2878         mutex_lock(&cpuhp_state_mutex);                                        << 
2879         for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {                      << 
2880                 struct cpuhp_step *sp = cpuhp_get_step(i);                     << 
2881                                                                                << 
2882                 if (sp->name) {                                                << 
2883                         cur = sprintf(buf, "%3d: %s\n", i, sp->name);          << 
2884                         buf += cur;                                            << 
2885                         res += cur;                                            << 
2886                 }                                                              << 
2887         }                                                                      << 
2888         mutex_unlock(&cpuhp_state_mutex);                                      << 
2889         return res;                                                            << 
2890 }                                                                              << 
2891 static DEVICE_ATTR_RO(states);                                                 << 
2892                                                                                << 
2893 static struct attribute *cpuhp_cpu_root_attrs[] = {                            << 
2894         &dev_attr_states.attr,                                                 << 
2895         NULL                                                                   << 
2896 };                                                                             << 
2897                                                                                << 
2898 static const struct attribute_group cpuhp_cpu_root_attr_group = {              << 
2899         .attrs = cpuhp_cpu_root_attrs,                                         << 
2900         .name = "hotplug",                                                     << 
2901         NULL                                                                   << 
2902 };                                                                             << 
2903                                               << 
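/*
 * Editor's note: illustrative sketch, not part of kernel/cpu.c.  The groups
 * above surface as /sys/devices/system/cpu/hotplug/states (the list of state
 * names) and /sys/devices/system/cpu/cpuN/hotplug/{state,target,fail} per CPU.
 * This minimal userspace reader assumes cpu1 exists; writing a state index to
 * "target" drives the CPU towards that state (intermediate states only with
 * CONFIG_CPU_HOTPLUG_STATE_CONTROL), and "fail" injects a callback failure
 * for testing rollback.
 */
#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/cpu/cpu1/hotplug/state", "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		printf("cpu1 is at hotplug state %s", line);
	fclose(f);
	return 0;
}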
2904 #ifdef CONFIG_HOTPLUG_SMT                                                      << 
2905                                                                                << 
2906 static bool cpu_smt_num_threads_valid(unsigned int threads)                    << 
2907 {                                                                              << 
2908         if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))                        << 
2909                 return threads >= 1 && threads <= cpu_smt_max_threads;         << 
2910         return threads == 1 || threads == cpu_smt_max_threads;                 << 
2911 }                                                                              << 
2912                                                                                << 
2913 static ssize_t                                                                 << 
2914 __store_smt_control(struct device *dev, struct device_attribute *attr,         << 
2915                     const char *buf, size_t count)                             << 
2916 {                                                                              << 
2917         int ctrlval, ret, num_threads, orig_threads;                           << 
2918         bool force_off;                                                        << 
2919                                                                                << 
2920         if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)                         << 
2921                 return -EPERM;                                                 << 
2922                                                                                << 
2923         if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)                          << 
2924                 return -ENODEV;                                                << 
2925                                                                                << 
2926         if (sysfs_streq(buf, "on")) {                                          << 
2927                 ctrlval = CPU_SMT_ENABLED;                                     << 
2928                 num_threads = cpu_smt_max_threads;                             << 
2929         } else if (sysfs_streq(buf, "off")) {                                  << 
2930                 ctrlval = CPU_SMT_DISABLED;                                    << 
2931                 num_threads = 1;                                               << 
2932         } else if (sysfs_streq(buf, "forceoff")) {                             << 
2933                 ctrlval = CPU_SMT_FORCE_DISABLED;                              << 
2934                 num_threads = 1;                                               << 
2935         } else if (kstrtoint(buf, 10, &num_threads) == 0) {                    << 
2936                 if (num_threads == 1)                                          << 
2937                         ctrlval = CPU_SMT_DISABLED;                            << 
2938                 else if (cpu_smt_num_threads_valid(num_threads))               << 
2939                         ctrlval = CPU_SMT_ENABLED;                             << 
2940                 else                                                           << 
2941                         return -EINVAL;                                        << 
2942         } else {                                                               << 
2943                 return -EINVAL;                                                << 
2944         }                                                                      << 
2945                                                                                << 
2946         ret = lock_device_hotplug_sysfs();                                     << 
2947         if (ret)                                                               << 
2948                 return ret;                                                    << 
2949                                                                                << 
2950         orig_threads = cpu_smt_num_threads;                                    << 
2951         cpu_smt_num_threads = num_threads;                                     << 
2952                                                                                << 
2953         force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED; << 
2954                                                                                << 
2955         if (num_threads > orig_threads)                                        << 
2956                 ret = cpuhp_smt_enable();                                      << 
2957         else if (num_threads < orig_threads || force_off)                      << 
2958                 ret = cpuhp_smt_disable(ctrlval);                              << 
2959                                                                                << 
2960         unlock_device_hotplug();                                               << 
2961         return ret ? ret : count;                                              << 
2962 }                                                                              << 
2963                                                                                << 
2964 #else /* !CONFIG_HOTPLUG_SMT */                                                << 
2965 static ssize_t                                                                 << 
2966 __store_smt_control(struct device *dev, struct device_attribute *attr,         << 
2967                     const char *buf, size_t count)                             << 
2968 {                                                                              << 
2969         return -ENODEV;                                                        << 
2970 }                                                                              << 
2971 #endif /* CONFIG_HOTPLUG_SMT */                                                << 
2972                                                                                << 
2973 static const char *smt_states[] = {                                            << 
2974         [CPU_SMT_ENABLED]               = "on",                                << 
2975         [CPU_SMT_DISABLED]              = "off",                               << 
2976         [CPU_SMT_FORCE_DISABLED]        = "forceoff",                          << 
2977         [CPU_SMT_NOT_SUPPORTED]         = "notsupported",                      << 
2978         [CPU_SMT_NOT_IMPLEMENTED]       = "notimplemented",                    << 
2979 };                                                                             << 
2980                                               << 
2981 static ssize_t control_show(struct device *dev,                                << 
2982                             struct device_attribute *attr, char *buf)          << 
2983 {                                                                              << 
2984         const char *state = smt_states[cpu_smt_control];                       << 
2985                                                                                << 
2986 #ifdef CONFIG_HOTPLUG_SMT                                                      << 
2987         /*                                                                     << 
2988          * If SMT is enabled but not all threads are enabled then show the    << 
2989          * number of threads. If all threads are enabled show the state       << 
2990          * name.                                                               << 
2991          */                                                                    << 
2992         if (cpu_smt_control == CPU_SMT_ENABLED &&                              << 
2993             cpu_smt_num_threads != cpu_smt_max_threads)                        << 
2994                 return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);           << 
2995 #endif                                                                         << 
2996                                                                                << 
2997         return sysfs_emit(buf, "%s\n", state);                                 << 
2998 }                                                                              << 
2999                                                                                << 
3000 static ssize_t control_store(struct device *dev, struct device_attribute *attr, << 
3001                              const char *buf, size_t count)                     << 
3002 {                                                                                 621 {
3003         return __store_smt_control(dev, attr, buf, count);                     !! 622         unsigned long val = CPU_STARTING;
3004 }                                                                              << 
3005 static DEVICE_ATTR_RW(control);                                                << 
3006                                               << 
3007 static ssize_t active_show(struct device *dev,                                 << 
3008                            struct device_attribute *attr, char *buf)           << 
3009 {                                                                              << 
3010         return sysfs_emit(buf, "%d\n", sched_smt_active());                    << 
3011 }                                                                              << 
3012 static DEVICE_ATTR_RO(active);                                                 << 
3013                                                                                << 
3014 static struct attribute *cpuhp_smt_attrs[] = {                                 << 
3015         &dev_attr_control.attr,                                                << 
3016         &dev_attr_active.attr,                                                 << 
3017         NULL                                                                   << 
3018 };                                                                             << 
3019                                                                                << 
3020 static const struct attribute_group cpuhp_smt_attr_group = {                   << 
3021         .attrs = cpuhp_smt_attrs,                                              << 
3022         .name = "smt",                                                         << 
3023         NULL                                                                   << 
3024 };                                                                             << 
3025                                                                                << 
3026 static int __init cpu_smt_sysfs_init(void)                                     << 
3027 {                                                                              << 
3028         struct device *dev_root;                                               << 
3029         int ret = -ENODEV;                                                     << 
3030                                                                                   623 
3031         dev_root = bus_get_dev_root(&cpu_subsys);                              !! 624 #ifdef CONFIG_PM_SLEEP_SMP
3032         if (dev_root) {                                                        !! 625         if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
3033                 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group); !! 626                 val = CPU_STARTING_FROZEN;
3034                 put_device(dev_root);                                          !! 627 #endif /* CONFIG_PM_SLEEP_SMP */
3035         }                                                                      !! 628         cpu_notify(val, (void *)(long)cpu);
3036         return ret;                                                            << 
3037 }                                                                                 629 }
3038                                                                                   630 
3039 static int __init cpuhp_sysfs_init(void)                                       !! 631 #endif /* CONFIG_SMP */
3040 {                                                                              << 
3041         struct device *dev_root;                                               << 
3042         int cpu, ret;                                                          << 
3043                                                                                << 
3044         ret = cpu_smt_sysfs_init();                                            << 
3045         if (ret)                                                               << 
3046                 return ret;                                                    << 
3047                                                                                << 
3048         dev_root = bus_get_dev_root(&cpu_subsys);                              << 
3049         if (dev_root) {                                                        << 
3050                 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group); << 
3051                 put_device(dev_root);                                          << 
3052                 if (ret)                                                       << 
3053                         return ret;                                            << 
3054         }                                                                      << 
3055                                                                                << 
3056         for_each_possible_cpu(cpu) {                                           << 
3057                 struct device *dev = get_cpu_device(cpu);                      << 
3058                                                                                << 
3059                 if (!dev)                                                      << 
3060                         continue;                                              << 
3061                 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);   << 
3062                 if (ret)                                                       << 
3063                         return ret;                                            << 
3064         }                                                                      << 
3065         return 0;                                                              << 
3066 }                                                                              << 
3067 device_initcall(cpuhp_sysfs_init);                                             << 
3068 #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */                                << 
3069                                                  632 
3070 /*                                               633 /*
3071  * cpu_bit_bitmap[] is a special, "compressed    634  * cpu_bit_bitmap[] is a special, "compressed" data structure that
3072  * represents all NR_CPUS bits binary values     635  * represents all NR_CPUS bits binary values of 1<<nr.
3073  *                                               636  *
3074  * It is used by cpumask_of() to get a consta    637  * It is used by cpumask_of() to get a constant address to a CPU
3075  * mask value that has a single bit set only.    638  * mask value that has a single bit set only.
3076  */                                              639  */
3077                                                  640 
3078 /* cpu_bit_bitmap[0] is empty - so we can bac    641 /* cpu_bit_bitmap[0] is empty - so we can back into it */
3079 #define MASK_DECLARE_1(x)       [x+1][0] = (1    642 #define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
3080 #define MASK_DECLARE_2(x)       MASK_DECLARE_    643 #define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
3081 #define MASK_DECLARE_4(x)       MASK_DECLARE_    644 #define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
3082 #define MASK_DECLARE_8(x)       MASK_DECLARE_    645 #define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
3083                                                  646 
3084 const unsigned long cpu_bit_bitmap[BITS_PER_L    647 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
3085                                                  648 
3086         MASK_DECLARE_8(0),      MASK_DECLARE_    649         MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
3087         MASK_DECLARE_8(16),     MASK_DECLARE_    650         MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
3088 #if BITS_PER_LONG > 32                           651 #if BITS_PER_LONG > 32
3089         MASK_DECLARE_8(32),     MASK_DECLARE_    652         MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
3090         MASK_DECLARE_8(48),     MASK_DECLARE_    653         MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
3091 #endif                                           654 #endif
3092 };                                               655 };
3093 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);               656 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
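/*
 * Editor's note: illustrative sketch, not part of kernel/cpu.c.  It mirrors
 * how get_cpu_mask()/cpumask_of() in <linux/cpumask.h> use the table above:
 * pick the row whose word 0 has bit (cpu % BITS_PER_LONG) set, then step the
 * pointer back by cpu / BITS_PER_LONG longs so that bit ends up in the right
 * word of the mask.  Backing into the preceding rows (including the all-zero
 * row 0) supplies the zero words in front of the set bit.
 */
static inline const struct cpumask *example_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;	/* back into the preceding rows */
	return to_cpumask(p);
}
/* e.g. cpumask_test_cpu(cpu, example_cpu_mask(cpu)) is always true */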
3094                                                  657 
3095 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) =    658 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
3096 EXPORT_SYMBOL(cpu_all_bits);                     659 EXPORT_SYMBOL(cpu_all_bits);
3097                                                  660 
3098 #ifdef CONFIG_INIT_ALL_POSSIBLE                  661 #ifdef CONFIG_INIT_ALL_POSSIBLE
3099 struct cpumask __cpu_possible_mask __ro_after !! 662 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
3100         = {CPU_BITS_ALL};                     !! 663         = CPU_BITS_ALL;
3101 #else                                            664 #else
3102 struct cpumask __cpu_possible_mask __ro_after !! 665 static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
3103 #endif                                           666 #endif
3104 EXPORT_SYMBOL(__cpu_possible_mask);           !! 667 const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
3105                                               !! 668 EXPORT_SYMBOL(cpu_possible_mask);
3106 struct cpumask __cpu_online_mask __read_mostl << 
3107 EXPORT_SYMBOL(__cpu_online_mask);             << 
3108                                                  669 
3109 struct cpumask __cpu_enabled_mask __read_most !! 670 static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
3110 EXPORT_SYMBOL(__cpu_enabled_mask);            !! 671 const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
                                                   >> 672 EXPORT_SYMBOL(cpu_online_mask);
3111                                                  673 
3112 struct cpumask __cpu_present_mask __read_most !! 674 static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
3113 EXPORT_SYMBOL(__cpu_present_mask);            !! 675 const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
                                                   >> 676 EXPORT_SYMBOL(cpu_present_mask);
3114                                                  677 
3115 struct cpumask __cpu_active_mask __read_mostl !! 678 static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
3116 EXPORT_SYMBOL(__cpu_active_mask);             !! 679 const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
                                                   >> 680 EXPORT_SYMBOL(cpu_active_mask);
3117                                                  681 
3118 struct cpumask __cpu_dying_mask __read_mostly !! 682 void set_cpu_possible(unsigned int cpu, bool possible)
3119 EXPORT_SYMBOL(__cpu_dying_mask);              << 
3120                                               << 
3121 atomic_t __num_online_cpus __read_mostly;     << 
3122 EXPORT_SYMBOL(__num_online_cpus);             << 
3123                                               << 
3124 void init_cpu_present(const struct cpumask *s << 
3125 {                                             << 
3126         cpumask_copy(&__cpu_present_mask, src << 
3127 }                                             << 
3128                                               << 
3129 void init_cpu_possible(const struct cpumask * << 
3130 {                                                683 {
3131         cpumask_copy(&__cpu_possible_mask, sr !! 684         if (possible)
                                                   >> 685                 cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
                                                   >> 686         else
                                                   >> 687                 cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
3132 }                                                688 }
3133                                                  689 
3134 void init_cpu_online(const struct cpumask *sr !! 690 void set_cpu_present(unsigned int cpu, bool present)
3135 {                                                691 {
3136         cpumask_copy(&__cpu_online_mask, src) !! 692         if (present)
                                                   >> 693                 cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
                                                   >> 694         else
                                                   >> 695                 cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
3137 }                                                696 }
3138                                                  697 
3139 void set_cpu_online(unsigned int cpu, bool on    698 void set_cpu_online(unsigned int cpu, bool online)
3140 {                                                699 {
3141         /*                                                                     << 
3142          * atomic_inc/dec() is required to handle the horrid abuse of this    << 
3143          * function by the reboot and kexec code which invoke it from an      << 
3144          * IPI/NMI broadcasts when shutting down CPUs. Invocation from        << 
3145          * regular CPU hotplug is properly serialized.                         << 
3146          *                                                                     << 
3147          * Note, that the fact that __num_online_cpus is of type atomic_t     << 
3148          * does not protect readers which are not serialized against          << 
3149          * concurrent hotplug operations.                                      << 
3150          */                                                                    << 
3151         if (online) {                                                             700         if (online) {
3152                 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))        !! 701                 cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
3153                         atomic_inc(&__num_online_cpus);                        !! 702                 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
3154         } else {                                                                  703         } else {
3155                 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))       !! 704                 cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
3156                         atomic_dec(&__num_online_cpus);                        << 
3157         }                                        705         }
3158 }                                                706 }
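/*
 * Editor's note: illustrative sketch, not part of kernel/cpu.c.  As the
 * comment in set_cpu_online() says, the atomic counter does not protect
 * readers that are not serialized against concurrent hotplug.  A reader that
 * needs a stable view takes the hotplug read lock, e.g.:
 */
static unsigned int count_online_cpus_stable(void)
{
	unsigned int n;

	cpus_read_lock();		/* blocks cpu_up()/cpu_down() */
	n = num_online_cpus();		/* stable until cpus_read_unlock() */
	cpus_read_unlock();
	return n;
}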
3159                                                  707 
3160 /*                                            !! 708 void set_cpu_active(unsigned int cpu, bool active)
3161  * Activate the first processor.              << 
3162  */                                           << 
3163 void __init boot_cpu_init(void)               << 
3164 {                                             << 
3165         int cpu = smp_processor_id();         << 
3166                                               << 
3167         /* Mark the boot cpu "present", "onli << 
3168         set_cpu_online(cpu, true);            << 
3169         set_cpu_active(cpu, true);            << 
3170         set_cpu_present(cpu, true);           << 
3171         set_cpu_possible(cpu, true);          << 
3172                                               << 
3173 #ifdef CONFIG_SMP                             << 
3174         __boot_cpu_id = cpu;                  << 
3175 #endif                                        << 
3176 }                                             << 
3177                                               << 
3178 /*                                            << 
3179  * Must be called _AFTER_ setting up the per_ << 
3180  */                                           << 
3181 void __init boot_cpu_hotplug_init(void)       << 
3182 {                                                709 {
3183 #ifdef CONFIG_SMP                             !! 710         if (active)
3184         cpumask_set_cpu(smp_processor_id(), & !! 711                 cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
3185         atomic_set(this_cpu_ptr(&cpuhp_state. << 
3186 #endif                                        << 
3187         this_cpu_write(cpuhp_state.state, CPU << 
3188         this_cpu_write(cpuhp_state.target, CP << 
3189 }                                             << 
3190                                               << 
3191 #ifdef CONFIG_CPU_MITIGATIONS                                                  << 
3192 /*                                                                             << 
3193  * These are used for a global "mitigations=" cmdline option for toggling     << 
3194  * optional CPU mitigations.                                                   << 
3195  */                                                                            << 
3196 enum cpu_mitigations {                                                         << 
3197         CPU_MITIGATIONS_OFF,                                                   << 
3198         CPU_MITIGATIONS_AUTO,                                                  << 
3199         CPU_MITIGATIONS_AUTO_NOSMT,                                            << 
3200 };                                                                             << 
3201                                                                                << 
3202 static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO; << 
3203                                                                                << 
3204 static int __init mitigations_parse_cmdline(char *arg)                         << 
3205 {                                                                              << 
3206         if (!strcmp(arg, "off"))                                               << 
3207                 cpu_mitigations = CPU_MITIGATIONS_OFF;                         << 
3208         else if (!strcmp(arg, "auto"))                                         << 
3209                 cpu_mitigations = CPU_MITIGATIONS_AUTO;                        << 
3210         else if (!strcmp(arg, "auto,nosmt"))                                   << 
3211                 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;                  << 
3212         else                                                                      712         else
3213                 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n", !! 713                 cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
3214                         arg);                                                  << 
3215                                                                                << 
3216         return 0;                                                              << 
3217 }                                                                                 714 }
3218                                                                                   715 
3219 /* mitigations=off */                                                          !! 716 void init_cpu_present(const struct cpumask *src)
3220 bool cpu_mitigations_off(void)                                                 << 
3221 {                                                                                 717 {
3222         return cpu_mitigations == CPU_MITIGATIONS_OFF;                         !! 718         cpumask_copy(to_cpumask(cpu_present_bits), src);
3223 }                                                                                 719 }
3224 EXPORT_SYMBOL_GPL(cpu_mitigations_off);                                        << 
3225                                                                                   720 
3226 /* mitigations=auto,nosmt */                                                   !! 721 void init_cpu_possible(const struct cpumask *src)
3227 bool cpu_mitigations_auto_nosmt(void)                                          << 
3228 {                                                                                 722 {
3229         return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;                  !! 723         cpumask_copy(to_cpumask(cpu_possible_bits), src);
3230 }                                                                                 724 }
3231 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);                                 !! 725 
3232 #else                                                                          !! 726 void init_cpu_online(const struct cpumask *src)
3233 static int __init mitigations_parse_cmdline(char *arg)                         << 
3234 {                                                                                 727 {
3235         pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n"); !! 728         cpumask_copy(to_cpumask(cpu_online_bits), src);
3236         return 0;                                                              << 
3237 }                                                                                 729 }
3238 #endif                                                                         << 
3239 early_param("mitigations", mitigations_parse_cmdline);                         << 
3240                                                  730 
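/*
 * Editor's note: illustrative sketch, not part of kernel/cpu.c.  A typical
 * consumer of the "mitigations=" parsing above, modelled on the arch
 * speculation-mitigation code.  The mitigation itself is invented, but
 * cpu_mitigations_off(), cpu_mitigations_auto_nosmt() and cpu_smt_disable()
 * are the real interfaces.
 */
static void __init example_select_mitigation(void)
{
	if (cpu_mitigations_off())
		return;			/* mitigations=off: leave it unmitigated */

	/* ... enable the default mitigation for the vulnerability here ... */

	if (cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);	/* mitigations=auto,nosmt */
}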
