
TOMOYO Linux Cross Reference
Linux/kernel/cpu.c


Diff markup

Differences between /kernel/cpu.c (Version linux-6.12-rc7) and /kernel/cpu.c (Version linux-4.11.12)


  1 /* CPU control.                                     1 /* CPU control.
  2  * (C) 2001, 2002, 2003, 2004 Rusty Russell         2  * (C) 2001, 2002, 2003, 2004 Rusty Russell
  3  *                                                  3  *
  4  * This code is licenced under the GPL.             4  * This code is licenced under the GPL.
  5  */                                                 5  */
  6 #include <linux/sched/mm.h>                    << 
  7 #include <linux/proc_fs.h>                          6 #include <linux/proc_fs.h>
  8 #include <linux/smp.h>                              7 #include <linux/smp.h>
  9 #include <linux/init.h>                             8 #include <linux/init.h>
 10 #include <linux/notifier.h>                         9 #include <linux/notifier.h>
 11 #include <linux/sched/signal.h>                    10 #include <linux/sched/signal.h>
 12 #include <linux/sched/hotplug.h>                   11 #include <linux/sched/hotplug.h>
 13 #include <linux/sched/isolation.h>             << 
 14 #include <linux/sched/task.h>                      12 #include <linux/sched/task.h>
 15 #include <linux/sched/smt.h>                   << 
 16 #include <linux/unistd.h>                          13 #include <linux/unistd.h>
 17 #include <linux/cpu.h>                             14 #include <linux/cpu.h>
 18 #include <linux/oom.h>                             15 #include <linux/oom.h>
 19 #include <linux/rcupdate.h>                        16 #include <linux/rcupdate.h>
 20 #include <linux/delay.h>                       << 
 21 #include <linux/export.h>                          17 #include <linux/export.h>
 22 #include <linux/bug.h>                             18 #include <linux/bug.h>
 23 #include <linux/kthread.h>                         19 #include <linux/kthread.h>
 24 #include <linux/stop_machine.h>                    20 #include <linux/stop_machine.h>
 25 #include <linux/mutex.h>                           21 #include <linux/mutex.h>
 26 #include <linux/gfp.h>                             22 #include <linux/gfp.h>
 27 #include <linux/suspend.h>                         23 #include <linux/suspend.h>
 28 #include <linux/lockdep.h>                         24 #include <linux/lockdep.h>
 29 #include <linux/tick.h>                            25 #include <linux/tick.h>
 30 #include <linux/irq.h>                             26 #include <linux/irq.h>
 31 #include <linux/nmi.h>                         << 
 32 #include <linux/smpboot.h>                         27 #include <linux/smpboot.h>
 33 #include <linux/relay.h>                           28 #include <linux/relay.h>
 34 #include <linux/slab.h>                            29 #include <linux/slab.h>
 35 #include <linux/scs.h>                         << 
 36 #include <linux/percpu-rwsem.h>                << 
 37 #include <linux/cpuset.h>                      << 
 38 #include <linux/random.h>                      << 
 39 #include <linux/cc_platform.h>                 << 
 40                                                    30 
 41 #include <trace/events/power.h>                    31 #include <trace/events/power.h>
 42 #define CREATE_TRACE_POINTS                        32 #define CREATE_TRACE_POINTS
 43 #include <trace/events/cpuhp.h>                    33 #include <trace/events/cpuhp.h>
 44                                                    34 
 45 #include "smpboot.h"                               35 #include "smpboot.h"
 46                                                    36 
 47 /**                                                37 /**
 48  * struct cpuhp_cpu_state - Per cpu hotplug st !!  38  * cpuhp_cpu_state - Per cpu hotplug state storage
 49  * @state:      The current cpu state              39  * @state:      The current cpu state
 50  * @target:     The target state                   40  * @target:     The target state
 51  * @fail:       Current CPU hotplug callback s << 
 52  * @thread:     Pointer to the hotplug thread      41  * @thread:     Pointer to the hotplug thread
 53  * @should_run: Thread should execute              42  * @should_run: Thread should execute
 54  * @rollback:   Perform a rollback                 43  * @rollback:   Perform a rollback
 55  * @single:     Single callback invocation         44  * @single:     Single callback invocation
 56  * @bringup:    Single callback bringup or tea     45  * @bringup:    Single callback bringup or teardown selector
 57  * @node:       Remote CPU node; for multi-ins << 
 58  *              single entry callback for inst << 
 59  * @last:       For multi-instance rollback, r << 
 60  * @cb_state:   The state for a single callbac     46  * @cb_state:   The state for a single callback (install/uninstall)
 61  * @result:     Result of the operation            47  * @result:     Result of the operation
 62  * @ap_sync_state:      State for AP synchroni !!  48  * @done:       Signal completion to the issuer of the task
 63  * @done_up:    Signal completion to the issue << 
 64  * @done_down:  Signal completion to the issue << 
 65  */                                                49  */
 66 struct cpuhp_cpu_state {                           50 struct cpuhp_cpu_state {
 67         enum cpuhp_state        state;             51         enum cpuhp_state        state;
 68         enum cpuhp_state        target;            52         enum cpuhp_state        target;
 69         enum cpuhp_state        fail;          << 
 70 #ifdef CONFIG_SMP                                  53 #ifdef CONFIG_SMP
 71         struct task_struct      *thread;           54         struct task_struct      *thread;
 72         bool                    should_run;        55         bool                    should_run;
 73         bool                    rollback;          56         bool                    rollback;
 74         bool                    single;            57         bool                    single;
 75         bool                    bringup;           58         bool                    bringup;
 76         struct hlist_node       *node;             59         struct hlist_node       *node;
 77         struct hlist_node       *last;         << 
 78         enum cpuhp_state        cb_state;          60         enum cpuhp_state        cb_state;
 79         int                     result;            61         int                     result;
 80         atomic_t                ap_sync_state; !!  62         struct completion       done;
 81         struct completion       done_up;       << 
 82         struct completion       done_down;     << 
 83 #endif                                             63 #endif
 84 };                                                 64 };
 85                                                    65 
 86 static DEFINE_PER_CPU(struct cpuhp_cpu_state,  !!  66 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
 87         .fail = CPUHP_INVALID,                 << 
 88 };                                             << 
 89                                                << 
 90 #ifdef CONFIG_SMP                              << 
 91 cpumask_t cpus_booted_once_mask;               << 
 92 #endif                                         << 
 93                                                << 
 94 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_ << 
 95 static struct lockdep_map cpuhp_state_up_map = << 
 96         STATIC_LOCKDEP_MAP_INIT("cpuhp_state-u << 
 97 static struct lockdep_map cpuhp_state_down_map << 
 98         STATIC_LOCKDEP_MAP_INIT("cpuhp_state-d << 
 99                                                << 
100                                                << 
101 static inline void cpuhp_lock_acquire(bool bri << 
102 {                                              << 
103         lock_map_acquire(bringup ? &cpuhp_stat << 
104 }                                              << 
105                                                << 
106 static inline void cpuhp_lock_release(bool bri << 
107 {                                              << 
108         lock_map_release(bringup ? &cpuhp_stat << 
109 }                                              << 
110 #else                                          << 
111                                                << 
112 static inline void cpuhp_lock_acquire(bool bri << 
113 static inline void cpuhp_lock_release(bool bri << 
114                                                << 
115 #endif                                         << 
116                                                    67 
117 /**                                                68 /**
118  * struct cpuhp_step - Hotplug state machine s !!  69  * cpuhp_step - Hotplug state machine step
119  * @name:       Name of the step                   70  * @name:       Name of the step
120  * @startup:    Startup function of the step       71  * @startup:    Startup function of the step
121  * @teardown:   Teardown function of the step      72  * @teardown:   Teardown function of the step
                                                   >>  73  * @skip_onerr: Do not invoke the functions on error rollback
                                                   >>  74  *              Will go away once the notifiers are gone
122  * @cant_stop:  Bringup/teardown can't be stop     75  * @cant_stop:  Bringup/teardown can't be stopped at this step
123  * @multi_instance:     State has multiple ins << 
124  */                                                76  */
125 struct cpuhp_step {                                77 struct cpuhp_step {
126         const char              *name;             78         const char              *name;
127         union {                                    79         union {
128                 int             (*single)(unsi     80                 int             (*single)(unsigned int cpu);
129                 int             (*multi)(unsig     81                 int             (*multi)(unsigned int cpu,
130                                          struc     82                                          struct hlist_node *node);
131         } startup;                                 83         } startup;
132         union {                                    84         union {
133                 int             (*single)(unsi     85                 int             (*single)(unsigned int cpu);
134                 int             (*multi)(unsig     86                 int             (*multi)(unsigned int cpu,
135                                          struc     87                                          struct hlist_node *node);
136         } teardown;                                88         } teardown;
137         /* private: */                         << 
138         struct hlist_head       list;              89         struct hlist_head       list;
139         /* public: */                          !!  90         bool                    skip_onerr;
140         bool                    cant_stop;         91         bool                    cant_stop;
141         bool                    multi_instance     92         bool                    multi_instance;
142 };                                                 93 };
143                                                    94 
144 static DEFINE_MUTEX(cpuhp_state_mutex);            95 static DEFINE_MUTEX(cpuhp_state_mutex);
145 static struct cpuhp_step cpuhp_hp_states[];    !!  96 static struct cpuhp_step cpuhp_bp_states[];
                                                   >>  97 static struct cpuhp_step cpuhp_ap_states[];
146                                                    98 
147 static struct cpuhp_step *cpuhp_get_step(enum  !!  99 static bool cpuhp_is_ap_state(enum cpuhp_state state)
148 {                                                 100 {
149         return cpuhp_hp_states + state;        !! 101         /*
                                                   >> 102          * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
                                                   >> 103          * purposes as that state is handled explicitly in cpu_down.
                                                   >> 104          */
                                                   >> 105         return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
150 }                                                 106 }
151                                                   107 
152 static bool cpuhp_step_empty(bool bringup, str !! 108 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
153 {                                                 109 {
154         return bringup ? !step->startup.single !! 110         struct cpuhp_step *sp;
                                                   >> 111 
                                                   >> 112         sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
                                                   >> 113         return sp + state;
155 }                                                 114 }
156                                                   115 
157 /**                                               116 /**
158  * cpuhp_invoke_callback - Invoke the callback !! 117  * cpuhp_invoke_callback _ Invoke the callbacks for a given state
159  * @cpu:        The cpu for which the callback    118  * @cpu:        The cpu for which the callback should be invoked
160  * @state:      The state to do callbacks for  !! 119  * @step:       The step in the state machine
161  * @bringup:    True if the bringup callback s    120  * @bringup:    True if the bringup callback should be invoked
162  * @node:       For multi-instance, do a singl << 
163  * @lastp:      For multi-instance rollback, r << 
164  *                                                121  *
165  * Called from cpu hotplug and from the state     122  * Called from cpu hotplug and from the state register machinery.
166  *                                             << 
167  * Return: %0 on success or a negative errno c << 
168  */                                               123  */
169 static int cpuhp_invoke_callback(unsigned int     124 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
170                                  bool bringup, !! 125                                  bool bringup, struct hlist_node *node)
171                                  struct hlist_ << 
172 {                                                 126 {
173         struct cpuhp_cpu_state *st = per_cpu_p    127         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
174         struct cpuhp_step *step = cpuhp_get_st    128         struct cpuhp_step *step = cpuhp_get_step(state);
175         int (*cbm)(unsigned int cpu, struct hl    129         int (*cbm)(unsigned int cpu, struct hlist_node *node);
176         int (*cb)(unsigned int cpu);              130         int (*cb)(unsigned int cpu);
177         int ret, cnt;                             131         int ret, cnt;
178                                                   132 
179         if (st->fail == state) {               << 
180                 st->fail = CPUHP_INVALID;      << 
181                 return -EAGAIN;                << 
182         }                                      << 
183                                                << 
184         if (cpuhp_step_empty(bringup, step)) { << 
185                 WARN_ON_ONCE(1);               << 
186                 return 0;                      << 
187         }                                      << 
188                                                << 
189         if (!step->multi_instance) {              133         if (!step->multi_instance) {
190                 WARN_ON_ONCE(lastp && *lastp); << 
191                 cb = bringup ? step->startup.s    134                 cb = bringup ? step->startup.single : step->teardown.single;
192                                                !! 135                 if (!cb)
                                                   >> 136                         return 0;
193                 trace_cpuhp_enter(cpu, st->tar    137                 trace_cpuhp_enter(cpu, st->target, state, cb);
194                 ret = cb(cpu);                    138                 ret = cb(cpu);
195                 trace_cpuhp_exit(cpu, st->stat    139                 trace_cpuhp_exit(cpu, st->state, state, ret);
196                 return ret;                       140                 return ret;
197         }                                         141         }
198         cbm = bringup ? step->startup.multi :     142         cbm = bringup ? step->startup.multi : step->teardown.multi;
                                                   >> 143         if (!cbm)
                                                   >> 144                 return 0;
199                                                   145 
200         /* Single invocation for instance add/    146         /* Single invocation for instance add/remove */
201         if (node) {                               147         if (node) {
202                 WARN_ON_ONCE(lastp && *lastp); << 
203                 trace_cpuhp_multi_enter(cpu, s    148                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
204                 ret = cbm(cpu, node);             149                 ret = cbm(cpu, node);
205                 trace_cpuhp_exit(cpu, st->stat    150                 trace_cpuhp_exit(cpu, st->state, state, ret);
206                 return ret;                       151                 return ret;
207         }                                         152         }
208                                                   153 
209         /* State transition. Invoke on all ins    154         /* State transition. Invoke on all instances */
210         cnt = 0;                                  155         cnt = 0;
211         hlist_for_each(node, &step->list) {       156         hlist_for_each(node, &step->list) {
212                 if (lastp && node == *lastp)   << 
213                         break;                 << 
214                                                << 
215                 trace_cpuhp_multi_enter(cpu, s    157                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
216                 ret = cbm(cpu, node);             158                 ret = cbm(cpu, node);
217                 trace_cpuhp_exit(cpu, st->stat    159                 trace_cpuhp_exit(cpu, st->state, state, ret);
218                 if (ret) {                     !! 160                 if (ret)
219                         if (!lastp)            !! 161                         goto err;
220                                 goto err;      << 
221                                                << 
222                         *lastp = node;         << 
223                         return ret;            << 
224                 }                              << 
225                 cnt++;                            162                 cnt++;
226         }                                         163         }
227         if (lastp)                             << 
228                 *lastp = NULL;                 << 
229         return 0;                                 164         return 0;
230 err:                                              165 err:
231         /* Rollback the instances if one faile    166         /* Rollback the instances if one failed */
232         cbm = !bringup ? step->startup.multi :    167         cbm = !bringup ? step->startup.multi : step->teardown.multi;
233         if (!cbm)                                 168         if (!cbm)
234                 return ret;                       169                 return ret;
235                                                   170 
236         hlist_for_each(node, &step->list) {       171         hlist_for_each(node, &step->list) {
237                 if (!cnt--)                       172                 if (!cnt--)
238                         break;                    173                         break;
239                                                !! 174                 cbm(cpu, node);
240                 trace_cpuhp_multi_enter(cpu, s << 
241                 ret = cbm(cpu, node);          << 
242                 trace_cpuhp_exit(cpu, st->stat << 
243                 /*                             << 
244                  * Rollback must not fail,     << 
245                  */                            << 
246                 WARN_ON_ONCE(ret);             << 
247         }                                         175         }
248         return ret;                               176         return ret;
249 }                                                 177 }
250                                                   178 
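For reference, the states this machinery invokes are registered through the cpuhp_setup_state*() family declared in <linux/cpuhotplug.h>. A minimal sketch of a single-instance registration on the v6.12 side, with hypothetical "mydrv" callback names and a dynamically allocated online state:

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    /* Hypothetical per-CPU bringup/teardown hooks of a driver. */
    static int mydrv_cpu_online(unsigned int cpu)
    {
            /* Set up per-CPU resources for @cpu. */
            return 0;
    }

    static int mydrv_cpu_offline(unsigned int cpu)
    {
            /* Quiesce and release per-CPU resources for @cpu. */
            return 0;
    }

    static int __init mydrv_init(void)
    {
            int state;

            /*
             * CPUHP_AP_ONLINE_DYN requests a dynamically allocated state in
             * the online section; the return value is the allocated state
             * number or a negative errno.
             */
            state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                      mydrv_cpu_online, mydrv_cpu_offline);
            return state < 0 ? state : 0;
    }

Multi-instance states are registered with cpuhp_setup_state_multi() and populated per node via cpuhp_state_add_instance(); that is what the @node handling in cpuhp_invoke_callback() above services.
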
251 #ifdef CONFIG_SMP                                 179 #ifdef CONFIG_SMP
252 static bool cpuhp_is_ap_state(enum cpuhp_state << 
253 {                                              << 
254         /*                                     << 
255          * The extra check for CPUHP_TEARDOWN_ << 
256          * purposes as that state is handled e << 
257          */                                    << 
258         return state > CPUHP_BRINGUP_CPU && st << 
259 }                                              << 
260                                                << 
261 static inline void wait_for_ap_thread(struct c << 
262 {                                              << 
263         struct completion *done = bringup ? &s << 
264         wait_for_completion(done);             << 
265 }                                              << 
266                                                << 
267 static inline void complete_ap_thread(struct c << 
268 {                                              << 
269         struct completion *done = bringup ? &s << 
270         complete(done);                        << 
271 }                                              << 
272                                                << 
273 /*                                             << 
274  * The former STARTING/DYING states, ran with  << 
275  */                                            << 
276 static bool cpuhp_is_atomic_state(enum cpuhp_s << 
277 {                                              << 
278         return CPUHP_AP_IDLE_DEAD <= state &&  << 
279 }                                              << 
280                                                << 
281 /* Synchronization state management */         << 
282 enum cpuhp_sync_state {                        << 
283         SYNC_STATE_DEAD,                       << 
284         SYNC_STATE_KICKED,                     << 
285         SYNC_STATE_SHOULD_DIE,                 << 
286         SYNC_STATE_ALIVE,                      << 
287         SYNC_STATE_SHOULD_ONLINE,              << 
288         SYNC_STATE_ONLINE,                     << 
289 };                                             << 
290                                                << 
291 #ifdef CONFIG_HOTPLUG_CORE_SYNC                << 
292 /**                                            << 
293  * cpuhp_ap_update_sync_state - Update synchro << 
294  * @state:      The synchronization state to s << 
295  *                                             << 
296  * No synchronization point. Just update of th << 
297  * a full barrier so that the AP changes are v << 
298  */                                            << 
299 static inline void cpuhp_ap_update_sync_state( << 
300 {                                              << 
301         atomic_t *st = this_cpu_ptr(&cpuhp_sta << 
302                                                << 
303         (void)atomic_xchg(st, state);          << 
304 }                                              << 
305                                                << 
306 void __weak arch_cpuhp_sync_state_poll(void) { << 
307                                                << 
308 static bool cpuhp_wait_for_sync_state(unsigned << 
309                                       enum cpu << 
310 {                                              << 
311         atomic_t *st = per_cpu_ptr(&cpuhp_stat << 
312         ktime_t now, end, start = ktime_get(); << 
313         int sync;                              << 
314                                                << 
315         end = start + 10ULL * NSEC_PER_SEC;    << 
316                                                << 
317         sync = atomic_read(st);                << 
318         while (1) {                            << 
319                 if (sync == state) {           << 
320                         if (!atomic_try_cmpxch << 
321                                 continue;      << 
322                         return true;           << 
323                 }                              << 
324                                                << 
325                 now = ktime_get();             << 
326                 if (now > end) {               << 
327                         /* Timeout. Leave the  << 
328                         return false;          << 
329                 } else if (now - start < NSEC_ << 
330                         /* Poll for one millis << 
331                         arch_cpuhp_sync_state_ << 
332                 } else {                       << 
333                         usleep_range(USEC_PER_ << 
334                 }                              << 
335                 sync = atomic_read(st);        << 
336         }                                      << 
337         return true;                           << 
338 }                                              << 
339 #else  /* CONFIG_HOTPLUG_CORE_SYNC */          << 
340 static inline void cpuhp_ap_update_sync_state( << 
341 #endif /* !CONFIG_HOTPLUG_CORE_SYNC */         << 
342                                                << 
343 #ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD           << 
344 /**                                            << 
345  * cpuhp_ap_report_dead - Update synchronizati << 
346  *                                             << 
347  * No synchronization point. Just update of th << 
348  */                                            << 
349 void cpuhp_ap_report_dead(void)                << 
350 {                                              << 
351         cpuhp_ap_update_sync_state(SYNC_STATE_ << 
352 }                                              << 
353                                                << 
354 void __weak arch_cpuhp_cleanup_dead_cpu(unsign << 
355                                                << 
356 /*                                             << 
357  * Late CPU shutdown synchronization point. Ca << 
358  * because the AP cannot issue complete() at t << 
359  */                                            << 
360 static void cpuhp_bp_sync_dead(unsigned int cp << 
361 {                                              << 
362         atomic_t *st = per_cpu_ptr(&cpuhp_stat << 
363         int sync = atomic_read(st);            << 
364                                                << 
365         do {                                   << 
366                 /* CPU can have reported dead  << 
367                 if (sync == SYNC_STATE_DEAD)   << 
368                         break;                 << 
369         } while (!atomic_try_cmpxchg(st, &sync << 
370                                                << 
371         if (cpuhp_wait_for_sync_state(cpu, SYN << 
372                 /* CPU reached dead state. Inv << 
373                 arch_cpuhp_cleanup_dead_cpu(cp << 
374                 return;                        << 
375         }                                      << 
376                                                << 
377         /* No further action possible. Emit me << 
378         pr_err("CPU%u failed to report dead st << 
379 }                                              << 
380 #else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */      << 
381 static inline void cpuhp_bp_sync_dead(unsigned << 
382 #endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */    << 
383                                                << 
384 #ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL           << 
385 /**                                            << 
386  * cpuhp_ap_sync_alive - Synchronize AP with t << 
387  *                                             << 
388  * Updates the AP synchronization state to SYN << 
389  * for the BP to release it.                   << 
390  */                                            << 
391 void cpuhp_ap_sync_alive(void)                 << 
392 {                                              << 
393         atomic_t *st = this_cpu_ptr(&cpuhp_sta << 
394                                                << 
395         cpuhp_ap_update_sync_state(SYNC_STATE_ << 
396                                                << 
397         /* Wait for the control CPU to release << 
398         while (atomic_read(st) != SYNC_STATE_S << 
399                 cpu_relax();                   << 
400 }                                              << 
401                                                << 
402 static bool cpuhp_can_boot_ap(unsigned int cpu << 
403 {                                              << 
404         atomic_t *st = per_cpu_ptr(&cpuhp_stat << 
405         int sync = atomic_read(st);            << 
406                                                << 
407 again:                                         << 
408         switch (sync) {                        << 
409         case SYNC_STATE_DEAD:                  << 
410                 /* CPU is properly dead */     << 
411                 break;                         << 
412         case SYNC_STATE_KICKED:                << 
413                 /* CPU did not come up in prev << 
414                 break;                         << 
415         case SYNC_STATE_ALIVE:                 << 
416                 /* CPU is stuck cpuhp_ap_sync_ << 
417                 break;                         << 
418         default:                               << 
419                 /* CPU failed to report online << 
420                 return false;                  << 
421         }                                      << 
422                                                << 
423         /* Prepare for booting */              << 
424         if (!atomic_try_cmpxchg(st, &sync, SYN << 
425                 goto again;                    << 
426                                                << 
427         return true;                           << 
428 }                                              << 
429                                                << 
430 void __weak arch_cpuhp_cleanup_kick_cpu(unsign << 
431                                                << 
432 /*                                             << 
433  * Early CPU bringup synchronization point. Ca << 
434  * because the AP cannot issue complete() so e << 
435  */                                            << 
436 static int cpuhp_bp_sync_alive(unsigned int cp << 
437 {                                              << 
438         int ret = 0;                           << 
439                                                << 
440         if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SY << 
441                 return 0;                      << 
442                                                << 
443         if (!cpuhp_wait_for_sync_state(cpu, SY << 
444                 pr_err("CPU%u failed to report << 
445                 ret = -EIO;                    << 
446         }                                      << 
447                                                << 
448         /* Let the architecture cleanup the ki << 
449         arch_cpuhp_cleanup_kick_cpu(cpu);      << 
450         return ret;                            << 
451 }                                              << 
452 #else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */      << 
453 static inline int cpuhp_bp_sync_alive(unsigned << 
454 static inline bool cpuhp_can_boot_ap(unsigned  << 
455 #endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */    << 
456                                                << 
457 /* Serializes the updates to cpu_online_mask,     180 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
458 static DEFINE_MUTEX(cpu_add_remove_lock);         181 static DEFINE_MUTEX(cpu_add_remove_lock);
459 bool cpuhp_tasks_frozen;                          182 bool cpuhp_tasks_frozen;
460 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);            183 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
461                                                   184 
462 /*                                                185 /*
463  * The following two APIs (cpu_maps_update_beg    186  * The following two APIs (cpu_maps_update_begin/done) must be used when
464  * attempting to serialize the updates to cpu_    187  * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
465  */                                               188  */
466 void cpu_maps_update_begin(void)                  189 void cpu_maps_update_begin(void)
467 {                                                 190 {
468         mutex_lock(&cpu_add_remove_lock);         191         mutex_lock(&cpu_add_remove_lock);
469 }                                                 192 }
470                                                   193 
471 void cpu_maps_update_done(void)                   194 void cpu_maps_update_done(void)
472 {                                                 195 {
473         mutex_unlock(&cpu_add_remove_lock);       196         mutex_unlock(&cpu_add_remove_lock);
474 }                                                 197 }
475                                                   198 
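Any path that flips a bit in cpu_present_mask is expected to hold this lock; a minimal, hypothetical sketch of such a caller (the function name is illustrative, not taken from the file):

    #include <linux/cpu.h>
    #include <linux/cpumask.h>

    /* Hypothetical: mark a newly enumerated CPU as present. */
    static void example_register_present_cpu(unsigned int cpu)
    {
            cpu_maps_update_begin();
            set_cpu_present(cpu, true);
            cpu_maps_update_done();
    }
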
476 /*                                             !! 199 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
477  * If set, cpu_up and cpu_down will return -EB << 
478  * Should always be manipulated under cpu_add_    200  * Should always be manipulated under cpu_add_remove_lock
479  */                                               201  */
480 static int cpu_hotplug_disabled;                  202 static int cpu_hotplug_disabled;
481                                                   203 
482 #ifdef CONFIG_HOTPLUG_CPU                         204 #ifdef CONFIG_HOTPLUG_CPU
483                                                   205 
484 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);  !! 206 static struct {
                                                   >> 207         struct task_struct *active_writer;
                                                   >> 208         /* wait queue to wake up the active_writer */
                                                   >> 209         wait_queue_head_t wq;
                                                   >> 210         /* verifies that no writer will get active while readers are active */
                                                   >> 211         struct mutex lock;
                                                   >> 212         /*
                                                   >> 213          * Also blocks the new readers during
                                                   >> 214          * an ongoing cpu hotplug operation.
                                                   >> 215          */
                                                   >> 216         atomic_t refcount;
                                                   >> 217 
                                                   >> 218 #ifdef CONFIG_DEBUG_LOCK_ALLOC
                                                   >> 219         struct lockdep_map dep_map;
                                                   >> 220 #endif
                                                   >> 221 } cpu_hotplug = {
                                                   >> 222         .active_writer = NULL,
                                                   >> 223         .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
                                                   >> 224         .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
                                                   >> 225 #ifdef CONFIG_DEBUG_LOCK_ALLOC
                                                   >> 226         .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
                                                   >> 227 #endif
                                                   >> 228 };
485                                                   229 
486 static bool cpu_hotplug_offline_disabled __ro_ !! 230 /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
                                                   >> 231 #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
                                                   >> 232 #define cpuhp_lock_acquire_tryread() \
                                                   >> 233                                   lock_map_acquire_tryread(&cpu_hotplug.dep_map)
                                                   >> 234 #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
                                                   >> 235 #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
487                                                   236 
488 void cpus_read_lock(void)                      << 
489 {                                              << 
490         percpu_down_read(&cpu_hotplug_lock);   << 
491 }                                              << 
492 EXPORT_SYMBOL_GPL(cpus_read_lock);             << 
493                                                   237 
494 int cpus_read_trylock(void)                    !! 238 void get_online_cpus(void)
495 {                                                 239 {
496         return percpu_down_read_trylock(&cpu_h !! 240         might_sleep();
                                                   >> 241         if (cpu_hotplug.active_writer == current)
                                                   >> 242                 return;
                                                   >> 243         cpuhp_lock_acquire_read();
                                                   >> 244         mutex_lock(&cpu_hotplug.lock);
                                                   >> 245         atomic_inc(&cpu_hotplug.refcount);
                                                   >> 246         mutex_unlock(&cpu_hotplug.lock);
497 }                                                 247 }
498 EXPORT_SYMBOL_GPL(cpus_read_trylock);          !! 248 EXPORT_SYMBOL_GPL(get_online_cpus);
499                                                   249 
500 void cpus_read_unlock(void)                    !! 250 void put_online_cpus(void)
501 {                                                 251 {
502         percpu_up_read(&cpu_hotplug_lock);     !! 252         int refcount;
503 }                                              << 
504 EXPORT_SYMBOL_GPL(cpus_read_unlock);           << 
505                                                   253 
506 void cpus_write_lock(void)                     !! 254         if (cpu_hotplug.active_writer == current)
507 {                                              !! 255                 return;
508         percpu_down_write(&cpu_hotplug_lock);  << 
509 }                                              << 
510                                                   256 
511 void cpus_write_unlock(void)                   !! 257         refcount = atomic_dec_return(&cpu_hotplug.refcount);
512 {                                              !! 258         if (WARN_ON(refcount < 0)) /* try to fix things up */
513         percpu_up_write(&cpu_hotplug_lock);    !! 259                 atomic_inc(&cpu_hotplug.refcount);
514 }                                              << 
515                                                   260 
516 void lockdep_assert_cpus_held(void)            !! 261         if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
517 {                                              !! 262                 wake_up(&cpu_hotplug.wq);
518         /*                                     << 
519          * We can't have hotplug operations be << 
520          * and some init codepaths will knowin << 
521          * This is all valid, so mute lockdep  << 
522          * unheld locks.                       << 
523          */                                    << 
524         if (system_state < SYSTEM_RUNNING)     << 
525                 return;                        << 
526                                                   263 
527         percpu_rwsem_assert_held(&cpu_hotplug_ !! 264         cpuhp_lock_release();
528 }                                              << 
529                                                   265 
530 #ifdef CONFIG_LOCKDEP                          << 
531 int lockdep_is_cpus_held(void)                 << 
532 {                                              << 
533         return percpu_rwsem_is_held(&cpu_hotpl << 
534 }                                                 266 }
535 #endif                                         !! 267 EXPORT_SYMBOL_GPL(put_online_cpus);
536                                                   268 
537 static void lockdep_acquire_cpus_lock(void)    !! 269 /*
                                                   >> 270  * This ensures that the hotplug operation can begin only when the
                                                   >> 271  * refcount goes to zero.
                                                   >> 272  *
                                                   >> 273  * Note that during a cpu-hotplug operation, the new readers, if any,
                                                   >> 274  * will be blocked by the cpu_hotplug.lock
                                                   >> 275  *
                                                   >> 276  * Since cpu_hotplug_begin() is always called after invoking
                                                   >> 277  * cpu_maps_update_begin(), we can be sure that only one writer is active.
                                                   >> 278  *
                                                   >> 279  * Note that theoretically, there is a possibility of a livelock:
                                                   >> 280  * - Refcount goes to zero, last reader wakes up the sleeping
                                                   >> 281  *   writer.
                                                   >> 282  * - Last reader unlocks the cpu_hotplug.lock.
                                                   >> 283  * - A new reader arrives at this moment, bumps up the refcount.
                                                   >> 284  * - The writer acquires the cpu_hotplug.lock finds the refcount
                                                   >> 285  *   non zero and goes to sleep again.
                                                   >> 286  *
                                                   >> 287  * However, this is very difficult to achieve in practice since
                                                   >> 288  * get_online_cpus() not an api which is called all that often.
                                                   >> 289  *
                                                   >> 290  */
                                                   >> 291 void cpu_hotplug_begin(void)
538 {                                                 292 {
539         rwsem_acquire(&cpu_hotplug_lock.dep_ma !! 293         DEFINE_WAIT(wait);
540 }                                              << 
541                                                   294 
542 static void lockdep_release_cpus_lock(void)    !! 295         cpu_hotplug.active_writer = current;
543 {                                              !! 296         cpuhp_lock_acquire();
544         rwsem_release(&cpu_hotplug_lock.dep_ma !! 297 
                                                   >> 298         for (;;) {
                                                   >> 299                 mutex_lock(&cpu_hotplug.lock);
                                                   >> 300                 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                                                   >> 301                 if (likely(!atomic_read(&cpu_hotplug.refcount)))
                                                   >> 302                                 break;
                                                   >> 303                 mutex_unlock(&cpu_hotplug.lock);
                                                   >> 304                 schedule();
                                                   >> 305         }
                                                   >> 306         finish_wait(&cpu_hotplug.wq, &wait);
545 }                                                 307 }
546                                                   308 
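The two columns implement the same reader side with different primitives: v4.11 open-codes a refcount plus wait queue behind get_online_cpus()/put_online_cpus(), while v6.12 wraps a percpu-rwsem in cpus_read_lock()/cpus_read_unlock(). A hedged sketch of the caller-side pattern, with a hypothetical do_something() standing in for real per-CPU work:

    #include <linux/cpu.h>
    #include <linux/cpumask.h>

    /* Hypothetical: visit every online CPU without racing CPU hotplug. */
    static void example_walk_online_cpus(void (*do_something)(unsigned int cpu))
    {
            unsigned int cpu;

            cpus_read_lock();       /* get_online_cpus() in the v4.11 column */
            for_each_online_cpu(cpu)
                    do_something(cpu);
            cpus_read_unlock();     /* put_online_cpus() in the v4.11 column */
    }
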
547 /* Declare CPU offlining not supported */      !! 309 void cpu_hotplug_done(void)
548 void cpu_hotplug_disable_offlining(void)       << 
549 {                                                 310 {
550         cpu_maps_update_begin();               !! 311         cpu_hotplug.active_writer = NULL;
551         cpu_hotplug_offline_disabled = true;   !! 312         mutex_unlock(&cpu_hotplug.lock);
552         cpu_maps_update_done();                !! 313         cpuhp_lock_release();
553 }                                                 314 }
554                                                   315 
555 /*                                                316 /*
556  * Wait for currently running CPU hotplug oper    317  * Wait for currently running CPU hotplug operations to complete (if any) and
557  * disable future CPU hotplug (from sysfs). Th    318  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
558  * the 'cpu_hotplug_disabled' flag. The same l    319  * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
559  * hotplug path before performing hotplug oper    320  * hotplug path before performing hotplug operations. So acquiring that lock
560  * guarantees mutual exclusion from any curren    321  * guarantees mutual exclusion from any currently running hotplug operations.
561  */                                               322  */
562 void cpu_hotplug_disable(void)                    323 void cpu_hotplug_disable(void)
563 {                                                 324 {
564         cpu_maps_update_begin();                  325         cpu_maps_update_begin();
565         cpu_hotplug_disabled++;                   326         cpu_hotplug_disabled++;
566         cpu_maps_update_done();                   327         cpu_maps_update_done();
567 }                                                 328 }
568 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);           329 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
569                                                   330 
570 static void __cpu_hotplug_enable(void)            331 static void __cpu_hotplug_enable(void)
571 {                                                 332 {
572         if (WARN_ONCE(!cpu_hotplug_disabled, "    333         if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
573                 return;                           334                 return;
574         cpu_hotplug_disabled--;                   335         cpu_hotplug_disabled--;
575 }                                                 336 }
576                                                   337 
577 void cpu_hotplug_enable(void)                     338 void cpu_hotplug_enable(void)
578 {                                                 339 {
579         cpu_maps_update_begin();                  340         cpu_maps_update_begin();
580         __cpu_hotplug_enable();                   341         __cpu_hotplug_enable();
581         cpu_maps_update_done();                   342         cpu_maps_update_done();
582 }                                                 343 }
583 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);            344 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
584                                                << 
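cpu_hotplug_disable() only increments cpu_hotplug_disabled, so every call must be paired with cpu_hotplug_enable(); the WARN_ONCE() above fires on an unbalanced enable. A hedged sketch of the intended pairing, with a hypothetical do_critical_update():

    #include <linux/cpu.h>

    /* Hypothetical operation that must not race with sysfs-driven hotplug. */
    extern int do_critical_update(void);

    static int example_guarded_update(void)
    {
            int ret;

            cpu_hotplug_disable();          /* cpu_up()/cpu_down() return -EBUSY */
            ret = do_critical_update();     /* hypothetical */
            cpu_hotplug_enable();           /* rebalance the disabled count */

            return ret;
    }
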
585 #else                                          << 
586                                                << 
587 static void lockdep_acquire_cpus_lock(void)    << 
588 {                                              << 
589 }                                              << 
590                                                << 
591 static void lockdep_release_cpus_lock(void)    << 
592 {                                              << 
593 }                                              << 
594                                                << 
595 #endif  /* CONFIG_HOTPLUG_CPU */                  345 #endif  /* CONFIG_HOTPLUG_CPU */
596                                                   346 
597 /*                                             !! 347 /* Notifier wrappers for transitioning to state machine */
598  * Architectures that need SMT-specific errata << 
599  * should override this.                       << 
600  */                                            << 
601 void __weak arch_smt_update(void) { }          << 
602                                                << 
603 #ifdef CONFIG_HOTPLUG_SMT                      << 
604                                                   348 
605 enum cpuhp_smt_control cpu_smt_control __read_ !! 349 static int bringup_wait_for_ap(unsigned int cpu)
606 static unsigned int cpu_smt_max_threads __ro_a << 
607 unsigned int cpu_smt_num_threads __read_mostly << 
608                                                << 
609 void __init cpu_smt_disable(bool force)        << 
610 {                                              << 
611         if (!cpu_smt_possible())               << 
612                 return;                        << 
613                                                << 
614         if (force) {                           << 
615                 pr_info("SMT: Force disabled\n << 
616                 cpu_smt_control = CPU_SMT_FORC << 
617         } else {                               << 
618                 pr_info("SMT: disabled\n");    << 
619                 cpu_smt_control = CPU_SMT_DISA << 
620         }                                      << 
621         cpu_smt_num_threads = 1;               << 
622 }                                              << 
623                                                << 
624 /*                                             << 
625  * The decision whether SMT is supported can o << 
626  * CPU identification. Called from architectur << 
627  */                                            << 
628 void __init cpu_smt_set_num_threads(unsigned i << 
629                                     unsigned i << 
630 {                                              << 
631         WARN_ON(!num_threads || (num_threads > << 
632                                                << 
633         if (max_threads == 1)                  << 
634                 cpu_smt_control = CPU_SMT_NOT_ << 
635                                                << 
636         cpu_smt_max_threads = max_threads;     << 
637                                                << 
638         /*                                     << 
639          * If SMT has been disabled via the ke << 
640          * not supported, set cpu_smt_num_thre << 
641          * If enabled, take the architecture r << 
642          * to bring up into account.           << 
643          */                                    << 
644         if (cpu_smt_control != CPU_SMT_ENABLED << 
645                 cpu_smt_num_threads = 1;       << 
646         else if (num_threads < cpu_smt_num_thr << 
647                 cpu_smt_num_threads = num_thre << 
648 }                                              << 
649                                                << 
650 static int __init smt_cmdline_disable(char *st << 
651 {                                              << 
652         cpu_smt_disable(str && !strcmp(str, "f << 
653         return 0;                              << 
654 }                                              << 
655 early_param("nosmt", smt_cmdline_disable);     << 
656                                                << 
657 /*                                             << 
658  * For Archicture supporting partial SMT state << 
659  * Otherwise this has already been checked thr << 
660  * setting the SMT level.                      << 
661  */                                            << 
662 static inline bool cpu_smt_thread_allowed(unsi << 
663 {                                              << 
664 #ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC          << 
665         return topology_smt_thread_allowed(cpu << 
666 #else                                          << 
667         return true;                           << 
668 #endif                                         << 
669 }                                              << 
670                                                << 
671 static inline bool cpu_bootable(unsigned int c << 
672 {                                              << 
673         if (cpu_smt_control == CPU_SMT_ENABLED << 
674                 return true;                   << 
675                                                << 
676         /* All CPUs are bootable if controls a << 
677         if (cpu_smt_control == CPU_SMT_NOT_IMP << 
678                 return true;                   << 
679                                                << 
680         /* All CPUs are bootable if CPU is not << 
681         if (cpu_smt_control == CPU_SMT_NOT_SUP << 
682                 return true;                   << 
683                                                << 
684         if (topology_is_primary_thread(cpu))   << 
685                 return true;                   << 
686                                                << 
687         /*                                     << 
688          * On x86 it's required to boot all lo << 
689          * that the init code can get a chance << 
690          * CPU. Otherwise, a broadcasted MCE o << 
691          * core will shutdown the machine.     << 
692          */                                    << 
693         return !cpumask_test_cpu(cpu, &cpus_bo << 
694 }                                              << 
695                                                << 
696 /* Returns true if SMT is supported and not fo << 
697 bool cpu_smt_possible(void)                    << 
698 {                                              << 
699         return cpu_smt_control != CPU_SMT_FORC << 
700                 cpu_smt_control != CPU_SMT_NOT << 
701 }                                              << 
702 EXPORT_SYMBOL_GPL(cpu_smt_possible);           << 
703                                                << 
704 #else                                          << 
705 static inline bool cpu_bootable(unsigned int c << 
706 #endif                                         << 
707                                                << 
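cpu_smt_possible() is exported so that other subsystems can tell whether sibling threads might ever be brought online; a minimal hedged sketch of such a consumer (names hypothetical):

    #include <linux/cpu.h>
    #include <linux/printk.h>

    /* Hypothetical: downgrade expectations when SMT cannot be ruled out. */
    static void example_check_smt(void)
    {
            if (cpu_smt_possible())
                    pr_warn_once("example: SMT may come online; mitigation is partial\n");
    }
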
708 static inline enum cpuhp_state                 << 
709 cpuhp_set_state(int cpu, struct cpuhp_cpu_stat << 
710 {                                              << 
711         enum cpuhp_state prev_state = st->stat << 
712         bool bringup = st->state < target;     << 
713                                                << 
714         st->rollback = false;                  << 
715         st->last = NULL;                       << 
716                                                << 
717         st->target = target;                   << 
718         st->single = false;                    << 
719         st->bringup = bringup;                 << 
720         if (cpu_dying(cpu) != !bringup)        << 
721                 set_cpu_dying(cpu, !bringup);  << 
722                                                << 
723         return prev_state;                     << 
724 }                                              << 
725                                                << 
726 static inline void                             << 
727 cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st, <<
728                   enum cpuhp_state prev_state) << 
729 {                                              << 
730         bool bringup = !st->bringup;           << 
731                                                << 
732         st->target = prev_state;               << 
733                                                << 
734         /*                                     << 
735          * Already rolling back. No need to invert the direction or adjust <<
736          * the current state.                  << 
737          */                                    << 
738         if (st->rollback)                      << 
739                 return;                        << 
740                                                << 
741         st->rollback = true;                   << 
742                                                << 
743         /*                                     << 
744          * If we have st->last we need to undo partial multi_instance of this <<
745          * state first. Otherwise start undo at the previous state. <<
746          */                                    << 
747         if (!st->last) {                       << 
748                 if (st->bringup)               << 
749                         st->state--;           << 
750                 else                           << 
751                         st->state++;           << 
752         }                                      << 
753                                                << 
754         st->bringup = bringup;                 << 
755         if (cpu_dying(cpu) != !bringup)        << 
756                 set_cpu_dying(cpu, !bringup);  << 
757 }                                              << 
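
A rough userspace model of the set/reset pair may make the bookkeeping easier to follow: on failure the walk direction is inverted, the state steps back off the entry that failed (unless a partial multi-instance undo is still pending via st->last), and a rollback is never re-entered. The struct and function names below are simplified stand-ins, not the kernel's types.

        #include <stdbool.h>
        #include <stdio.h>

        struct hp_state {
                int state, target;
                bool bringup, rollback, have_last;
        };

        static int set_state(struct hp_state *st, int target)
        {
                int prev = st->state;

                st->rollback = false;
                st->have_last = false;
                st->target = target;
                st->bringup = st->state < target;
                return prev;
        }

        static void reset_state(struct hp_state *st, int prev)
        {
                st->target = prev;
                if (st->rollback)               /* already rolling back, nothing to invert */
                        return;
                st->rollback = true;
                if (!st->have_last) {           /* step back off the state that failed */
                        if (st->bringup)
                                st->state--;
                        else
                                st->state++;
                }
                st->bringup = !st->bringup;     /* the undo runs in the opposite direction */
        }

        int main(void)
        {
                struct hp_state st = { .state = 3 };
                int prev = set_state(&st, 10);  /* bringup towards state 10 */

                st.state = 7;                   /* pretend state 7 failed */
                reset_state(&st, prev);
                printf("undo from %d back to %d, bringup=%d\n", st.state, st.target, st.bringup);
                return 0;
        }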
758                                                << 
759 /* Regular hotplug invocation of the AP hotplu << 
760 static void __cpuhp_kick_ap(struct cpuhp_cpu_s << 
761 {                                              << 
762         if (!st->single && st->state == st->target) <<
763                 return;                        << 
764                                                << 
765         st->result = 0;                        << 
766         /*                                     << 
767          * Make sure the above stores are visible before should_run becomes <<
768          * true. Paired with the mb() above in cpuhp_thread_fun() <<
769          */                                    << 
770         smp_mb();                              << 
771         st->should_run = true;                 << 
772         wake_up_process(st->thread);           << 
773         wait_for_ap_thread(st, st->bringup);   << 
774 }                                              << 
775                                                << 
776 static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st, <<
777                          enum cpuhp_state target) <<
778 {                                              << 
779         enum cpuhp_state prev_state;           << 
780         int ret;                               << 
781                                                << 
782         prev_state = cpuhp_set_state(cpu, st, target); <<
783         __cpuhp_kick_ap(st);                   <<
784         if ((ret = st->result)) {              <<
785                 cpuhp_reset_state(cpu, st, prev_state); <<
786                 __cpuhp_kick_ap(st);           << 
787         }                                      << 
788                                                << 
789         return ret;                            << 
790 }                                              << 
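
The kick sequence above publishes the work description, raises should_run and then waits for the AP thread to signal completion; on error it resets the state and kicks once more to roll back. Here is a self-contained pthread sketch of just that handshake; the kernel itself uses wake_up_process(), per-direction completions and smp_mb() rather than a mutex/condvar, so this is only an illustration of the ordering.

        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>

        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
        static bool should_run, done;
        static int target, state, result;

        static void *ap_thread(void *arg)
        {
                pthread_mutex_lock(&lock);
                while (!should_run)                     /* wait to be kicked */
                        pthread_cond_wait(&cond, &lock);
                while (state < target)                  /* "run" the hotplug states */
                        state++;
                result = 0;
                should_run = false;
                done = true;                            /* completion equivalent */
                pthread_cond_broadcast(&cond);
                pthread_mutex_unlock(&lock);
                return NULL;
        }

        static int kick_ap(int new_target)
        {
                pthread_mutex_lock(&lock);
                target = new_target;                    /* publish the work ... */
                done = false;
                should_run = true;                      /* ... before raising the flag */
                pthread_cond_broadcast(&cond);
                while (!done)                           /* wait_for_ap_thread() equivalent */
                        pthread_cond_wait(&cond, &lock);
                pthread_mutex_unlock(&lock);
                return result;
        }

        int main(void)
        {
                pthread_t tid;
                int ret;

                pthread_create(&tid, NULL, ap_thread, NULL);
                ret = kick_ap(5);
                printf("kick result %d, state %d\n", ret, state);
                pthread_join(tid, NULL);
                return 0;
        }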
791                                                << 
792 static int bringup_wait_for_ap_online(unsigned int cpu) <<
793 {                                                 350 {
794         struct cpuhp_cpu_state *st = per_cpu_p    351         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
795                                                   352 
796         /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */ !! 353         wait_for_completion(&st->done);
797         wait_for_ap_thread(st, true);          !! 354         return st->result;
798         if (WARN_ON_ONCE((!cpu_online(cpu))))  << 
799                 return -ECANCELED;             << 
800                                                << 
801         /* Unpark the hotplug thread of the target cpu */ <<
802         kthread_unpark(st->thread);            << 
803                                                << 
804         /*                                     << 
805          * SMT soft disabling on X86 requires to bring the CPU out of the <<
806          * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The <<
807          * CPU marked itself as booted_once in notify_cpu_starting() so the <<
808          * cpu_bootable() check will now return false if this is not the <<
809          * primary sibling.                    << 
810          */                                    << 
811         if (!cpu_bootable(cpu))                << 
812                 return -ECANCELED;             << 
813         return 0;                              << 
814 }                                              << 
815                                                << 
816 #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP            << 
817 static int cpuhp_kick_ap_alive(unsigned int cpu) <<
818 {                                              << 
819         if (!cpuhp_can_boot_ap(cpu))           << 
820                 return -EAGAIN;                << 
821                                                << 
822         return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu)); <<
823 }                                                 355 }
824                                                   356 
825 static int cpuhp_bringup_ap(unsigned int cpu)  << 
826 {                                              << 
827         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); <<
828         int ret;                               << 
829                                                << 
830         /*                                     << 
831          * Some architectures have to walk the irq descriptors to <<
832          * setup the vector space for the cpu which comes online. <<
833          * Prevent irq alloc/free across the bringup. <<
834          */                                    << 
835         irq_lock_sparse();                     << 
836                                                << 
837         ret = cpuhp_bp_sync_alive(cpu);        << 
838         if (ret)                               << 
839                 goto out_unlock;               << 
840                                                << 
841         ret = bringup_wait_for_ap_online(cpu); << 
842         if (ret)                               << 
843                 goto out_unlock;               << 
844                                                << 
845         irq_unlock_sparse();                   << 
846                                                << 
847         if (st->target <= CPUHP_AP_ONLINE_IDLE) <<
848                 return 0;                      << 
849                                                << 
850         return cpuhp_kick_ap(cpu, st, st->target); <<
851                                                << 
852 out_unlock:                                    << 
853         irq_unlock_sparse();                   << 
854         return ret;                            << 
855 }                                              << 
856 #else                                          << 
857 static int bringup_cpu(unsigned int cpu)          357 static int bringup_cpu(unsigned int cpu)
858 {                                                 358 {
859         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); <<
860         struct task_struct *idle = idle_thread    359         struct task_struct *idle = idle_thread_get(cpu);
861         int ret;                                  360         int ret;
862                                                   361 
863         if (!cpuhp_can_boot_ap(cpu))           << 
864                 return -EAGAIN;                << 
865                                                << 
866         /*                                        362         /*
867          * Some architectures have to walk the    363          * Some architectures have to walk the irq descriptors to
868          * setup the vector space for the cpu     364          * setup the vector space for the cpu which comes online.
869          *                                     !! 365          * Prevent irq alloc/free across the bringup.
870          * Prevent irq alloc/free across the bringup by acquiring the <<
871          * sparse irq lock. Hold it until the upcoming CPU completes the <<
872          * startup in cpuhp_online_idle() which allows to avoid <<
873          * intermediate synchronization points in the architecture code. <<
874          */                                       366          */
875         irq_lock_sparse();                        367         irq_lock_sparse();
876                                                   368 
                                                   >> 369         /* Arch-specific enabling code. */
877         ret = __cpu_up(cpu, idle);                370         ret = __cpu_up(cpu, idle);
878         if (ret)                               << 
879                 goto out_unlock;               << 
880                                                << 
881         ret = cpuhp_bp_sync_alive(cpu);        << 
882         if (ret)                               << 
883                 goto out_unlock;               << 
884                                                << 
885         ret = bringup_wait_for_ap_online(cpu); << 
886         if (ret)                               << 
887                 goto out_unlock;               << 
888                                                << 
889         irq_unlock_sparse();                   << 
890                                                << 
891         if (st->target <= CPUHP_AP_ONLINE_IDLE) <<
892                 return 0;                      << 
893                                                << 
894         return cpuhp_kick_ap(cpu, st, st->target); <<
895                                                << 
896 out_unlock:                                    << 
897         irq_unlock_sparse();                      371         irq_unlock_sparse();
                                                   >> 372         if (ret)
                                                   >> 373                 return ret;
                                                   >> 374         ret = bringup_wait_for_ap(cpu);
                                                   >> 375         BUG_ON(!cpu_online(cpu));
898         return ret;                               376         return ret;
899 }                                                 377 }
900 #endif                                         << 
901                                                << 
902 static int finish_cpu(unsigned int cpu)        << 
903 {                                              << 
904         struct task_struct *idle = idle_thread_get(cpu); <<
905         struct mm_struct *mm = idle->active_mm; <<
906                                                <<
907         /*                                     <<
908          * idle_task_exit() will have switched to &init_mm, now <<
909          * clean up any remaining active_mm state. <<
910          */                                    << 
911         if (mm != &init_mm)                    << 
912                 idle->active_mm = &init_mm;    << 
913         mmdrop_lazy_tlb(mm);                   << 
914         return 0;                              << 
915 }                                              << 
916                                                   378 
917 /*                                                379 /*
918  * Hotplug state machine related functions        380  * Hotplug state machine related functions
919  */                                               381  */
                                                   >> 382 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
                                                   >> 383 {
                                                   >> 384         for (st->state++; st->state < st->target; st->state++) {
                                                   >> 385                 struct cpuhp_step *step = cpuhp_get_step(st->state);
920                                                   386 
921 /*                                             !! 387                 if (!step->skip_onerr)
922  * Get the next state to run. Empty ones will be skipped. Returns true if a <<
923  * state must be run.                          <<
924  *                                             <<
925  * st->state will be modified ahead of time, to match state_to_run, as if it <<
926  * has already run.                            <<
927  */                                            << 
928 static bool cpuhp_next_state(bool bringup,     << 
929                              enum cpuhp_state *state_to_run, <<
930                              struct cpuhp_cpu_state *st, <<
931                              enum cpuhp_state target) <<
932 {                                              <<
933         do {                                   <<
934                 if (bringup) {                 <<
935                         if (st->state >= target) <<
936                                 return false;  <<
937                                                <<
938                         *state_to_run = ++st->state; <<
939                 } else {                       <<
940                         if (st->state <= target) <<
941                                 return false;  <<
942                                                <<
943                         *state_to_run = st->state--; <<
944                 }                              <<
945                                                <<
946                 if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run))) <<
947                         break;                 << 
948         } while (true);                        << 
949                                                << 
950         return true;                           << 
951 }                                                 390 }
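
A self-contained sketch of the same iterator, with cpuhp_step_empty() replaced by a stub so it can run in userspace: it advances one state per call towards the target and skips hollow states. Names and the choice of "empty" states are assumptions for illustration only.

        #include <stdbool.h>
        #include <stdio.h>

        /* Stand-in for cpuhp_step_empty(): pretend these states have no callback. */
        static bool step_empty(int state)
        {
                return state == 3 || state == 5;
        }

        static bool next_state(bool bringup, int *state_to_run, int *state, int target)
        {
                do {
                        if (bringup) {
                                if (*state >= target)
                                        return false;
                                *state_to_run = ++*state;
                        } else {
                                if (*state <= target)
                                        return false;
                                *state_to_run = (*state)--;
                        }
                } while (step_empty(*state_to_run));    /* skip states with nothing to do */

                return true;
        }

        int main(void)
        {
                int state = 0, run;

                while (next_state(true, &run, &state, 7))       /* bringup 1..7, skipping 3 and 5 */
                        printf("run state %d\n", run);
                return 0;
        }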
952                                                   391 
953 static int __cpuhp_invoke_callback_range(bool  !! 392 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
954                                          unsig !! 393                                 enum cpuhp_state target)
955                                          struc << 
956                                          enum  << 
957                                          bool  << 
958 {                                                 394 {
959         enum cpuhp_state state;                !! 395         enum cpuhp_state prev_state = st->state;
960         int ret = 0;                              396         int ret = 0;
961                                                   397 
962         while (cpuhp_next_state(bringup, &stat !! 398         for (; st->state > target; st->state--) {
963                 int err;                       !! 399                 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
964                                                !! 400                 if (ret) {
965                 err = cpuhp_invoke_callback(cp !! 401                         st->target = prev_state;
966                 if (!err)                      !! 402                         undo_cpu_down(cpu, st);
967                         continue;              << 
968                                                << 
969                 if (nofail) {                  << 
970                         pr_warn("CPU %u %s sta << 
971                                 cpu, bringup ? << 
972                                 cpuhp_get_step << 
973                                 st->state, err << 
974                         ret = -1;              << 
975                 } else {                       << 
976                         ret = err;             << 
977                         break;                    403                         break;
978                 }                                 404                 }
979         }                                         405         }
980                                                << 
981         return ret;                               406         return ret;
982 }                                                 407 }
983                                                   408 
984 static inline int cpuhp_invoke_callback_range( !! 409 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
985                                                << 
986                                                << 
987                                                << 
988 {                                                 410 {
989         return __cpuhp_invoke_callback_range(b !! 411         for (st->state--; st->state > st->target; st->state--) {
990 }                                              !! 412                 struct cpuhp_step *step = cpuhp_get_step(st->state);
991                                                << 
992 static inline void cpuhp_invoke_callback_range << 
993                                                << 
994                                                << 
995                                                << 
996 {                                              << 
997         __cpuhp_invoke_callback_range(bringup, << 
998 }                                              << 
999                                                   413 
1000 static inline bool can_rollback_cpu(struct cp !! 414                 if (!step->skip_onerr)
1001 {                                             !! 415                         cpuhp_invoke_callback(cpu, st->state, false, NULL);
1002         if (IS_ENABLED(CONFIG_HOTPLUG_CPU))   !! 416         }
1003                 return true;                  << 
1004         /*                                    << 
1005          * When CPU hotplug is disabled, then taking the CPU down is not <<
1006          * possible because takedown_cpu() and the architecture and <<
1007          * subsystem specific mechanisms are not available. So the CPU <<
1008          * which would be completely unplugged again needs to stay around <<
1009          * in the current state.              <<
1010          */                                   <<
1011         return st->state <= CPUHP_BRINGUP_CPU; <<
1012 }                                                417 }
1013                                                  418 
1014 static int cpuhp_up_callbacks(unsigned int cp    419 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1015                               enum cpuhp_stat    420                               enum cpuhp_state target)
1016 {                                                421 {
1017         enum cpuhp_state prev_state = st->sta    422         enum cpuhp_state prev_state = st->state;
1018         int ret = 0;                             423         int ret = 0;
1019                                                  424 
1020         ret = cpuhp_invoke_callback_range(true, cpu, st, target); <<
1021         if (ret) {                            <<
1022                 pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n", <<
1023                          ret, cpu, cpuhp_get_step(st->state)->name, <<
1024                          st->state);          <<
1025                                               <<
1026                 cpuhp_reset_state(cpu, st, prev_state); <<
1027                 if (can_rollback_cpu(st))     <<
1028                         WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, <<
1029                                                             prev_state)); <<
1030         }                                        433         }
1031         return ret;                              434         return ret;
1032 }                                                435 }
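
The pattern here is run-forward-and-unwind-on-error. A toy model, with the hotplug callbacks replaced by printf stubs and an arbitrarily chosen failing state, shows the rollback walking back through the states that already succeeded:

        #include <stdio.h>

        /* Stub callback: startup of state 4 is made to fail on purpose. */
        static int invoke(int state, int up)
        {
                printf("%s state %d\n", up ? "startup" : "teardown", state);
                return (up && state == 4) ? -1 : 0;
        }

        static int up_callbacks(int *state, int target)
        {
                int prev = *state;

                while (*state < target) {
                        if (invoke(++*state, 1)) {
                                /* roll back: undo the states that did run */
                                for ((*state)--; *state > prev; (*state)--)
                                        invoke(*state, 0);
                                return -1;
                        }
                }
                return 0;
        }

        int main(void)
        {
                int state = 1;

                return up_callbacks(&state, 6) ? 1 : 0;
        }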
1033                                                  436 
1034 /*                                               437 /*
1035  * The cpu hotplug threads manage the bringup    438  * The cpu hotplug threads manage the bringup and teardown of the cpus
1036  */                                              439  */
                                                   >> 440 static void cpuhp_create(unsigned int cpu)
                                                   >> 441 {
                                                   >> 442         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                                                   >> 443 
                                                   >> 444         init_completion(&st->done);
                                                   >> 445 }
                                                   >> 446 
1037 static int cpuhp_should_run(unsigned int cpu)    447 static int cpuhp_should_run(unsigned int cpu)
1038 {                                                448 {
1039         struct cpuhp_cpu_state *st = this_cpu    449         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1040                                                  450 
1041         return st->should_run;                   451         return st->should_run;
1042 }                                                452 }
1043                                                  453 
                                                   >> 454 /* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
                                                   >> 455 static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
                                                   >> 456 {
                                                   >> 457         enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
                                                   >> 458 
                                                   >> 459         return cpuhp_down_callbacks(cpu, st, target);
                                                   >> 460 }
                                                   >> 461 
                                                   >> 462 /* Execute the online startup callbacks. Used to be CPU_ONLINE */
                                                   >> 463 static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
                                                   >> 464 {
                                                   >> 465         return cpuhp_up_callbacks(cpu, st, st->target);
                                                   >> 466 }
                                                   >> 467 
1044 /*                                               468 /*
1045  * Execute teardown/startup callbacks on the     469  * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
1046  * callbacks when a state gets [un]installed     470  * callbacks when a state gets [un]installed at runtime.
1047  *                                            << 
1048  * Each invocation of this function by the smpboot thread does a single AP <<
1049  * state callback.                            <<
1050  *                                            <<
1051  * It has 3 modes of operation:               <<
1052  *  - single: runs st->cb_state               <<
1053  *  - up:     runs ++st->state, while st->state < st->target <<
1054  *  - down:   runs st->state--, while st->state > st->target <<
1055  *                                            <<
1056  * When complete or on error, should_run is cleared and the completion is fired. <<
1057  */                                              471  */
1058 static void cpuhp_thread_fun(unsigned int cpu    472 static void cpuhp_thread_fun(unsigned int cpu)
1059 {                                                473 {
1060         struct cpuhp_cpu_state *st = this_cpu    474         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1061         bool bringup = st->bringup;           !! 475         int ret = 0;
1062         enum cpuhp_state state;               << 
1063                                               << 
1064         if (WARN_ON_ONCE(!st->should_run))    << 
1065                 return;                       << 
1066                                                  476 
1067         /*                                       477         /*
1068          * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures !! 478          * Paired with the mb() in cpuhp_kick_ap_work and
1069          * that if we see ->should_run we also see the rest of the state. !! 479          * cpuhp_invoke_ap_callback, so the work set is consistent visible.
1070          */                                      480          */
1071         smp_mb();                                481         smp_mb();
                                                   >> 482         if (!st->should_run)
                                                   >> 483                 return;
1072                                                  484 
1073         /*                                    !! 485         st->should_run = false;
1074          * The BP holds the hotplug lock, but << 
1075          * ensure that anybody asserting the  << 
1076          * it so.                             << 
1077          */                                   << 
1078         lockdep_acquire_cpus_lock();          << 
1079         cpuhp_lock_acquire(bringup);          << 
1080                                                  486 
                                                   >> 487         /* Single callback invocation for [un]install ? */
1081         if (st->single) {                        488         if (st->single) {
1082                 state = st->cb_state;         !! 489                 if (st->cb_state < CPUHP_AP_ONLINE) {
1083                 st->should_run = false;       !! 490                         local_irq_disable();
1084         } else {                              !! 491                         ret = cpuhp_invoke_callback(cpu, st->cb_state,
1085                 st->should_run = cpuhp_next_state(bringup, &state, st, st->target); <<
1086                 if (!st->should_run)          !! 493                         local_irq_enable();
1087                         goto end;             !! 494                 } else {
1088         }                                     !! 495                         ret = cpuhp_invoke_callback(cpu, st->cb_state,
1089                                               !! 496                                                     st->bringup, st->node);
1090         WARN_ON_ONCE(!cpuhp_is_ap_state(state)); <<
1091                                               !! 498         } else if (st->rollback) {
1092         if (cpuhp_is_atomic_state(state)) {   !! 499                 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
1093                 local_irq_disable();          << 
1094                 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last); <<
1095                 local_irq_enable();           << 
1096                                                  500 
1097                 /*                            !! 501                 undo_cpu_down(cpu, st);
1098                  * STARTING/DYING must not fail! !! 502                 st->rollback = false;
1099                  */                           << 
1100                 WARN_ON_ONCE(st->result);     << 
1101         } else {                                 503         } else {
1102                 st->result = cpuhp_invoke_cal !! 504                 /* Cannot happen .... */
1103         }                                     !! 505                 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
1104                                                  506 
1105         if (st->result) {                     !! 507                 /* Regular hotplug work */
1106                 /*                            !! 508                 if (st->state < st->target)
1107                  * If we fail on a rollback,  !! 509                         ret = cpuhp_ap_online(cpu, st);
1108                  * paddle, no way forward, no !! 510                 else if (st->state > st->target)
1109                  * playing.                   !! 511                         ret = cpuhp_ap_offline(cpu, st);
1110                  */                           << 
1111                 WARN_ON_ONCE(st->rollback);   << 
1112                 st->should_run = false;       << 
1113         }                                        512         }
1114                                               !! 513         st->result = ret;
1115 end:                                          !! 514         complete(&st->done);
1116         cpuhp_lock_release(bringup);          << 
1117         lockdep_release_cpus_lock();          << 
1118                                               << 
1119         if (!st->should_run)                  << 
1120                 complete_ap_thread(st, bringu << 
1121 }                                                515 }
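
A stripped-down model of the three modes may help when reading the function above: a single request runs exactly one callback and clears should_run, otherwise each invocation steps one state towards the target until none remain. The struct below mirrors cpuhp_cpu_state only loosely, and the smpboot thread loop is reduced to a plain while loop.

        #include <stdbool.h>
        #include <stdio.h>

        struct ap_state {
                bool single, bringup, should_run;
                int cb_state, state, target;
        };

        /* Stub for a state callback invocation. */
        static int run_one(int state, bool bringup)
        {
                printf("%s callback for state %d\n", bringup ? "startup" : "teardown", state);
                return 0;
        }

        static void thread_fun(struct ap_state *st)
        {
                int state;

                if (st->single) {
                        state = st->cb_state;
                        st->should_run = false;         /* one shot */
                } else if (st->bringup) {
                        if (st->state >= st->target) {
                                st->should_run = false;
                                return;
                        }
                        state = ++st->state;            /* up: one state per invocation */
                } else {
                        if (st->state <= st->target) {
                                st->should_run = false;
                                return;
                        }
                        state = st->state--;            /* down: one state per invocation */
                }
                run_one(state, st->bringup);
        }

        int main(void)
        {
                struct ap_state st = { .bringup = true, .state = 2, .target = 5, .should_run = true };

                while (st.should_run)                   /* smpboot thread loop equivalent */
                        thread_fun(&st);
                return 0;
        }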
1122                                                  516 
1123 /* Invoke a single callback on a remote cpu *    517 /* Invoke a single callback on a remote cpu */
1124 static int                                       518 static int
1125 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_    519 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
1126                          struct hlist_node *n    520                          struct hlist_node *node)
1127 {                                                521 {
1128         struct cpuhp_cpu_state *st = per_cpu_    522         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1129         int ret;                              << 
1130                                                  523 
1131         if (!cpu_online(cpu))                    524         if (!cpu_online(cpu))
1132                 return 0;                        525                 return 0;
1133                                                  526 
1134         cpuhp_lock_acquire(false);            << 
1135         cpuhp_lock_release(false);            << 
1136                                               << 
1137         cpuhp_lock_acquire(true);             << 
1138         cpuhp_lock_release(true);             << 
1139                                               << 
1140         /*                                       527         /*
1141          * If we are up and running, use the     528          * If we are up and running, use the hotplug thread. For early calls
1142          * we invoke the thread function dire    529          * we invoke the thread function directly.
1143          */                                      530          */
1144         if (!st->thread)                         531         if (!st->thread)
1145                 return cpuhp_invoke_callback( !! 532                 return cpuhp_invoke_callback(cpu, state, bringup, node);
1146                                                  533 
1147         st->rollback = false;                 << 
1148         st->last = NULL;                      << 
1149                                               << 
1150         st->node = node;                      << 
1151         st->bringup = bringup;                << 
1152         st->cb_state = state;                    534         st->cb_state = state;
1153         st->single = true;                       535         st->single = true;
1154                                               !! 536         st->bringup = bringup;
1155         __cpuhp_kick_ap(st);                  !! 537         st->node = node;
1156                                                  538 
1157         /*                                       539         /*
1158          * If we failed and did a partial, do !! 540          * Make sure the above stores are visible before should_run becomes
                                                   >> 541          * true. Paired with the mb() above in cpuhp_thread_fun()
1159          */                                      542          */
1160         if ((ret = st->result) && st->last) { !! 543         smp_mb();
1161                 st->rollback = true;          !! 544         st->should_run = true;
1162                 st->bringup = !bringup;       !! 545         wake_up_process(st->thread);
1163                                               !! 546         wait_for_completion(&st->done);
1164                 __cpuhp_kick_ap(st);          !! 547         return st->result;
1165         }                                     !! 548 }
1166                                                  549 
                                                   >> 550 /* Regular hotplug invocation of the AP hotplug thread */
                                                   >> 551 static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
                                                   >> 552 {
                                                   >> 553         st->result = 0;
                                                   >> 554         st->single = false;
1167         /*                                       555         /*
1168          * Clean up the leftovers so the next !! 556          * Make sure the above stores are visible before should_run becomes
1169          * data.                              !! 557          * true. Paired with the mb() above in cpuhp_thread_fun()
1170          */                                      558          */
1171         st->node = st->last = NULL;           !! 559         smp_mb();
1172         return ret;                           !! 560         st->should_run = true;
                                                   >> 561         wake_up_process(st->thread);
1173 }                                                562 }
1174                                                  563 
1175 static int cpuhp_kick_ap_work(unsigned int cp    564 static int cpuhp_kick_ap_work(unsigned int cpu)
1176 {                                                565 {
1177         struct cpuhp_cpu_state *st = per_cpu_    566         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1178         enum cpuhp_state prev_state = st->sta !! 567         enum cpuhp_state state = st->state;
1179         int ret;                              << 
1180                                               << 
1181         cpuhp_lock_acquire(false);            << 
1182         cpuhp_lock_release(false);            << 
1183                                               << 
1184         cpuhp_lock_acquire(true);             << 
1185         cpuhp_lock_release(true);             << 
1186                                                  568 
1187         trace_cpuhp_enter(cpu, st->target, pr !! 569         trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
1188         ret = cpuhp_kick_ap(cpu, st, st->targ !! 570         __cpuhp_kick_ap_work(st);
1189         trace_cpuhp_exit(cpu, st->state, prev !! 571         wait_for_completion(&st->done);
1190                                               !! 572         trace_cpuhp_exit(cpu, st->state, state, st->result);
1191         return ret;                           !! 573         return st->result;
1192 }                                                574 }
1193                                                  575 
1194 static struct smp_hotplug_thread cpuhp_thread    576 static struct smp_hotplug_thread cpuhp_threads = {
1195         .store                  = &cpuhp_stat    577         .store                  = &cpuhp_state.thread,
                                                   >> 578         .create                 = &cpuhp_create,
1196         .thread_should_run      = cpuhp_shoul    579         .thread_should_run      = cpuhp_should_run,
1197         .thread_fn              = cpuhp_threa    580         .thread_fn              = cpuhp_thread_fun,
1198         .thread_comm            = "cpuhp/%u",    581         .thread_comm            = "cpuhp/%u",
1199         .selfparking            = true,          582         .selfparking            = true,
1200 };                                               583 };
1201                                                  584 
1202 static __init void cpuhp_init_state(void)     << 
1203 {                                             << 
1204         struct cpuhp_cpu_state *st;           << 
1205         int cpu;                              << 
1206                                               << 
1207         for_each_possible_cpu(cpu) {          << 
1208                 st = per_cpu_ptr(&cpuhp_state << 
1209                 init_completion(&st->done_up) << 
1210                 init_completion(&st->done_dow << 
1211         }                                     << 
1212 }                                             << 
1213                                               << 
1214 void __init cpuhp_threads_init(void)             585 void __init cpuhp_threads_init(void)
1215 {                                                586 {
1216         cpuhp_init_state();                   << 
1217         BUG_ON(smpboot_register_percpu_thread    587         BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
1218         kthread_unpark(this_cpu_read(cpuhp_st    588         kthread_unpark(this_cpu_read(cpuhp_state.thread));
1219 }                                                589 }
1220                                                  590 
1221 #ifdef CONFIG_HOTPLUG_CPU                        591 #ifdef CONFIG_HOTPLUG_CPU
1222 #ifndef arch_clear_mm_cpumask_cpu             << 
1223 #define arch_clear_mm_cpumask_cpu(cpu, mm) cp << 
1224 #endif                                        << 
1225                                               << 
1226 /**                                              592 /**
1227  * clear_tasks_mm_cpumask - Safely clear task    593  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
1228  * @cpu: a CPU id                                594  * @cpu: a CPU id
1229  *                                               595  *
1230  * This function walks all processes, finds a    596  * This function walks all processes, finds a valid mm struct for each one and
1231  * then clears a corresponding bit in mm's cp    597  * then clears a corresponding bit in mm's cpumask.  While this all sounds
1232  * trivial, there are various non-obvious cor    598  * trivial, there are various non-obvious corner cases, which this function
1233  * tries to solve in a safe manner.              599  * tries to solve in a safe manner.
1234  *                                               600  *
1235  * Also note that the function uses a somewha    601  * Also note that the function uses a somewhat relaxed locking scheme, so it may
1236  * be called only for an already offlined CPU    602  * be called only for an already offlined CPU.
1237  */                                              603  */
1238 void clear_tasks_mm_cpumask(int cpu)             604 void clear_tasks_mm_cpumask(int cpu)
1239 {                                                605 {
1240         struct task_struct *p;                   606         struct task_struct *p;
1241                                                  607 
1242         /*                                       608         /*
1243          * This function is called after the     609          * This function is called after the cpu is taken down and marked
1244          * offline, so it's not like new tasks will ever get this cpu set in    610          * offline, so it's not like new tasks will ever get this cpu set in
1245          * their mm mask. -- Peter Zijlstra      611          * their mm mask. -- Peter Zijlstra
1246          * Thus, we may use rcu_read_lock() h    612          * Thus, we may use rcu_read_lock() here, instead of grabbing
1247          * full-fledged tasklist_lock.           613          * full-fledged tasklist_lock.
1248          */                                      614          */
1249         WARN_ON(cpu_online(cpu));                615         WARN_ON(cpu_online(cpu));
1250         rcu_read_lock();                         616         rcu_read_lock();
1251         for_each_process(p) {                    617         for_each_process(p) {
1252                 struct task_struct *t;           618                 struct task_struct *t;
1253                                                  619 
1254                 /*                               620                 /*
1255                  * Main thread might exit, bu    621                  * Main thread might exit, but other threads may still have
1256                  * a valid mm. Find one.         622                  * a valid mm. Find one.
1257                  */                              623                  */
1258                 t = find_lock_task_mm(p);        624                 t = find_lock_task_mm(p);
1259                 if (!t)                          625                 if (!t)
1260                         continue;                626                         continue;
1261                 arch_clear_mm_cpumask_cpu(cpu !! 627                 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
1262                 task_unlock(t);                  628                 task_unlock(t);
1263         }                                        629         }
1264         rcu_read_unlock();                       630         rcu_read_unlock();
1265 }                                                631 }
1266                                                  632 
                                                   >> 633 static inline void check_for_tasks(int dead_cpu)
                                                   >> 634 {
                                                   >> 635         struct task_struct *g, *p;
                                                   >> 636 
                                                   >> 637         read_lock(&tasklist_lock);
                                                   >> 638         for_each_process_thread(g, p) {
                                                   >> 639                 if (!p->on_rq)
                                                   >> 640                         continue;
                                                   >> 641                 /*
                                                   >> 642                  * We do the check with unlocked task_rq(p)->lock.
                                                   >> 643                  * Order the reading to do not warn about a task,
                                                   >> 644                  * which was running on this cpu in the past, and
                                                   >> 645                  * it's just been woken on another cpu.
                                                   >> 646                  */
                                                   >> 647                 rmb();
                                                   >> 648                 if (task_cpu(p) != dead_cpu)
                                                   >> 649                         continue;
                                                   >> 650 
                                                   >> 651                 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                                                   >> 652                         p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
                                                   >> 653         }
                                                   >> 654         read_unlock(&tasklist_lock);
                                                   >> 655 }
                                                   >> 656 
1267 /* Take this CPU down. */                        657 /* Take this CPU down. */
1268 static int take_cpu_down(void *_param)           658 static int take_cpu_down(void *_param)
1269 {                                                659 {
1270         struct cpuhp_cpu_state *st = this_cpu    660         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1271         enum cpuhp_state target = max((int)st    661         enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
1272         int err, cpu = smp_processor_id();       662         int err, cpu = smp_processor_id();
1273                                                  663 
1274         /* Ensure this CPU doesn't handle any    664         /* Ensure this CPU doesn't handle any more interrupts. */
1275         err = __cpu_disable();                   665         err = __cpu_disable();
1276         if (err < 0)                             666         if (err < 0)
1277                 return err;                      667                 return err;
1278                                                  668 
1279         /*                                       669         /*
1280          * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going !! 670          * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
1281          * down, that the current state is CPUHP_TEARDOWN_CPU - 1. !! 671          * do this step again.
1282          */                                   << 
1283         WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1)); <<
1284                                               << 
1285         /*                                    << 
1286          * Invoke the former CPU_DYING callbacks. <<
1287          */                                      672          */
1288         cpuhp_invoke_callback_range_nofail(false, cpu, st, target); !! 673         WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
                                                   >> 674         st->state--;
                                                   >> 675         /* Invoke the former CPU_DYING callbacks */
                                                   >> 676         for (; st->state > target; st->state--)
                                                   >> 677                 cpuhp_invoke_callback(cpu, st->state, false, NULL);
1289                                                  678 
                                                   >> 679         /* Give up timekeeping duties */
                                                   >> 680         tick_handover_do_timer();
1290         /* Park the stopper thread */            681         /* Park the stopper thread */
1291         stop_machine_park(cpu);                  682         stop_machine_park(cpu);
1292         return 0;                                683         return 0;
1293 }                                                684 }
1294                                                  685 
1295 static int takedown_cpu(unsigned int cpu)        686 static int takedown_cpu(unsigned int cpu)
1296 {                                                687 {
1297         struct cpuhp_cpu_state *st = per_cpu_    688         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1298         int err;                                 689         int err;
1299                                                  690 
1300         /* Park the smpboot threads */           691         /* Park the smpboot threads */
1301         kthread_park(st->thread);             !! 692         kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
                                                   >> 693         smpboot_park_threads(cpu);
1302                                                  694 
1303         /*                                       695         /*
1304          * Prevent irq alloc/free while the d    696          * Prevent irq alloc/free while the dying cpu reorganizes the
1305          * interrupt affinities.                 697          * interrupt affinities.
1306          */                                      698          */
1307         irq_lock_sparse();                       699         irq_lock_sparse();
1308                                                  700 
1309         /*                                       701         /*
1310          * So now all preempt/rcu users must     702          * So now all preempt/rcu users must observe !cpu_active().
1311          */                                      703          */
1312         err = stop_machine_cpuslocked(take_cp !! 704         err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
1313         if (err) {                               705         if (err) {
1314                 /* CPU refused to die */         706                 /* CPU refused to die */
1315                 irq_unlock_sparse();             707                 irq_unlock_sparse();
1316                 /* Unpark the hotplug thread     708                 /* Unpark the hotplug thread so we can rollback there */
1317                 kthread_unpark(st->thread);   !! 709                 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
1318                 return err;                      710                 return err;
1319         }                                        711         }
1320         BUG_ON(cpu_online(cpu));                 712         BUG_ON(cpu_online(cpu));
1321                                                  713 
1322         /*                                       714         /*
1323          * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed !! 715          * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
1324          * all runnable tasks from the CPU, there's only the idle task left now !! 716          * runnable tasks from the cpu, there's only the idle task left now
1325          * that the migration thread is done     717          * that the migration thread is done doing the stop_machine thing.
1326          *                                       718          *
1327          * Wait for the stop thread to go awa    719          * Wait for the stop thread to go away.
1328          */                                      720          */
1329         wait_for_ap_thread(st, false);        !! 721         wait_for_completion(&st->done);
1330         BUG_ON(st->state != CPUHP_AP_IDLE_DEA    722         BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1331                                                  723 
1332         /* Interrupts are moved away from the    724         /* Interrupts are moved away from the dying cpu, reenable alloc/free */
1333         irq_unlock_sparse();                     725         irq_unlock_sparse();
1334                                                  726 
1335         hotplug_cpu__broadcast_tick_pull(cpu)    727         hotplug_cpu__broadcast_tick_pull(cpu);
1336         /* This actually kills the CPU. */       728         /* This actually kills the CPU. */
1337         __cpu_die(cpu);                          729         __cpu_die(cpu);
1338                                                  730 
1339         cpuhp_bp_sync_dead(cpu);              << 
1340                                               << 
1341         tick_cleanup_dead_cpu(cpu);              731         tick_cleanup_dead_cpu(cpu);
1342                                               << 
1343         /*                                    << 
1344          * Callbacks must be re-integrated right away to the RCU state machine. <<
1345          * Otherwise an RCU callback could block a further teardown function <<
1346          * waiting for its completion.        << 
1347          */                                   << 
1348         rcutree_migrate_callbacks(cpu);       << 
1349                                               << 
1350         return 0;                                732         return 0;
1351 }                                                733 }
1352                                                  734 
1353 static void cpuhp_complete_idle_dead(void *ar    735 static void cpuhp_complete_idle_dead(void *arg)
1354 {                                                736 {
1355         struct cpuhp_cpu_state *st = arg;        737         struct cpuhp_cpu_state *st = arg;
1356                                                  738 
1357         complete_ap_thread(st, false);        !! 739         complete(&st->done);
1358 }                                                740 }
1359                                                  741 
1360 void cpuhp_report_idle_dead(void)                742 void cpuhp_report_idle_dead(void)
1361 {                                                743 {
1362         struct cpuhp_cpu_state *st = this_cpu    744         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1363                                                  745 
1364         BUG_ON(st->state != CPUHP_AP_OFFLINE)    746         BUG_ON(st->state != CPUHP_AP_OFFLINE);
1365         tick_assert_timekeeping_handover();   !! 747         rcu_report_dead(smp_processor_id());
1366         rcutree_report_cpu_dead();            << 
1367         st->state = CPUHP_AP_IDLE_DEAD;          748         st->state = CPUHP_AP_IDLE_DEAD;
1368         /*                                       749         /*
1369          * We cannot call complete after rcutree_report_cpu_dead() so we delegate it !! 750          * We cannot call complete after rcu_report_dead() so we delegate it
1370          * to an online cpu.                     751          * to an online cpu.
1371          */                                      752          */
1372         smp_call_function_single(cpumask_firs    753         smp_call_function_single(cpumask_first(cpu_online_mask),
1373                                  cpuhp_comple    754                                  cpuhp_complete_idle_dead, st, 0);
1374 }                                                755 }
1375                                                  756 
1376 static int cpuhp_down_callbacks(unsigned int  !! 757 #else
1377                                 enum cpuhp_st !! 758 #define takedown_cpu            NULL
1378 {                                             !! 759 #endif
1379         enum cpuhp_state prev_state = st->sta << 
1380         int ret = 0;                          << 
1381                                               << 
1382         ret = cpuhp_invoke_callback_range(false, cpu, st, target); <<
1383         if (ret) {                            <<
1384                 pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n", <<
1385                          ret, cpu, cpuhp_get_step(st->state)->name, <<
1386                          st->state);          <<
1387                                               <<
1388                 cpuhp_reset_state(cpu, st, prev_state); <<
1389                                               <<
1390                 if (st->state < prev_state)   <<
1391                         WARN_ON(cpuhp_invoke_callback_range(true, cpu, st, <<
1392                                                             prev_state)); <<
1393         }                                     << 
1394                                                  760 
1395         return ret;                           !! 761 #ifdef CONFIG_HOTPLUG_CPU
1396 }                                             << 
1397                                                  762 
1398 /* Requires cpu_add_remove_lock to be held */    763 /* Requires cpu_add_remove_lock to be held */
1399 static int __ref _cpu_down(unsigned int cpu,     764 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1400                            enum cpuhp_state t    765                            enum cpuhp_state target)
1401 {                                                766 {
1402         struct cpuhp_cpu_state *st = per_cpu_    767         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1403         int prev_state, ret = 0;                 768         int prev_state, ret = 0;
1404                                                  769 
1405         if (num_online_cpus() == 1)              770         if (num_online_cpus() == 1)
1406                 return -EBUSY;                   771                 return -EBUSY;
1407                                                  772 
1408         if (!cpu_present(cpu))                   773         if (!cpu_present(cpu))
1409                 return -EINVAL;                  774                 return -EINVAL;
1410                                                  775 
1411         cpus_write_lock();                    !! 776         cpu_hotplug_begin();
1412                                                  777 
1413         cpuhp_tasks_frozen = tasks_frozen;       778         cpuhp_tasks_frozen = tasks_frozen;
1414                                                  779 
1415         prev_state = cpuhp_set_state(cpu, st, !! 780         prev_state = st->state;
                                                   >> 781         st->target = target;
1416         /*                                       782         /*
1417          * If the current CPU state is in the    783          * If the current CPU state is in the range of the AP hotplug thread,
1418          * then we need to kick the thread.      784          * then we need to kick the thread.
1419          */                                      785          */
1420         if (st->state > CPUHP_TEARDOWN_CPU) {    786         if (st->state > CPUHP_TEARDOWN_CPU) {
1421                 st->target = max((int)target, << 
1422                 ret = cpuhp_kick_ap_work(cpu)    787                 ret = cpuhp_kick_ap_work(cpu);
1423                 /*                               788                 /*
1424                  * The AP side has done the e    789                  * The AP side has done the error rollback already. Just
1425                  * return the error code..       790                  * return the error code..
1426                  */                              791                  */
1427                 if (ret)                         792                 if (ret)
1428                         goto out;                793                         goto out;
1429                                                  794 
1430                 /*                               795                 /*
1431                  * We might have stopped stil    796                  * We might have stopped still in the range of the AP hotplug
1432                  * thread. Nothing to do anym    797                  * thread. Nothing to do anymore.
1433                  */                              798                  */
1434                 if (st->state > CPUHP_TEARDOW    799                 if (st->state > CPUHP_TEARDOWN_CPU)
1435                         goto out;                800                         goto out;
1436                                               << 
1437                 st->target = target;          << 
1438         }                                        801         }
1439         /*                                       802         /*
1440          * The AP brought itself down to CPUH    803          * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1441          * to do the further cleanups.           804          * to do the further cleanups.
1442          */                                      805          */
1443         ret = cpuhp_down_callbacks(cpu, st, t    806         ret = cpuhp_down_callbacks(cpu, st, target);
1444         if (ret && st->state < prev_state) {  !! 807         if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
1445                 if (st->state == CPUHP_TEARDO !! 808                 st->target = prev_state;
1446                         cpuhp_reset_state(cpu !! 809                 st->rollback = true;
1447                         __cpuhp_kick_ap(st);  !! 810                 cpuhp_kick_ap_work(cpu);
1448                 } else {                      << 
1449                         WARN(1, "DEAD callbac << 
1450                 }                             << 
1451         }                                        811         }
1452                                                  812 
1453 out:                                             813 out:
1454         cpus_write_unlock();                  !! 814         cpu_hotplug_done();
1455         /*                                    << 
1456          * Do post unplug cleanup. This is st << 
1457          * concurrent CPU hotplug via cpu_add << 
1458          */                                   << 
1459         lockup_detector_cleanup();            << 
1460         arch_smt_update();                    << 
1461         return ret;                              815         return ret;
1462 }                                                816 }
1463                                                  817 
1464 struct cpu_down_work {                        !! 818 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
1465         unsigned int            cpu;          << 
1466         enum cpuhp_state        target;       << 
1467 };                                            << 
1468                                               << 
1469 static long __cpu_down_maps_locked(void *arg) << 
1470 {                                                819 {
1471         struct cpu_down_work *work = arg;     !! 820         int err;
1472                                               << 
1473         return _cpu_down(work->cpu, 0, work-> << 
1474 }                                             << 
1475                                               << 
1476 static int cpu_down_maps_locked(unsigned int  << 
1477 {                                             << 
1478         struct cpu_down_work work = { .cpu =  << 
1479                                                  821 
1480         /*                                    !! 822         cpu_maps_update_begin();
1481          * If the platform does not support h << 
1482          * differentiate it from a transient  << 
1483          */                                   << 
1484         if (cpu_hotplug_offline_disabled)     << 
1485                 return -EOPNOTSUPP;           << 
1486         if (cpu_hotplug_disabled)             << 
1487                 return -EBUSY;                << 
1488                                                  823 
1489         /*                                    !! 824         if (cpu_hotplug_disabled) {
1490          * Ensure that the control task does  !! 825                 err = -EBUSY;
1491          * CPU to prevent a deadlock against  !! 826                 goto out;
1492          * Also keep at least one housekeepin << 
1493          * an empty sched_domain span.        << 
1494          */                                   << 
1495         for_each_cpu_and(cpu, cpu_online_mask << 
1496                 if (cpu != work.cpu)          << 
1497                         return work_on_cpu(cp << 
1498         }                                        827         }
1499         return -EBUSY;                        << 
1500 }                                             << 
1501                                                  828 
1502 static int cpu_down(unsigned int cpu, enum cp !! 829         err = _cpu_down(cpu, 0, target);
1503 {                                             << 
1504         int err;                              << 
1505                                                  830 
1506         cpu_maps_update_begin();              !! 831 out:
1507         err = cpu_down_maps_locked(cpu, targe << 
1508         cpu_maps_update_done();                  832         cpu_maps_update_done();
1509         return err;                              833         return err;
1510 }                                                834 }
1511                                               !! 835 int cpu_down(unsigned int cpu)
1512 /**                                           << 
1513  * cpu_device_down - Bring down a cpu device  << 
1514  * @dev: Pointer to the cpu device to offline << 
1515  *                                            << 
1516  * This function is meant to be used by devic << 
1517  *                                            << 
1518  * Other subsystems should use remove_cpu() i << 
1519  *                                            << 
1520  * Return: %0 on success or a negative errno  << 
1521  */                                           << 
1522 int cpu_device_down(struct device *dev)       << 
1523 {                                             << 
1524         return cpu_down(dev->id, CPUHP_OFFLIN << 
1525 }                                             << 
1526                                               << 
1527 int remove_cpu(unsigned int cpu)              << 
1528 {                                             << 
1529         int ret;                              << 
1530                                               << 
1531         lock_device_hotplug();                << 
1532         ret = device_offline(get_cpu_device(c << 
1533         unlock_device_hotplug();              << 
1534                                               << 
1535         return ret;                           << 
1536 }                                             << 
1537 EXPORT_SYMBOL_GPL(remove_cpu);                << 
1538                                               << 
1539 void smp_shutdown_nonboot_cpus(unsigned int p << 
1540 {                                                836 {
1541         unsigned int cpu;                     !! 837         return do_cpu_down(cpu, CPUHP_OFFLINE);
1542         int error;                            << 
1543                                               << 
1544         cpu_maps_update_begin();              << 
1545                                               << 
1546         /*                                    << 
1547          * Make certain the cpu I'm about to  << 
1548          *                                    << 
1549          * This is inline to what migrate_to_ << 
1550          */                                   << 
1551         if (!cpu_online(primary_cpu))         << 
1552                 primary_cpu = cpumask_first(c << 
1553                                               << 
1554         for_each_online_cpu(cpu) {            << 
1555                 if (cpu == primary_cpu)       << 
1556                         continue;             << 
1557                                               << 
1558                 error = cpu_down_maps_locked( << 
1559                 if (error) {                  << 
1560                         pr_err("Failed to off << 
1561                                 cpu, error);  << 
1562                         break;                << 
1563                 }                             << 
1564         }                                     << 
1565                                               << 
1566         /*                                    << 
1567          * Ensure all but the reboot CPU are  << 
1568          */                                   << 
1569         BUG_ON(num_online_cpus() > 1);        << 
1570                                               << 
1571         /*                                    << 
1572          * Make sure the CPUs won't be enable << 
1573          * point. Kexec will reboot to a new  << 
1574          * everything along the way.          << 
1575          */                                   << 
1576         cpu_hotplug_disabled++;               << 
1577                                               << 
1578         cpu_maps_update_done();               << 
1579 }                                                838 }
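A minimal sketch (not part of this file) of how an architecture's shutdown/kexec path might use smp_shutdown_nonboot_cpus(); example_machine_shutdown() and its reboot_cpu parameter are illustrative assumptions, not an existing caller.

#include <linux/cpu.h>

/*
 * Hypothetical arch-side caller: offline everything except the CPU that
 * will perform the reboot. smp_shutdown_nonboot_cpus() above takes each
 * other CPU down via cpu_down_maps_locked() and then bumps
 * cpu_hotplug_disabled so nothing can online them again before the new
 * kernel takes over.
 */
static void example_machine_shutdown(unsigned int reboot_cpu)
{
	smp_shutdown_nonboot_cpus(reboot_cpu);
}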
1580                                               !! 839 EXPORT_SYMBOL(cpu_down);
1581 #else                                         << 
1582 #define takedown_cpu            NULL          << 
1583 #endif /*CONFIG_HOTPLUG_CPU*/                    840 #endif /*CONFIG_HOTPLUG_CPU*/
1584                                                  841 
1585 /**                                              842 /**
1586  * notify_cpu_starting(cpu) - Invoke the call    843  * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1587  * @cpu: cpu that just started                   844  * @cpu: cpu that just started
1588  *                                               845  *
1589  * It must be called by the arch code on the     846  * It must be called by the arch code on the new cpu, before the new cpu
1590  * enables interrupts and before the "boot" c    847  * enables interrupts and before the "boot" cpu returns from __cpu_up().
1591  */                                              848  */
1592 void notify_cpu_starting(unsigned int cpu)       849 void notify_cpu_starting(unsigned int cpu)
1593 {                                                850 {
1594         struct cpuhp_cpu_state *st = per_cpu_    851         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1595         enum cpuhp_state target = min((int)st    852         enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1596                                                  853 
1597         rcutree_report_cpu_starting(cpu);     !! 854         rcu_cpu_starting(cpu);  /* Enables RCU usage on this CPU. */
1598         cpumask_set_cpu(cpu, &cpus_booted_onc !! 855         while (st->state < target) {
1599                                               !! 856                 st->state++;
1600         /*                                    !! 857                 cpuhp_invoke_callback(cpu, st->state, true, NULL);
1601          * STARTING must not fail!            !! 858         }
1602          */                                   << 
1603         cpuhp_invoke_callback_range_nofail(tr << 
1604 }                                                859 }
1605                                                  860 
1606 /*                                               861 /*
1607  * Called from the idle task. Wake up the con !! 862  * Called from the idle task. We need to set active here, so we can kick off
1608  * hotplug thread of the upcoming CPU up and  !! 863  * the stopper thread and unpark the smpboot threads. If the target state is
1609  * online bringup to the hotplug thread.      !! 864  * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
                                                   >> 865  * cpu further.
1610  */                                              866  */
1611 void cpuhp_online_idle(enum cpuhp_state state    867 void cpuhp_online_idle(enum cpuhp_state state)
1612 {                                                868 {
1613         struct cpuhp_cpu_state *st = this_cpu    869         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
                                                   >> 870         unsigned int cpu = smp_processor_id();
1614                                                  871 
1615         /* Happens for the boot cpu */           872         /* Happens for the boot cpu */
1616         if (state != CPUHP_AP_ONLINE_IDLE)       873         if (state != CPUHP_AP_ONLINE_IDLE)
1617                 return;                          874                 return;
1618                                                  875 
1619         cpuhp_ap_update_sync_state(SYNC_STATE !! 876         st->state = CPUHP_AP_ONLINE_IDLE;
1620                                                  877 
1621         /*                                    !! 878         /* Unpark the stopper thread and the hotplug thread of this cpu */
1622          * Unpark the stopper thread before w !! 879         stop_machine_unpark(cpu);
1623          * scheduling); this ensures the stop !! 880         kthread_unpark(st->thread);
1624          */                                   << 
1625         stop_machine_unpark(smp_processor_id( << 
1626                                                  881 
1627         st->state = CPUHP_AP_ONLINE_IDLE;     !! 882         /* Should we go further up ? */
1628         complete_ap_thread(st, true);         !! 883         if (st->target > CPUHP_AP_ONLINE_IDLE)
                                                   >> 884                 __cpuhp_kick_ap_work(st);
                                                   >> 885         else
                                                   >> 886                 complete(&st->done);
1629 }                                                887 }
1630                                                  888 
1631 /* Requires cpu_add_remove_lock to be held */    889 /* Requires cpu_add_remove_lock to be held */
1632 static int _cpu_up(unsigned int cpu, int task    890 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1633 {                                                891 {
1634         struct cpuhp_cpu_state *st = per_cpu_    892         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1635         struct task_struct *idle;                893         struct task_struct *idle;
1636         int ret = 0;                             894         int ret = 0;
1637                                                  895 
1638         cpus_write_lock();                    !! 896         cpu_hotplug_begin();
1639                                                  897 
1640         if (!cpu_present(cpu)) {                 898         if (!cpu_present(cpu)) {
1641                 ret = -EINVAL;                   899                 ret = -EINVAL;
1642                 goto out;                        900                 goto out;
1643         }                                        901         }
1644                                                  902 
1645         /*                                       903         /*
1646          * The caller of cpu_up() might have  !! 904          * The caller of do_cpu_up might have raced with another
1647          * caller. Nothing to do.             !! 905          * caller. Ignore it for now.
1648          */                                      906          */
1649         if (st->state >= target)                 907         if (st->state >= target)
1650                 goto out;                        908                 goto out;
1651                                                  909 
1652         if (st->state == CPUHP_OFFLINE) {        910         if (st->state == CPUHP_OFFLINE) {
1653                 /* Let it fail before we try     911                 /* Let it fail before we try to bring the cpu up */
1654                 idle = idle_thread_get(cpu);     912                 idle = idle_thread_get(cpu);
1655                 if (IS_ERR(idle)) {              913                 if (IS_ERR(idle)) {
1656                         ret = PTR_ERR(idle);     914                         ret = PTR_ERR(idle);
1657                         goto out;                915                         goto out;
1658                 }                                916                 }
1659                                               << 
1660                 /*                            << 
1661                  * Reset stale stack state fr << 
1662                  */                           << 
1663                 scs_task_reset(idle);         << 
1664                 kasan_unpoison_task_stack(idl << 
1665         }                                        917         }
1666                                                  918 
1667         cpuhp_tasks_frozen = tasks_frozen;       919         cpuhp_tasks_frozen = tasks_frozen;
1668                                                  920 
1669         cpuhp_set_state(cpu, st, target);     !! 921         st->target = target;
1670         /*                                       922         /*
1671          * If the current CPU state is in the    923          * If the current CPU state is in the range of the AP hotplug thread,
1672          * then we need to kick the thread on    924          * then we need to kick the thread once more.
1673          */                                      925          */
1674         if (st->state > CPUHP_BRINGUP_CPU) {     926         if (st->state > CPUHP_BRINGUP_CPU) {
1675                 ret = cpuhp_kick_ap_work(cpu)    927                 ret = cpuhp_kick_ap_work(cpu);
1676                 /*                               928                 /*
1677                  * The AP side has done the e    929                  * The AP side has done the error rollback already. Just
1678                  * return the error code..       930                  * return the error code..
1679                  */                              931                  */
1680                 if (ret)                         932                 if (ret)
1681                         goto out;                933                         goto out;
1682         }                                        934         }
1683                                                  935 
1684         /*                                       936         /*
1685          * Try to reach the target state. We     937          * Try to reach the target state. We max out on the BP at
1686          * CPUHP_BRINGUP_CPU. After that the     938          * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1687          * responsible for bringing it up to     939          * responsible for bringing it up to the target state.
1688          */                                      940          */
1689         target = min((int)target, CPUHP_BRING    941         target = min((int)target, CPUHP_BRINGUP_CPU);
1690         ret = cpuhp_up_callbacks(cpu, st, tar    942         ret = cpuhp_up_callbacks(cpu, st, target);
1691 out:                                             943 out:
1692         cpus_write_unlock();                  !! 944         cpu_hotplug_done();
1693         arch_smt_update();                    << 
1694         return ret;                              945         return ret;
1695 }                                                946 }
1696                                                  947 
1697 static int cpu_up(unsigned int cpu, enum cpuh !! 948 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
1698 {                                                949 {
1699         int err = 0;                             950         int err = 0;
1700                                                  951 
1701         if (!cpu_possible(cpu)) {                952         if (!cpu_possible(cpu)) {
1702                 pr_err("can't online cpu %d b    953                 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1703                        cpu);                     954                        cpu);
                                                   >> 955 #if defined(CONFIG_IA64)
                                                   >> 956                 pr_err("please check additional_cpus= boot parameter\n");
                                                   >> 957 #endif
1704                 return -EINVAL;                  958                 return -EINVAL;
1705         }                                        959         }
1706                                                  960 
1707         err = try_online_node(cpu_to_node(cpu    961         err = try_online_node(cpu_to_node(cpu));
1708         if (err)                                 962         if (err)
1709                 return err;                      963                 return err;
1710                                                  964 
1711         cpu_maps_update_begin();                 965         cpu_maps_update_begin();
1712                                                  966 
1713         if (cpu_hotplug_disabled) {              967         if (cpu_hotplug_disabled) {
1714                 err = -EBUSY;                    968                 err = -EBUSY;
1715                 goto out;                        969                 goto out;
1716         }                                        970         }
1717         if (!cpu_bootable(cpu)) {             << 
1718                 err = -EPERM;                 << 
1719                 goto out;                     << 
1720         }                                     << 
1721                                                  971 
1722         err = _cpu_up(cpu, 0, target);           972         err = _cpu_up(cpu, 0, target);
1723 out:                                             973 out:
1724         cpu_maps_update_done();                  974         cpu_maps_update_done();
1725         return err;                              975         return err;
1726 }                                                976 }
1727                                                  977 
1728 /**                                           !! 978 int cpu_up(unsigned int cpu)
1729  * cpu_device_up - Bring up a cpu device      << 
1730  * @dev: Pointer to the cpu device to online  << 
1731  *                                            << 
1732  * This function is meant to be used by devic << 
1733  *                                            << 
1734  * Other subsystems should use add_cpu() inst << 
1735  *                                            << 
1736  * Return: %0 on success or a negative errno  << 
1737  */                                           << 
1738 int cpu_device_up(struct device *dev)         << 
1739 {                                             << 
1740         return cpu_up(dev->id, CPUHP_ONLINE); << 
1741 }                                             << 
1742                                               << 
1743 int add_cpu(unsigned int cpu)                 << 
1744 {                                                979 {
1745         int ret;                              !! 980         return do_cpu_up(cpu, CPUHP_ONLINE);
1746                                               << 
1747         lock_device_hotplug();                << 
1748         ret = device_online(get_cpu_device(cp << 
1749         unlock_device_hotplug();              << 
1750                                               << 
1751         return ret;                           << 
1752 }                                             << 
1753 EXPORT_SYMBOL_GPL(add_cpu);                   << 
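A minimal usage sketch, not from this file: per the comments on cpu_device_down()/cpu_device_up(), subsystems other than the CPU device core are expected to go through remove_cpu()/add_cpu(); the example_* names below are illustrative only.

#include <linux/cpu.h>
#include <linux/printk.h>

/* Offline a CPU for some exclusive maintenance work, then bring it back. */
static int example_cpu_maintenance(unsigned int cpu)
{
	int ret;

	ret = remove_cpu(cpu);	/* device_offline() under the device hotplug lock */
	if (ret) {
		pr_err("example: failed to offline CPU%u: %d\n", cpu, ret);
		return ret;
	}

	/* ... work that requires the CPU to be fully offline ... */

	ret = add_cpu(cpu);	/* device_online() under the device hotplug lock */
	if (ret)
		pr_err("example: failed to re-online CPU%u: %d\n", cpu, ret);
	return ret;
}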
1754                                               << 
1755 /**                                           << 
1756  * bringup_hibernate_cpu - Bring up the CPU t << 
1757  * @sleep_cpu: The cpu we hibernated on and s << 
1758  *                                            << 
1759  * On some architectures like arm64, we can h << 
1760  * wake up the CPU we hibernated on might be  << 
1761  * using maxcpus= for example.                << 
1762  *                                            << 
1763  * Return: %0 on success or a negative errno  << 
1764  */                                           << 
1765 int bringup_hibernate_cpu(unsigned int sleep_ << 
1766 {                                             << 
1767         int ret;                              << 
1768                                               << 
1769         if (!cpu_online(sleep_cpu)) {         << 
1770                 pr_info("Hibernated on a CPU  << 
1771                 ret = cpu_up(sleep_cpu, CPUHP << 
1772                 if (ret) {                    << 
1773                         pr_err("Failed to bri << 
1774                         return ret;           << 
1775                 }                             << 
1776         }                                     << 
1777         return 0;                             << 
1778 }                                             << 
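A minimal sketch, assuming a resume path that has already recovered the hibernation CPU number from the image header; example_resume_prepare() is illustrative, not an existing caller.

#include <linux/cpu.h>

/* Make sure the CPU we hibernated on is online again before resuming. */
static int example_resume_prepare(unsigned int sleep_cpu)
{
	/* No-op if it is already online, otherwise a full cpu_up(). */
	return bringup_hibernate_cpu(sleep_cpu);
}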
1779                                               << 
1780 static void __init cpuhp_bringup_mask(const s << 
1781                                       enum cp << 
1782 {                                             << 
1783         unsigned int cpu;                     << 
1784                                               << 
1785         for_each_cpu(cpu, mask) {             << 
1786                 struct cpuhp_cpu_state *st =  << 
1787                                               << 
1788                 if (cpu_up(cpu, target) && ca << 
1789                         /*                    << 
1790                          * If this failed the << 
1791                          * rolled back to CPU << 
1792                          * online. Clean it u << 
1793                          */                   << 
1794                         WARN_ON(cpuhp_invoke_ << 
1795                 }                             << 
1796                                               << 
1797                 if (!--ncpus)                 << 
1798                         break;                << 
1799         }                                     << 
1800 }                                             << 
1801                                               << 
1802 #ifdef CONFIG_HOTPLUG_PARALLEL                << 
1803 static bool __cpuhp_parallel_bringup __ro_aft << 
1804                                               << 
1805 static int __init parallel_bringup_parse_para << 
1806 {                                             << 
1807         return kstrtobool(arg, &__cpuhp_paral << 
1808 }                                             << 
1809 early_param("cpuhp.parallel", parallel_bringu << 
1810                                               << 
1811 #ifdef CONFIG_HOTPLUG_SMT                     << 
1812 static inline bool cpuhp_smt_aware(void)      << 
1813 {                                             << 
1814         return cpu_smt_max_threads > 1;       << 
1815 }                                             << 
1816                                               << 
1817 static inline const struct cpumask *cpuhp_get << 
1818 {                                             << 
1819         return cpu_primary_thread_mask;       << 
1820 }                                             << 
1821 #else                                         << 
1822 static inline bool cpuhp_smt_aware(void)      << 
1823 {                                             << 
1824         return false;                         << 
1825 }                                             << 
1826 static inline const struct cpumask *cpuhp_get << 
1827 {                                             << 
1828         return cpu_none_mask;                 << 
1829 }                                             << 
1830 #endif                                        << 
1831                                               << 
1832 bool __weak arch_cpuhp_init_parallel_bringup( << 
1833 {                                             << 
1834         return true;                          << 
1835 }                                             << 
1836                                               << 
1837 /*                                            << 
1838  * On architectures which have enabled parall << 
1839  * prepare states for each of the to be onlin << 
1840  * sends the startup IPI to the APs. The APs  << 
1841  * bringup code in parallel and then wait for << 
1842  * them one by one for the final onlining pro << 
1843  *                                            << 
1844  * This avoids waiting for each AP to respond << 
1845  * CPUHP_BRINGUP_CPU.                         << 
1846  */                                           << 
1847 static bool __init cpuhp_bringup_cpus_paralle << 
1848 {                                             << 
1849         const struct cpumask *mask = cpu_pres << 
1850                                               << 
1851         if (__cpuhp_parallel_bringup)         << 
1852                 __cpuhp_parallel_bringup = ar << 
1853         if (!__cpuhp_parallel_bringup)        << 
1854                 return false;                 << 
1855                                               << 
1856         if (cpuhp_smt_aware()) {              << 
1857                 const struct cpumask *pmask = << 
1858                 static struct cpumask tmp_mas << 
1859                                               << 
1860                 /*                            << 
1861                  * X86 requires to prevent th << 
1862                  * the primary thread does a  << 
1863                  * reasons. Bring the primary << 
1864                  */                           << 
1865                 cpumask_and(&tmp_mask, mask,  << 
1866                 cpuhp_bringup_mask(&tmp_mask, << 
1867                 cpuhp_bringup_mask(&tmp_mask, << 
1868                 /* Account for the online CPU << 
1869                 ncpus -= num_online_cpus();   << 
1870                 if (!ncpus)                   << 
1871                         return true;          << 
1872                 /* Create the mask for second << 
1873                 cpumask_andnot(&tmp_mask, mas << 
1874                 mask = &tmp_mask;             << 
1875         }                                     << 
1876                                               << 
1877         /* Bring the not-yet started CPUs up  << 
1878         cpuhp_bringup_mask(mask, ncpus, CPUHP << 
1879         cpuhp_bringup_mask(mask, ncpus, CPUHP << 
1880         return true;                          << 
1881 }                                             << 
1882 #else                                         << 
1883 static inline bool cpuhp_bringup_cpus_paralle << 
1884 #endif /* CONFIG_HOTPLUG_PARALLEL */          << 
1885                                               << 
1886 void __init bringup_nonboot_cpus(unsigned int << 
1887 {                                             << 
1888         if (!max_cpus)                        << 
1889                 return;                       << 
1890                                               << 
1891         /* Try parallel bringup optimization  << 
1892         if (cpuhp_bringup_cpus_parallel(max_c << 
1893                 return;                       << 
1894                                               << 
1895         /* Full per CPU serialized bringup */ << 
1896         cpuhp_bringup_mask(cpu_present_mask,  << 
1897 }                                                981 }
                                                   >> 982 EXPORT_SYMBOL_GPL(cpu_up);
1898                                                  983 
1899 #ifdef CONFIG_PM_SLEEP_SMP                       984 #ifdef CONFIG_PM_SLEEP_SMP
1900 static cpumask_var_t frozen_cpus;                985 static cpumask_var_t frozen_cpus;
1901                                                  986 
1902 int freeze_secondary_cpus(int primary)           987 int freeze_secondary_cpus(int primary)
1903 {                                                988 {
1904         int cpu, error = 0;                      989         int cpu, error = 0;
1905                                                  990 
1906         cpu_maps_update_begin();                 991         cpu_maps_update_begin();
1907         if (primary == -1) {                  !! 992         if (!cpu_online(primary))
1908                 primary = cpumask_first(cpu_o    993                 primary = cpumask_first(cpu_online_mask);
1909                 if (!housekeeping_cpu(primary << 
1910                         primary = housekeepin << 
1911         } else {                              << 
1912                 if (!cpu_online(primary))     << 
1913                         primary = cpumask_fir << 
1914         }                                     << 
1915                                               << 
1916         /*                                       994         /*
1917          * We take down all of the non-boot C    995          * We take down all of the non-boot CPUs in one shot to avoid races
1918          * with the userspace trying to use t    996          * with the userspace trying to use the CPU hotplug at the same time
1919          */                                      997          */
1920         cpumask_clear(frozen_cpus);              998         cpumask_clear(frozen_cpus);
1921                                                  999 
1922         pr_info("Disabling non-boot CPUs ...\    1000         pr_info("Disabling non-boot CPUs ...\n");
1923         for (cpu = nr_cpu_ids - 1; cpu >= 0;  !! 1001         for_each_online_cpu(cpu) {
1924                 if (!cpu_online(cpu) || cpu = !! 1002                 if (cpu == primary)
1925                         continue;                1003                         continue;
1926                                               << 
1927                 if (pm_wakeup_pending()) {    << 
1928                         pr_info("Wakeup pendi << 
1929                         error = -EBUSY;       << 
1930                         break;                << 
1931                 }                             << 
1932                                               << 
1933                 trace_suspend_resume(TPS("CPU    1004                 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1934                 error = _cpu_down(cpu, 1, CPU    1005                 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1935                 trace_suspend_resume(TPS("CPU    1006                 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1936                 if (!error)                      1007                 if (!error)
1937                         cpumask_set_cpu(cpu,     1008                         cpumask_set_cpu(cpu, frozen_cpus);
1938                 else {                           1009                 else {
1939                         pr_err("Error taking     1010                         pr_err("Error taking CPU%d down: %d\n", cpu, error);
1940                         break;                   1011                         break;
1941                 }                                1012                 }
1942         }                                        1013         }
1943                                                  1014 
1944         if (!error)                              1015         if (!error)
1945                 BUG_ON(num_online_cpus() > 1)    1016                 BUG_ON(num_online_cpus() > 1);
1946         else                                     1017         else
1947                 pr_err("Non-boot CPUs are not    1018                 pr_err("Non-boot CPUs are not disabled\n");
1948                                                  1019 
1949         /*                                       1020         /*
1950          * Make sure the CPUs won't be enable    1021          * Make sure the CPUs won't be enabled by someone else. We need to do
1951          * this even in case of failure as al !! 1022          * this even in case of failure as all disable_nonboot_cpus() users are
1952          * supposed to do thaw_secondary_cpus !! 1023          * supposed to do enable_nonboot_cpus() on the failure path.
1953          */                                      1024          */
1954         cpu_hotplug_disabled++;                  1025         cpu_hotplug_disabled++;
1955                                                  1026 
1956         cpu_maps_update_done();                  1027         cpu_maps_update_done();
1957         return error;                            1028         return error;
1958 }                                                1029 }
1959                                                  1030 
1960 void __weak arch_thaw_secondary_cpus_begin(vo !! 1031 void __weak arch_enable_nonboot_cpus_begin(void)
1961 {                                                1032 {
1962 }                                                1033 }
1963                                                  1034 
1964 void __weak arch_thaw_secondary_cpus_end(void !! 1035 void __weak arch_enable_nonboot_cpus_end(void)
1965 {                                                1036 {
1966 }                                                1037 }
1967                                                  1038 
1968 void thaw_secondary_cpus(void)                !! 1039 void enable_nonboot_cpus(void)
1969 {                                                1040 {
1970         int cpu, error;                          1041         int cpu, error;
1971                                                  1042 
1972         /* Allow everyone to use the CPU hotp    1043         /* Allow everyone to use the CPU hotplug again */
1973         cpu_maps_update_begin();                 1044         cpu_maps_update_begin();
1974         __cpu_hotplug_enable();                  1045         __cpu_hotplug_enable();
1975         if (cpumask_empty(frozen_cpus))          1046         if (cpumask_empty(frozen_cpus))
1976                 goto out;                        1047                 goto out;
1977                                                  1048 
1978         pr_info("Enabling non-boot CPUs ...\n    1049         pr_info("Enabling non-boot CPUs ...\n");
1979                                                  1050 
1980         arch_thaw_secondary_cpus_begin();     !! 1051         arch_enable_nonboot_cpus_begin();
1981                                                  1052 
1982         for_each_cpu(cpu, frozen_cpus) {         1053         for_each_cpu(cpu, frozen_cpus) {
1983                 trace_suspend_resume(TPS("CPU    1054                 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1984                 error = _cpu_up(cpu, 1, CPUHP    1055                 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1985                 trace_suspend_resume(TPS("CPU    1056                 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1986                 if (!error) {                    1057                 if (!error) {
1987                         pr_info("CPU%d is up\    1058                         pr_info("CPU%d is up\n", cpu);
1988                         continue;                1059                         continue;
1989                 }                                1060                 }
1990                 pr_warn("Error taking CPU%d u    1061                 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1991         }                                        1062         }
1992                                                  1063 
1993         arch_thaw_secondary_cpus_end();       !! 1064         arch_enable_nonboot_cpus_end();
1994                                                  1065 
1995         cpumask_clear(frozen_cpus);              1066         cpumask_clear(frozen_cpus);
1996 out:                                             1067 out:
1997         cpu_maps_update_done();                  1068         cpu_maps_update_done();
1998 }                                                1069 }
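A minimal sketch of the pairing contract noted in freeze_secondary_cpus() above: callers are expected to call thaw_secondary_cpus() even on the failure path so the cpu_hotplug_disabled count is balanced again; example_suspend_cpus() is illustrative only.

#include <linux/cpu.h>

static int example_suspend_cpus(void)
{
	int error;

	/* -1: let the helper keep a housekeeping CPU online as the "boot" CPU. */
	error = freeze_secondary_cpus(-1);
	if (error) {
		/* Some CPUs may already be down; bring everything back up. */
		thaw_secondary_cpus();
		return error;
	}

	/* ... single-CPU suspend/hibernate work runs here ... */

	thaw_secondary_cpus();
	return 0;
}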
1999                                                  1070 
2000 static int __init alloc_frozen_cpus(void)        1071 static int __init alloc_frozen_cpus(void)
2001 {                                                1072 {
2002         if (!alloc_cpumask_var(&frozen_cpus,     1073         if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
2003                 return -ENOMEM;                  1074                 return -ENOMEM;
2004         return 0;                                1075         return 0;
2005 }                                                1076 }
2006 core_initcall(alloc_frozen_cpus);                1077 core_initcall(alloc_frozen_cpus);
2007                                                  1078 
2008 /*                                               1079 /*
2009  * When callbacks for CPU hotplug notificatio    1080  * When callbacks for CPU hotplug notifications are being executed, we must
2010  * ensure that the state of the system with r    1081  * ensure that the state of the system with respect to the tasks being frozen
2011  * or not, as reported by the notification, r    1082  * or not, as reported by the notification, remains unchanged *throughout the
2012  * duration* of the execution of the callback    1083  * duration* of the execution of the callbacks.
2013  * Hence we need to prevent the freezer from     1084  * Hence we need to prevent the freezer from racing with regular CPU hotplug.
2014  *                                               1085  *
2015  * This synchronization is implemented by mut    1086  * This synchronization is implemented by mutually excluding regular CPU
2016  * hotplug and Suspend/Hibernate call paths b    1087  * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
2017  * Hibernate notifications.                      1088  * Hibernate notifications.
2018  */                                              1089  */
2019 static int                                       1090 static int
2020 cpu_hotplug_pm_callback(struct notifier_block    1091 cpu_hotplug_pm_callback(struct notifier_block *nb,
2021                         unsigned long action,    1092                         unsigned long action, void *ptr)
2022 {                                                1093 {
2023         switch (action) {                        1094         switch (action) {
2024                                                  1095 
2025         case PM_SUSPEND_PREPARE:                 1096         case PM_SUSPEND_PREPARE:
2026         case PM_HIBERNATION_PREPARE:             1097         case PM_HIBERNATION_PREPARE:
2027                 cpu_hotplug_disable();           1098                 cpu_hotplug_disable();
2028                 break;                           1099                 break;
2029                                                  1100 
2030         case PM_POST_SUSPEND:                    1101         case PM_POST_SUSPEND:
2031         case PM_POST_HIBERNATION:                1102         case PM_POST_HIBERNATION:
2032                 cpu_hotplug_enable();            1103                 cpu_hotplug_enable();
2033                 break;                           1104                 break;
2034                                                  1105 
2035         default:                                 1106         default:
2036                 return NOTIFY_DONE;              1107                 return NOTIFY_DONE;
2037         }                                        1108         }
2038                                                  1109 
2039         return NOTIFY_OK;                        1110         return NOTIFY_OK;
2040 }                                                1111 }
2041                                                  1112 
2042                                                  1113 
2043 static int __init cpu_hotplug_pm_sync_init(vo    1114 static int __init cpu_hotplug_pm_sync_init(void)
2044 {                                                1115 {
2045         /*                                       1116         /*
2046          * cpu_hotplug_pm_callback has higher    1117          * cpu_hotplug_pm_callback has higher priority than x86
2047          * bsp_pm_callback which depends on c    1118          * bsp_pm_callback which depends on cpu_hotplug_pm_callback
2048          * to disable cpu hotplug to avoid cp    1119          * to disable cpu hotplug to avoid cpu hotplug race.
2049          */                                      1120          */
2050         pm_notifier(cpu_hotplug_pm_callback,     1121         pm_notifier(cpu_hotplug_pm_callback, 0);
2051         return 0;                                1122         return 0;
2052 }                                                1123 }
2053 core_initcall(cpu_hotplug_pm_sync_init);         1124 core_initcall(cpu_hotplug_pm_sync_init);
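A minimal sketch of the same exclusion the PM notifier above relies on, used directly by a hypothetical subsystem; example_no_hotplug_section() is illustrative only.

#include <linux/cpu.h>

static void example_no_hotplug_section(void)
{
	/*
	 * Refcounted: subsequent cpu_up()/cpu_down() requests fail with
	 * -EBUSY until the matching cpu_hotplug_enable() drops the count.
	 */
	cpu_hotplug_disable();

	/* ... work that must not race with new hotplug requests ... */

	cpu_hotplug_enable();
}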
2054                                                  1125 
2055 #endif /* CONFIG_PM_SLEEP_SMP */                 1126 #endif /* CONFIG_PM_SLEEP_SMP */
2056                                                  1127 
2057 int __boot_cpu_id;                            << 
2058                                               << 
2059 #endif /* CONFIG_SMP */                          1128 #endif /* CONFIG_SMP */
2060                                                  1129 
2061 /* Boot processor state steps */                 1130 /* Boot processor state steps */
2062 static struct cpuhp_step cpuhp_hp_states[] =  !! 1131 static struct cpuhp_step cpuhp_bp_states[] = {
2063         [CPUHP_OFFLINE] = {                      1132         [CPUHP_OFFLINE] = {
2064                 .name                   = "of    1133                 .name                   = "offline",
2065                 .startup.single         = NUL    1134                 .startup.single         = NULL,
2066                 .teardown.single        = NUL    1135                 .teardown.single        = NULL,
2067         },                                       1136         },
2068 #ifdef CONFIG_SMP                                1137 #ifdef CONFIG_SMP
2069         [CPUHP_CREATE_THREADS]= {                1138         [CPUHP_CREATE_THREADS]= {
2070                 .name                   = "th    1139                 .name                   = "threads:prepare",
2071                 .startup.single         = smp    1140                 .startup.single         = smpboot_create_threads,
2072                 .teardown.single        = NUL    1141                 .teardown.single        = NULL,
2073                 .cant_stop              = tru    1142                 .cant_stop              = true,
2074         },                                       1143         },
2075         [CPUHP_PERF_PREPARE] = {                 1144         [CPUHP_PERF_PREPARE] = {
2076                 .name                   = "pe    1145                 .name                   = "perf:prepare",
2077                 .startup.single         = per    1146                 .startup.single         = perf_event_init_cpu,
2078                 .teardown.single        = per    1147                 .teardown.single        = perf_event_exit_cpu,
2079         },                                       1148         },
2080         [CPUHP_RANDOM_PREPARE] = {            << 
2081                 .name                   = "ra << 
2082                 .startup.single         = ran << 
2083                 .teardown.single        = NUL << 
2084         },                                    << 
2085         [CPUHP_WORKQUEUE_PREP] = {               1149         [CPUHP_WORKQUEUE_PREP] = {
2086                 .name                   = "wo    1150                 .name                   = "workqueue:prepare",
2087                 .startup.single         = wor    1151                 .startup.single         = workqueue_prepare_cpu,
2088                 .teardown.single        = NUL    1152                 .teardown.single        = NULL,
2089         },                                       1153         },
2090         [CPUHP_HRTIMERS_PREPARE] = {             1154         [CPUHP_HRTIMERS_PREPARE] = {
2091                 .name                   = "hr    1155                 .name                   = "hrtimers:prepare",
2092                 .startup.single         = hrt    1156                 .startup.single         = hrtimers_prepare_cpu,
2093                 .teardown.single        = NUL !! 1157                 .teardown.single        = hrtimers_dead_cpu,
2094         },                                       1158         },
2095         [CPUHP_SMPCFD_PREPARE] = {               1159         [CPUHP_SMPCFD_PREPARE] = {
2096                 .name                   = "sm    1160                 .name                   = "smpcfd:prepare",
2097                 .startup.single         = smp    1161                 .startup.single         = smpcfd_prepare_cpu,
2098                 .teardown.single        = smp    1162                 .teardown.single        = smpcfd_dead_cpu,
2099         },                                       1163         },
2100         [CPUHP_RELAY_PREPARE] = {                1164         [CPUHP_RELAY_PREPARE] = {
2101                 .name                   = "re    1165                 .name                   = "relay:prepare",
2102                 .startup.single         = rel    1166                 .startup.single         = relay_prepare_cpu,
2103                 .teardown.single        = NUL    1167                 .teardown.single        = NULL,
2104         },                                       1168         },
                                                   >> 1169         [CPUHP_SLAB_PREPARE] = {
                                                   >> 1170                 .name                   = "slab:prepare",
                                                   >> 1171                 .startup.single         = slab_prepare_cpu,
                                                   >> 1172                 .teardown.single        = slab_dead_cpu,
                                                   >> 1173         },
2105         [CPUHP_RCUTREE_PREP] = {                 1174         [CPUHP_RCUTREE_PREP] = {
2106                 .name                   = "RC    1175                 .name                   = "RCU/tree:prepare",
2107                 .startup.single         = rcu    1176                 .startup.single         = rcutree_prepare_cpu,
2108                 .teardown.single        = rcu    1177                 .teardown.single        = rcutree_dead_cpu,
2109         },                                       1178         },
2110         /*                                       1179         /*
2111          * On the tear-down path, timers_dead    1180          * On the tear-down path, timers_dead_cpu() must be invoked
2112          * before blk_mq_queue_reinit_notify(    1181          * before blk_mq_queue_reinit_notify() from notify_dead(),
2113          * otherwise a RCU stall occurs.         1182          * otherwise a RCU stall occurs.
2114          */                                      1183          */
2115         [CPUHP_TIMERS_PREPARE] = {            !! 1184         [CPUHP_TIMERS_DEAD] = {
2116                 .name                   = "ti !! 1185                 .name                   = "timers:dead",
2117                 .startup.single         = tim !! 1186                 .startup.single         = NULL,
2118                 .teardown.single        = tim    1187                 .teardown.single        = timers_dead_cpu,
2119         },                                       1188         },
2120                                               !! 1189         /* Kicks the plugged cpu into life */
2121 #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP           << 
2122         /*                                    << 
2123          * Kicks the AP alive. AP will wait i << 
2124          * the next step will release it.     << 
2125          */                                   << 
2126         [CPUHP_BP_KICK_AP] = {                << 
2127                 .name                   = "cp << 
2128                 .startup.single         = cpu << 
2129         },                                    << 
2130                                               << 
2131         /*                                    << 
2132          * Waits for the AP to reach cpuhp_ap << 
2133          * releases it for the complete bring << 
2134          */                                   << 
2135         [CPUHP_BRINGUP_CPU] = {                  1190         [CPUHP_BRINGUP_CPU] = {
2136                 .name                   = "cp    1191                 .name                   = "cpu:bringup",
2137                 .startup.single         = cpu !! 1192                 .startup.single         = bringup_cpu,
2138                 .teardown.single        = fin !! 1193                 .teardown.single        = NULL,
2139                 .cant_stop              = tru    1194                 .cant_stop              = true,
2140         },                                       1195         },
2141 #else                                         !! 1196         [CPUHP_AP_SMPCFD_DYING] = {
                                                   >> 1197                 .name                   = "smpcfd:dying",
                                                   >> 1198                 .startup.single         = NULL,
                                                   >> 1199                 .teardown.single        = smpcfd_dying_cpu,
                                                   >> 1200         },
2142         /*                                       1201         /*
2143          * All-in-one CPU bringup state which !! 1202          * Handled on control processor until the plugged processor manages
                                                   >> 1203          * this itself.
2144          */                                      1204          */
2145         [CPUHP_BRINGUP_CPU] = {               !! 1205         [CPUHP_TEARDOWN_CPU] = {
2146                 .name                   = "cp !! 1206                 .name                   = "cpu:teardown",
2147                 .startup.single         = bri !! 1207                 .startup.single         = NULL,
2148                 .teardown.single        = fin !! 1208                 .teardown.single        = takedown_cpu,
2149                 .cant_stop              = tru    1209                 .cant_stop              = true,
2150         },                                       1210         },
                                                   >> 1211 #else
                                                   >> 1212         [CPUHP_BRINGUP_CPU] = { },
2151 #endif                                           1213 #endif
                                                   >> 1214 };
                                                   >> 1215 
                                                   >> 1216 /* Application processor state steps */
                                                   >> 1217 static struct cpuhp_step cpuhp_ap_states[] = {
                                                   >> 1218 #ifdef CONFIG_SMP
2152         /* Final state before CPU kills itsel    1219         /* Final state before CPU kills itself */
2153         [CPUHP_AP_IDLE_DEAD] = {                 1220         [CPUHP_AP_IDLE_DEAD] = {
2154                 .name                   = "id    1221                 .name                   = "idle:dead",
2155         },                                       1222         },
2156         /*                                       1223         /*
2157          * Last state before CPU enters the i    1224          * Last state before CPU enters the idle loop to die. Transient state
2158          * for synchronization.                  1225          * for synchronization.
2159          */                                      1226          */
2160         [CPUHP_AP_OFFLINE] = {                   1227         [CPUHP_AP_OFFLINE] = {
2161                 .name                   = "ap    1228                 .name                   = "ap:offline",
2162                 .cant_stop              = tru    1229                 .cant_stop              = true,
2163         },                                       1230         },
2164         /* First state is scheduler control.     1231         /* First state is scheduler control. Interrupts are disabled */
2165         [CPUHP_AP_SCHED_STARTING] = {            1232         [CPUHP_AP_SCHED_STARTING] = {
2166                 .name                   = "sc    1233                 .name                   = "sched:starting",
2167                 .startup.single         = sch    1234                 .startup.single         = sched_cpu_starting,
2168                 .teardown.single        = sch    1235                 .teardown.single        = sched_cpu_dying,
2169         },                                       1236         },
2170         [CPUHP_AP_RCUTREE_DYING] = {             1237         [CPUHP_AP_RCUTREE_DYING] = {
2171                 .name                   = "RC    1238                 .name                   = "RCU/tree:dying",
2172                 .startup.single         = NUL    1239                 .startup.single         = NULL,
2173                 .teardown.single        = rcu    1240                 .teardown.single        = rcutree_dying_cpu,
2174         },                                       1241         },
2175         [CPUHP_AP_SMPCFD_DYING] = {           << 
2176                 .name                   = "sm << 
2177                 .startup.single         = NUL << 
2178                 .teardown.single        = smp << 
2179         },                                    << 
2180         [CPUHP_AP_HRTIMERS_DYING] = {         << 
2181                 .name                   = "hr << 
2182                 .startup.single         = NUL << 
2183                 .teardown.single        = hrt << 
2184         },                                    << 
2185         [CPUHP_AP_TICK_DYING] = {             << 
2186                 .name                   = "ti << 
2187                 .startup.single         = NUL << 
2188                 .teardown.single        = tic << 
2189         },                                    << 
2190         /* Entry state on starting. Interrupt    1242         /* Entry state on starting. Interrupts enabled from here on. Transient
2191          * state for synchronization */          1243          * state for synchronization */
2192         [CPUHP_AP_ONLINE] = {                    1244         [CPUHP_AP_ONLINE] = {
2193                 .name                   = "ap    1245                 .name                   = "ap:online",
2194         },                                       1246         },
2195         /*                                    << 
2196          * Handled on control processor until << 
2197          * this itself.                       << 
2198          */                                   << 
2199         [CPUHP_TEARDOWN_CPU] = {              << 
2200                 .name                   = "cp << 
2201                 .startup.single         = NUL << 
2202                 .teardown.single        = tak << 
2203                 .cant_stop              = tru << 
2204         },                                    << 
2205                                               << 
2206         [CPUHP_AP_SCHED_WAIT_EMPTY] = {       << 
2207                 .name                   = "sc << 
2208                 .startup.single         = NUL << 
2209                 .teardown.single        = sch << 
2210         },                                    << 
2211                                               << 
2212         /* Handle smpboot threads park/unpark    1247         /* Handle smpboot threads park/unpark */
2213         [CPUHP_AP_SMPBOOT_THREADS] = {           1248         [CPUHP_AP_SMPBOOT_THREADS] = {
2214                 .name                   = "sm    1249                 .name                   = "smpboot/threads:online",
2215                 .startup.single         = smp    1250                 .startup.single         = smpboot_unpark_threads,
2216                 .teardown.single        = smp << 
2217         },                                    << 
2218         [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {    << 
2219                 .name                   = "ir << 
2220                 .startup.single         = irq << 
2221                 .teardown.single        = NUL    1251                 .teardown.single        = NULL,
2222         },                                       1252         },
2223         [CPUHP_AP_PERF_ONLINE] = {               1253         [CPUHP_AP_PERF_ONLINE] = {
2224                 .name                   = "pe    1254                 .name                   = "perf:online",
2225                 .startup.single         = per    1255                 .startup.single         = perf_event_init_cpu,
2226                 .teardown.single        = per    1256                 .teardown.single        = perf_event_exit_cpu,
2227         },                                       1257         },
2228         [CPUHP_AP_WATCHDOG_ONLINE] = {        << 
2229                 .name                   = "lo << 
2230                 .startup.single         = loc << 
2231                 .teardown.single        = loc << 
2232         },                                    << 
2233         [CPUHP_AP_WORKQUEUE_ONLINE] = {          1258         [CPUHP_AP_WORKQUEUE_ONLINE] = {
2234                 .name                   = "wo    1259                 .name                   = "workqueue:online",
2235                 .startup.single         = wor    1260                 .startup.single         = workqueue_online_cpu,
2236                 .teardown.single        = wor    1261                 .teardown.single        = workqueue_offline_cpu,
2237         },                                       1262         },
2238         [CPUHP_AP_RANDOM_ONLINE] = {          << 
2239                 .name                   = "ra << 
2240                 .startup.single         = ran << 
2241                 .teardown.single        = NUL << 
2242         },                                    << 
2243         [CPUHP_AP_RCUTREE_ONLINE] = {            1263         [CPUHP_AP_RCUTREE_ONLINE] = {
2244                 .name                   = "RC    1264                 .name                   = "RCU/tree:online",
2245                 .startup.single         = rcu    1265                 .startup.single         = rcutree_online_cpu,
2246                 .teardown.single        = rcu    1266                 .teardown.single        = rcutree_offline_cpu,
2247         },                                       1267         },
2248 #endif                                           1268 #endif
2249         /*                                       1269         /*
2250          * The dynamically registered state s    1270          * The dynamically registered state space is here
2251          */                                      1271          */
2252                                                  1272 
2253 #ifdef CONFIG_SMP                                1273 #ifdef CONFIG_SMP
2254         /* Last state is scheduler control se    1274         /* Last state is scheduler control setting the cpu active */
2255         [CPUHP_AP_ACTIVE] = {                    1275         [CPUHP_AP_ACTIVE] = {
2256                 .name                   = "sc    1276                 .name                   = "sched:active",
2257                 .startup.single         = sch    1277                 .startup.single         = sched_cpu_activate,
2258                 .teardown.single        = sch    1278                 .teardown.single        = sched_cpu_deactivate,
2259         },                                       1279         },
2260 #endif                                           1280 #endif
2261                                                  1281 
2262         /* CPU is fully up and running. */       1282         /* CPU is fully up and running. */
2263         [CPUHP_ONLINE] = {                       1283         [CPUHP_ONLINE] = {
2264                 .name                   = "on    1284                 .name                   = "online",
2265                 .startup.single         = NUL    1285                 .startup.single         = NULL,
2266                 .teardown.single        = NUL    1286                 .teardown.single        = NULL,
2267         },                                       1287         },
2268 };                                               1288 };
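
Drivers normally do not edit the table above; they hook into the "dynamically registered state space" it reserves by requesting CPUHP_AP_ONLINE_DYN at run time. A minimal sketch of such a registration follows, assuming only the cpuhp_setup_state()/cpuhp_remove_state() wrappers from <linux/cpuhotplug.h>; the foo_* names are hypothetical:

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state foo_hp_state;   /* dynamically allocated state number */

static int foo_cpu_online(unsigned int cpu)
{
        /* set up per-CPU resources for @cpu; may fail and trigger rollback */
        return 0;
}

static int foo_cpu_offline(unsigned int cpu)
{
        /* tear down per-CPU resources for @cpu; must not fail */
        return 0;
}

static int __init foo_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
                                foo_cpu_online, foo_cpu_offline);
        if (ret < 0)
                return ret;
        /* For the _DYN ranges the allocated state number is returned */
        foo_hp_state = ret;
        return 0;
}

static void __exit foo_exit(void)
{
        /* Invokes foo_cpu_offline() on all online CPUs, then frees the slot */
        cpuhp_remove_state(foo_hp_state);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

Because this wrapper passes invoke=true, foo_cpu_online() is called immediately for every CPU that is already at or beyond the new state, which is why the rollback helpers further down exist.
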
2269                                                  1289 
2270 /* Sanity check for callbacks */                 1290 /* Sanity check for callbacks */
2271 static int cpuhp_cb_check(enum cpuhp_state st    1291 static int cpuhp_cb_check(enum cpuhp_state state)
2272 {                                                1292 {
2273         if (state <= CPUHP_OFFLINE || state >    1293         if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
2274                 return -EINVAL;                  1294                 return -EINVAL;
2275         return 0;                                1295         return 0;
2276 }                                                1296 }
2277                                                  1297 
2278 /*                                               1298 /*
2279  * Returns a free slot for dynamic assignment    1299  * Returns a free slot for dynamic assignment of the Online state. The states
2280  * are protected by the cpuhp_state_mutex and    1300  * are protected by the cpuhp_state_mutex and an empty slot is identified
2281  * by having no name assigned.                   1301  * by having no name assigned.
2282  */                                              1302  */
2283 static int cpuhp_reserve_state(enum cpuhp_sta    1303 static int cpuhp_reserve_state(enum cpuhp_state state)
2284 {                                                1304 {
2285         enum cpuhp_state i, end;                 1305         enum cpuhp_state i, end;
2286         struct cpuhp_step *step;                 1306         struct cpuhp_step *step;
2287                                                  1307 
2288         switch (state) {                         1308         switch (state) {
2289         case CPUHP_AP_ONLINE_DYN:                1309         case CPUHP_AP_ONLINE_DYN:
2290                 step = cpuhp_hp_states + CPUH !! 1310                 step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
2291                 end = CPUHP_AP_ONLINE_DYN_END    1311                 end = CPUHP_AP_ONLINE_DYN_END;
2292                 break;                           1312                 break;
2293         case CPUHP_BP_PREPARE_DYN:               1313         case CPUHP_BP_PREPARE_DYN:
2294                 step = cpuhp_hp_states + CPUH !! 1314                 step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
2295                 end = CPUHP_BP_PREPARE_DYN_EN    1315                 end = CPUHP_BP_PREPARE_DYN_END;
2296                 break;                           1316                 break;
2297         default:                                 1317         default:
2298                 return -EINVAL;                  1318                 return -EINVAL;
2299         }                                        1319         }
2300                                                  1320 
2301         for (i = state; i <= end; i++, step++    1321         for (i = state; i <= end; i++, step++) {
2302                 if (!step->name)                 1322                 if (!step->name)
2303                         return i;                1323                         return i;
2304         }                                        1324         }
2305         WARN(1, "No more dynamic states avail    1325         WARN(1, "No more dynamic states available for CPU hotplug\n");
2306         return -ENOSPC;                          1326         return -ENOSPC;
2307 }                                                1327 }
2308                                                  1328 
2309 static int cpuhp_store_callbacks(enum cpuhp_s    1329 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
2310                                  int (*startu    1330                                  int (*startup)(unsigned int cpu),
2311                                  int (*teardo    1331                                  int (*teardown)(unsigned int cpu),
2312                                  bool multi_i    1332                                  bool multi_instance)
2313 {                                                1333 {
2314         /* (Un)Install the callbacks for furt    1334         /* (Un)Install the callbacks for further cpu hotplug operations */
2315         struct cpuhp_step *sp;                   1335         struct cpuhp_step *sp;
2316         int ret = 0;                             1336         int ret = 0;
2317                                                  1337 
2318         /*                                    !! 1338         if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
2319          * If name is NULL, then the state ge << 
2320          *                                    << 
2321          * CPUHP_AP_ONLINE_DYN and CPUHP_BP_P << 
2322          * the first allocation from these dy << 
2323          * would trigger a new allocation and << 
2324          * empty) state, leaving the callback << 
2325          * dangling, which causes wreckage on << 
2326          */                                   << 
2327         if (name && (state == CPUHP_AP_ONLINE << 
2328                      state == CPUHP_BP_PREPAR << 
2329                 ret = cpuhp_reserve_state(sta    1339                 ret = cpuhp_reserve_state(state);
2330                 if (ret < 0)                     1340                 if (ret < 0)
2331                         return ret;              1341                         return ret;
2332                 state = ret;                     1342                 state = ret;
2333         }                                        1343         }
2334         sp = cpuhp_get_step(state);              1344         sp = cpuhp_get_step(state);
2335         if (name && sp->name)                    1345         if (name && sp->name)
2336                 return -EBUSY;                   1346                 return -EBUSY;
2337                                                  1347 
2338         sp->startup.single = startup;            1348         sp->startup.single = startup;
2339         sp->teardown.single = teardown;          1349         sp->teardown.single = teardown;
2340         sp->name = name;                         1350         sp->name = name;
2341         sp->multi_instance = multi_instance;     1351         sp->multi_instance = multi_instance;
2342         INIT_HLIST_HEAD(&sp->list);              1352         INIT_HLIST_HEAD(&sp->list);
2343         return ret;                              1353         return ret;
2344 }                                                1354 }
2345                                                  1355 
2346 static void *cpuhp_get_teardown_cb(enum cpuhp    1356 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
2347 {                                                1357 {
2348         return cpuhp_get_step(state)->teardow    1358         return cpuhp_get_step(state)->teardown.single;
2349 }                                                1359 }
2350                                                  1360 
2351 /*                                               1361 /*
2352  * Call the startup/teardown function for a s    1362  * Call the startup/teardown function for a step either on the AP or
2353  * on the current CPU.                           1363  * on the current CPU.
2354  */                                              1364  */
2355 static int cpuhp_issue_call(int cpu, enum cpu    1365 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
2356                             struct hlist_node    1366                             struct hlist_node *node)
2357 {                                                1367 {
2358         struct cpuhp_step *sp = cpuhp_get_ste    1368         struct cpuhp_step *sp = cpuhp_get_step(state);
2359         int ret;                                 1369         int ret;
2360                                                  1370 
2361         /*                                    !! 1371         if ((bringup && !sp->startup.single) ||
2362          * If there's nothing to do, we're done. !! 1371         if ((bringup && !sp->startup.single) ||
2363          * Relies on the union for multi_inst << 
2364          */                                   << 
2365         if (cpuhp_step_empty(bringup, sp))    << 
2366                 return 0;                        1373                 return 0;
2367         /*                                       1374         /*
2368          * The non AP bound callbacks can fai    1375          * The non AP bound callbacks can fail on bringup. On teardown
2369          * e.g. module removal we crash for n    1376          * e.g. module removal we crash for now.
2370          */                                      1377          */
2371 #ifdef CONFIG_SMP                                1378 #ifdef CONFIG_SMP
2372         if (cpuhp_is_ap_state(state))            1379         if (cpuhp_is_ap_state(state))
2373                 ret = cpuhp_invoke_ap_callbac    1380                 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
2374         else                                     1381         else
2375                 ret = cpuhp_invoke_callback(c !! 1382                 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
2376 #else                                            1383 #else
2377         ret = cpuhp_invoke_callback(cpu, stat !! 1384         ret = cpuhp_invoke_callback(cpu, state, bringup, node);
2378 #endif                                           1385 #endif
2379         BUG_ON(ret && !bringup);                 1386         BUG_ON(ret && !bringup);
2380         return ret;                              1387         return ret;
2381 }                                                1388 }
2382                                                  1389 
2383 /*                                               1390 /*
2384  * Called from __cpuhp_setup_state on a recov    1391  * Called from __cpuhp_setup_state on a recoverable failure.
2385  *                                               1392  *
2386  * Note: The teardown callbacks for rollback     1393  * Note: The teardown callbacks for rollback are not allowed to fail!
2387  */                                              1394  */
2388 static void cpuhp_rollback_install(int failed    1395 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
2389                                    struct hli    1396                                    struct hlist_node *node)
2390 {                                                1397 {
2391         int cpu;                                 1398         int cpu;
2392                                                  1399 
2393         /* Roll back the already executed ste    1400         /* Roll back the already executed steps on the other cpus */
2394         for_each_present_cpu(cpu) {              1401         for_each_present_cpu(cpu) {
2395                 struct cpuhp_cpu_state *st =     1402                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2396                 int cpustate = st->state;        1403                 int cpustate = st->state;
2397                                                  1404 
2398                 if (cpu >= failedcpu)            1405                 if (cpu >= failedcpu)
2399                         break;                   1406                         break;
2400                                                  1407 
2401                 /* Did we invoke the startup     1408                 /* Did we invoke the startup call on that cpu ? */
2402                 if (cpustate >= state)           1409                 if (cpustate >= state)
2403                         cpuhp_issue_call(cpu,    1410                         cpuhp_issue_call(cpu, state, false, node);
2404         }                                        1411         }
2405 }                                                1412 }
2406                                                  1413 
2407 int __cpuhp_state_add_instance_cpuslocked(enu !! 1414 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
2408                                           str !! 1415                                bool invoke)
2409                                           boo << 
2410 {                                                1416 {
2411         struct cpuhp_step *sp;                   1417         struct cpuhp_step *sp;
2412         int cpu;                                 1418         int cpu;
2413         int ret;                                 1419         int ret;
2414                                                  1420 
2415         lockdep_assert_cpus_held();           << 
2416                                               << 
2417         sp = cpuhp_get_step(state);              1421         sp = cpuhp_get_step(state);
2418         if (sp->multi_instance == false)         1422         if (sp->multi_instance == false)
2419                 return -EINVAL;                  1423                 return -EINVAL;
2420                                                  1424 
                                                   >> 1425         get_online_cpus();
2421         mutex_lock(&cpuhp_state_mutex);          1426         mutex_lock(&cpuhp_state_mutex);
2422                                                  1427 
2423         if (!invoke || !sp->startup.multi)       1428         if (!invoke || !sp->startup.multi)
2424                 goto add_node;                   1429                 goto add_node;
2425                                                  1430 
2426         /*                                       1431         /*
2427          * Try to call the startup callback f    1432          * Try to call the startup callback for each present cpu
2428          * depending on the hotplug state of     1433          * depending on the hotplug state of the cpu.
2429          */                                      1434          */
2430         for_each_present_cpu(cpu) {              1435         for_each_present_cpu(cpu) {
2431                 struct cpuhp_cpu_state *st =     1436                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2432                 int cpustate = st->state;        1437                 int cpustate = st->state;
2433                                                  1438 
2434                 if (cpustate < state)            1439                 if (cpustate < state)
2435                         continue;                1440                         continue;
2436                                                  1441 
2437                 ret = cpuhp_issue_call(cpu, s    1442                 ret = cpuhp_issue_call(cpu, state, true, node);
2438                 if (ret) {                       1443                 if (ret) {
2439                         if (sp->teardown.mult    1444                         if (sp->teardown.multi)
2440                                 cpuhp_rollbac    1445                                 cpuhp_rollback_install(cpu, state, node);
2441                         goto unlock;             1446                         goto unlock;
2442                 }                                1447                 }
2443         }                                        1448         }
2444 add_node:                                        1449 add_node:
2445         ret = 0;                                 1450         ret = 0;
2446         hlist_add_head(node, &sp->list);         1451         hlist_add_head(node, &sp->list);
2447 unlock:                                          1452 unlock:
2448         mutex_unlock(&cpuhp_state_mutex);        1453         mutex_unlock(&cpuhp_state_mutex);
2449         return ret;                           !! 1454         put_online_cpus();
2450 }                                             << 
2451                                               << 
2452 int __cpuhp_state_add_instance(enum cpuhp_sta << 
2453                                bool invoke)   << 
2454 {                                             << 
2455         int ret;                              << 
2456                                               << 
2457         cpus_read_lock();                     << 
2458         ret = __cpuhp_state_add_instance_cpus << 
2459         cpus_read_unlock();                   << 
2460         return ret;                              1455         return ret;
2461 }                                                1456 }
2462 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance)    1457 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
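
For per-object callbacks, the multi-instance machinery above is reached through the cpuhp_setup_state_multi()/cpuhp_state_add_instance() wrappers from <linux/cpuhotplug.h>. A hedged sketch, with hypothetical foo_* names, of a driver linking one hlist_node per device into a single dynamic state:

#include <linux/cpuhotplug.h>
#include <linux/list.h>

struct foo_device {
        struct hlist_node node;         /* linked into the state's instance list */
        /* per-device data ... */
};

static enum cpuhp_state foo_multi_state;

static int foo_dev_online(unsigned int cpu, struct hlist_node *node)
{
        struct foo_device *fdev = hlist_entry(node, struct foo_device, node);

        /* bring this particular instance up on @cpu */
        (void)fdev;
        return 0;
}

static int foo_dev_offline(unsigned int cpu, struct hlist_node *node)
{
        return 0;
}

static int foo_driver_init(void)
{
        int ret;

        /* Registers the callbacks only; no CPU is touched yet (invoke=false) */
        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo/dev:online",
                                      foo_dev_online, foo_dev_offline);
        if (ret < 0)
                return ret;
        foo_multi_state = ret;
        return 0;
}

static int foo_probe(struct foo_device *fdev)
{
        /* runs foo_dev_online(cpu, &fdev->node) for all CPUs >= the state */
        return cpuhp_state_add_instance(foo_multi_state, &fdev->node);
}

static void foo_remove(struct foo_device *fdev)
{
        cpuhp_state_remove_instance(foo_multi_state, &fdev->node);
}
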
2463                                                  1458 
2464 /**                                              1459 /**
2465  * __cpuhp_setup_state_cpuslocked - Setup the !! 1460  * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
2466  * @state:              The state to setup       1461  * @state:              The state to setup
2467  * @name:               Name of the step      << 
2468  * @invoke:             If true, the startup     1462  * @invoke:             If true, the startup function is invoked for cpus where
2469  *                      cpu state >= @state      1463  *                      cpu state >= @state
2470  * @startup:            startup callback func    1464  * @startup:            startup callback function
2471  * @teardown:           teardown callback fun    1465  * @teardown:           teardown callback function
2472  * @multi_instance:     State is set up for m    1466  * @multi_instance:     State is set up for multiple instances which get
2473  *                      added afterwards.        1467  *                      added afterwards.
2474  *                                               1468  *
2475  * The caller needs to hold cpus read locked  !! 1469  * Returns:
2476  * Return:                                    << 
2477  *   On success:                                 1470  *   On success:
2478  *      Positive state number if @state is CP !! 1471  *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
2479  *      0 for all other states                   1472  *      0 for all other states
2480  *   On failure: proper (negative) error code    1473  *   On failure: proper (negative) error code
2481  */                                              1474  */
2482 int __cpuhp_setup_state_cpuslocked(enum cpuhp !! 1475 int __cpuhp_setup_state(enum cpuhp_state state,
2483                                    const char !! 1476                         const char *name, bool invoke,
2484                                    int (*star !! 1477                         int (*startup)(unsigned int cpu),
2485                                    int (*tear !! 1478                         int (*teardown)(unsigned int cpu),
2486                                    bool multi !! 1479                         bool multi_instance)
2487 {                                                1480 {
2488         int cpu, ret = 0;                        1481         int cpu, ret = 0;
2489         bool dynstate;                           1482         bool dynstate;
2490                                                  1483 
2491         lockdep_assert_cpus_held();           << 
2492                                               << 
2493         if (cpuhp_cb_check(state) || !name)      1484         if (cpuhp_cb_check(state) || !name)
2494                 return -EINVAL;                  1485                 return -EINVAL;
2495                                                  1486 
                                                   >> 1487         get_online_cpus();
2496         mutex_lock(&cpuhp_state_mutex);          1488         mutex_lock(&cpuhp_state_mutex);
2497                                                  1489 
2498         ret = cpuhp_store_callbacks(state, na    1490         ret = cpuhp_store_callbacks(state, name, startup, teardown,
2499                                     multi_ins    1491                                     multi_instance);
2500                                                  1492 
2501         dynstate = state == CPUHP_AP_ONLINE_D !! 1493         dynstate = state == CPUHP_AP_ONLINE_DYN;
2502         if (ret > 0 && dynstate) {               1494         if (ret > 0 && dynstate) {
2503                 state = ret;                     1495                 state = ret;
2504                 ret = 0;                         1496                 ret = 0;
2505         }                                        1497         }
2506                                                  1498 
2507         if (ret || !invoke || !startup)          1499         if (ret || !invoke || !startup)
2508                 goto out;                        1500                 goto out;
2509                                                  1501 
2510         /*                                       1502         /*
2511          * Try to call the startup callback f    1503          * Try to call the startup callback for each present cpu
2512          * depending on the hotplug state of     1504          * depending on the hotplug state of the cpu.
2513          */                                      1505          */
2514         for_each_present_cpu(cpu) {              1506         for_each_present_cpu(cpu) {
2515                 struct cpuhp_cpu_state *st =     1507                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2516                 int cpustate = st->state;        1508                 int cpustate = st->state;
2517                                                  1509 
2518                 if (cpustate < state)            1510                 if (cpustate < state)
2519                         continue;                1511                         continue;
2520                                                  1512 
2521                 ret = cpuhp_issue_call(cpu, s    1513                 ret = cpuhp_issue_call(cpu, state, true, NULL);
2522                 if (ret) {                       1514                 if (ret) {
2523                         if (teardown)            1515                         if (teardown)
2524                                 cpuhp_rollbac    1516                                 cpuhp_rollback_install(cpu, state, NULL);
2525                         cpuhp_store_callbacks    1517                         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2526                         goto out;                1518                         goto out;
2527                 }                                1519                 }
2528         }                                        1520         }
2529 out:                                             1521 out:
2530         mutex_unlock(&cpuhp_state_mutex);        1522         mutex_unlock(&cpuhp_state_mutex);
                                                   >> 1523         put_online_cpus();
2531         /*                                       1524         /*
2532          * If the requested state is CPUHP_AP !! 1525          * If the requested state is CPUHP_AP_ONLINE_DYN, return the
2533          * return the dynamically allocated s !! 1526          * dynamically allocated state in case of success.
2534          */                                      1527          */
2535         if (!ret && dynstate)                    1528         if (!ret && dynstate)
2536                 return state;                    1529                 return state;
2537         return ret;                              1530         return ret;
2538 }                                                1531 }
2539 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked) << 
2540                                               << 
2541 int __cpuhp_setup_state(enum cpuhp_state stat << 
2542                         const char *name, boo << 
2543                         int (*startup)(unsign << 
2544                         int (*teardown)(unsig << 
2545                         bool multi_instance)  << 
2546 {                                             << 
2547         int ret;                              << 
2548                                               << 
2549         cpus_read_lock();                     << 
2550         ret = __cpuhp_setup_state_cpuslocked( << 
2551                                               << 
2552         cpus_read_unlock();                   << 
2553         return ret;                           << 
2554 }                                             << 
2555 EXPORT_SYMBOL(__cpuhp_setup_state);              1532 EXPORT_SYMBOL(__cpuhp_setup_state);
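
When the caller already holds the CPU hotplug read lock, for example because it scans online CPUs while registering, the _cpuslocked variant above has to be used so cpus_read_lock() is not taken recursively. A small sketch under that assumption; the bar_* names are hypothetical and only the cpuhp_setup_state_cpuslocked() wrapper and cpus_read_lock()/cpus_read_unlock() are assumed:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int bar_cpu_online(unsigned int cpu)  { return 0; }
static int bar_cpu_offline(unsigned int cpu) { return 0; }

static int bar_register(void)
{
        int ret;

        cpus_read_lock();
        /* ... work that needs the CPU set stable, e.g. scanning online CPUs ... */
        ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN, "bar:online",
                                           bar_cpu_online, bar_cpu_offline);
        cpus_read_unlock();

        return ret < 0 ? ret : 0;
}
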
2556                                                  1533 
2557 int __cpuhp_state_remove_instance(enum cpuhp_    1534 int __cpuhp_state_remove_instance(enum cpuhp_state state,
2558                                   struct hlis    1535                                   struct hlist_node *node, bool invoke)
2559 {                                                1536 {
2560         struct cpuhp_step *sp = cpuhp_get_ste    1537         struct cpuhp_step *sp = cpuhp_get_step(state);
2561         int cpu;                                 1538         int cpu;
2562                                                  1539 
2563         BUG_ON(cpuhp_cb_check(state));           1540         BUG_ON(cpuhp_cb_check(state));
2564                                                  1541 
2565         if (!sp->multi_instance)                 1542         if (!sp->multi_instance)
2566                 return -EINVAL;                  1543                 return -EINVAL;
2567                                                  1544 
2568         cpus_read_lock();                     !! 1545         get_online_cpus();
2569         mutex_lock(&cpuhp_state_mutex);          1546         mutex_lock(&cpuhp_state_mutex);
2570                                                  1547 
2571         if (!invoke || !cpuhp_get_teardown_cb    1548         if (!invoke || !cpuhp_get_teardown_cb(state))
2572                 goto remove;                     1549                 goto remove;
2573         /*                                       1550         /*
2574          * Call the teardown callback for eac    1551          * Call the teardown callback for each present cpu depending
2575          * on the hotplug state of the cpu. T    1552          * on the hotplug state of the cpu. This function is not
2576          * allowed to fail currently!            1553          * allowed to fail currently!
2577          */                                      1554          */
2578         for_each_present_cpu(cpu) {              1555         for_each_present_cpu(cpu) {
2579                 struct cpuhp_cpu_state *st =     1556                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2580                 int cpustate = st->state;        1557                 int cpustate = st->state;
2581                                                  1558 
2582                 if (cpustate >= state)           1559                 if (cpustate >= state)
2583                         cpuhp_issue_call(cpu,    1560                         cpuhp_issue_call(cpu, state, false, node);
2584         }                                        1561         }
2585                                                  1562 
2586 remove:                                          1563 remove:
2587         hlist_del(node);                         1564         hlist_del(node);
2588         mutex_unlock(&cpuhp_state_mutex);        1565         mutex_unlock(&cpuhp_state_mutex);
2589         cpus_read_unlock();                   !! 1566         put_online_cpus();
2590                                                  1567 
2591         return 0;                                1568         return 0;
2592 }                                                1569 }
2593 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instan    1570 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
2594                                                  1571 
2595 /**                                              1572 /**
2596  * __cpuhp_remove_state_cpuslocked - Remove t !! 1573  * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
2597  * @state:      The state to remove              1574  * @state:      The state to remove
2598  * @invoke:     If true, the teardown functio    1575  * @invoke:     If true, the teardown function is invoked for cpus where
2599  *              cpu state >= @state              1576  *              cpu state >= @state
2600  *                                               1577  *
2601  * The caller needs to hold cpus read locked  << 
2602  * The teardown callback is currently not all    1578  * The teardown callback is currently not allowed to fail. Think
2603  * about module removal!                         1579  * about module removal!
2604  */                                              1580  */
2605 void __cpuhp_remove_state_cpuslocked(enum cpu !! 1581 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2606 {                                                1582 {
2607         struct cpuhp_step *sp = cpuhp_get_ste    1583         struct cpuhp_step *sp = cpuhp_get_step(state);
2608         int cpu;                                 1584         int cpu;
2609                                                  1585 
2610         BUG_ON(cpuhp_cb_check(state));           1586         BUG_ON(cpuhp_cb_check(state));
2611                                                  1587 
2612         lockdep_assert_cpus_held();           !! 1588         get_online_cpus();
2613                                                  1589 
2614         mutex_lock(&cpuhp_state_mutex);          1590         mutex_lock(&cpuhp_state_mutex);
2615         if (sp->multi_instance) {                1591         if (sp->multi_instance) {
2616                 WARN(!hlist_empty(&sp->list),    1592                 WARN(!hlist_empty(&sp->list),
2617                      "Error: Removing state %    1593                      "Error: Removing state %d which has instances left.\n",
2618                      state);                     1594                      state);
2619                 goto remove;                     1595                 goto remove;
2620         }                                        1596         }
2621                                                  1597 
2622         if (!invoke || !cpuhp_get_teardown_cb    1598         if (!invoke || !cpuhp_get_teardown_cb(state))
2623                 goto remove;                     1599                 goto remove;
2624                                                  1600 
2625         /*                                       1601         /*
2626          * Call the teardown callback for eac    1602          * Call the teardown callback for each present cpu depending
2627          * on the hotplug state of the cpu. T    1603          * on the hotplug state of the cpu. This function is not
2628          * allowed to fail currently!            1604          * allowed to fail currently!
2629          */                                      1605          */
2630         for_each_present_cpu(cpu) {              1606         for_each_present_cpu(cpu) {
2631                 struct cpuhp_cpu_state *st =     1607                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2632                 int cpustate = st->state;        1608                 int cpustate = st->state;
2633                                                  1609 
2634                 if (cpustate >= state)           1610                 if (cpustate >= state)
2635                         cpuhp_issue_call(cpu,    1611                         cpuhp_issue_call(cpu, state, false, NULL);
2636         }                                        1612         }
2637 remove:                                          1613 remove:
2638         cpuhp_store_callbacks(state, NULL, NU    1614         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2639         mutex_unlock(&cpuhp_state_mutex);        1615         mutex_unlock(&cpuhp_state_mutex);
2640 }                                             !! 1616         put_online_cpus();
2641 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked << 
2642                                               << 
2643 void __cpuhp_remove_state(enum cpuhp_state st << 
2644 {                                             << 
2645         cpus_read_lock();                     << 
2646         __cpuhp_remove_state_cpuslocked(state << 
2647         cpus_read_unlock();                   << 
2648 }                                                1617 }
2649 EXPORT_SYMBOL(__cpuhp_remove_state);             1618 EXPORT_SYMBOL(__cpuhp_remove_state);
2650                                                  1619 
2651 #ifdef CONFIG_HOTPLUG_SMT                     << 
2652 static void cpuhp_offline_cpu_device(unsigned << 
2653 {                                             << 
2654         struct device *dev = get_cpu_device(c << 
2655                                               << 
2656         dev->offline = true;                  << 
2657         /* Tell user space about the state ch << 
2658         kobject_uevent(&dev->kobj, KOBJ_OFFLI << 
2659 }                                             << 
2660                                               << 
2661 static void cpuhp_online_cpu_device(unsigned  << 
2662 {                                             << 
2663         struct device *dev = get_cpu_device(c << 
2664                                               << 
2665         dev->offline = false;                 << 
2666         /* Tell user space about the state ch << 
2667         kobject_uevent(&dev->kobj, KOBJ_ONLIN << 
2668 }                                             << 
2669                                               << 
2670 int cpuhp_smt_disable(enum cpuhp_smt_control  << 
2671 {                                             << 
2672         int cpu, ret = 0;                     << 
2673                                               << 
2674         cpu_maps_update_begin();              << 
2675         for_each_online_cpu(cpu) {            << 
2676                 if (topology_is_primary_threa << 
2677                         continue;             << 
2678                 /*                            << 
2679                  * Disable can be called with << 
2680                  * from a higher to lower num << 
2681                  */                           << 
2682                 if (ctrlval == CPU_SMT_ENABLE << 
2683                         continue;             << 
2684                 ret = cpu_down_maps_locked(cp << 
2685                 if (ret)                      << 
2686                         break;                << 
2687                 /*                            << 
2688                  * As this needs to hold the  << 
2689                  * to call device_offline() b << 
2690                  * cpu_down() which takes cpu << 
2691                  * needs to be held as this m << 
2692                  * abusers of the hotplug mac << 
2693                  *                            << 
2694                  * So nothing would update de << 
2695                  * leave the sysfs entry stal << 
2696                  * smt control has been chang << 
2697                  * called under the sysfs hot << 
2698                  * serialized against the reg << 
2699                  */                           << 
2700                 cpuhp_offline_cpu_device(cpu) << 
2701         }                                     << 
2702         if (!ret)                             << 
2703                 cpu_smt_control = ctrlval;    << 
2704         cpu_maps_update_done();               << 
2705         return ret;                           << 
2706 }                                             << 
2707                                               << 
2708 /* Check if the core a CPU belongs to is onli << 
2709 #if !defined(topology_is_core_online)         << 
2710 static inline bool topology_is_core_online(un << 
2711 {                                             << 
2712         return true;                          << 
2713 }                                             << 
2714 #endif                                        << 
2715                                               << 
2716 int cpuhp_smt_enable(void)                    << 
2717 {                                             << 
2718         int cpu, ret = 0;                     << 
2719                                               << 
2720         cpu_maps_update_begin();              << 
2721         cpu_smt_control = CPU_SMT_ENABLED;    << 
2722         for_each_present_cpu(cpu) {           << 
2723                 /* Skip online CPUs and CPUs  << 
2724                 if (cpu_online(cpu) || !node_ << 
2725                         continue;             << 
2726                 if (!cpu_smt_thread_allowed(c << 
2727                         continue;             << 
2728                 ret = _cpu_up(cpu, 0, CPUHP_O << 
2729                 if (ret)                      << 
2730                         break;                << 
2731                 /* See comment in cpuhp_smt_d << 
2732                 cpuhp_online_cpu_device(cpu); << 
2733         }                                     << 
2734         cpu_maps_update_done();               << 
2735         return ret;                           << 
2736 }                                             << 
2737 #endif                                        << 
2738                                               << 
2739 #if defined(CONFIG_SYSFS) && defined(CONFIG_H    1620 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2740 static ssize_t state_show(struct device *dev, !! 1621 static ssize_t show_cpuhp_state(struct device *dev,
2741                           struct device_attri !! 1622                                 struct device_attribute *attr, char *buf)
2742 {                                                1623 {
2743         struct cpuhp_cpu_state *st = per_cpu_    1624         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2744                                                  1625 
2745         return sprintf(buf, "%d\n", st->state    1626         return sprintf(buf, "%d\n", st->state);
2746 }                                                1627 }
2747 static DEVICE_ATTR_RO(state);                 !! 1628 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
2748                                                  1629 
2749 static ssize_t target_store(struct device *de !! 1630 static ssize_t write_cpuhp_target(struct device *dev,
2750                             const char *buf,  !! 1631                                   struct device_attribute *attr,
                                                   >> 1632                                   const char *buf, size_t count)
2751 {                                                1633 {
2752         struct cpuhp_cpu_state *st = per_cpu_    1634         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2753         struct cpuhp_step *sp;                   1635         struct cpuhp_step *sp;
2754         int target, ret;                         1636         int target, ret;
2755                                                  1637 
2756         ret = kstrtoint(buf, 10, &target);       1638         ret = kstrtoint(buf, 10, &target);
2757         if (ret)                                 1639         if (ret)
2758                 return ret;                      1640                 return ret;
2759                                                  1641 
2760 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL          1642 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2761         if (target < CPUHP_OFFLINE || target     1643         if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2762                 return -EINVAL;                  1644                 return -EINVAL;
2763 #else                                            1645 #else
2764         if (target != CPUHP_OFFLINE && target    1646         if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2765                 return -EINVAL;                  1647                 return -EINVAL;
2766 #endif                                           1648 #endif
2767                                                  1649 
2768         ret = lock_device_hotplug_sysfs();       1650         ret = lock_device_hotplug_sysfs();
2769         if (ret)                                 1651         if (ret)
2770                 return ret;                      1652                 return ret;
2771                                                  1653 
2772         mutex_lock(&cpuhp_state_mutex);          1654         mutex_lock(&cpuhp_state_mutex);
2773         sp = cpuhp_get_step(target);             1655         sp = cpuhp_get_step(target);
2774         ret = !sp->name || sp->cant_stop ? -E    1656         ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2775         mutex_unlock(&cpuhp_state_mutex);        1657         mutex_unlock(&cpuhp_state_mutex);
2776         if (ret)                                 1658         if (ret)
2777                 goto out;                        1659                 goto out;
2778                                                  1660 
2779         if (st->state < target)                  1661         if (st->state < target)
2780                 ret = cpu_up(dev->id, target) !! 1662                 ret = do_cpu_up(dev->id, target);
2781         else if (st->state > target)          !! 1663         else
2782                 ret = cpu_down(dev->id, targe !! 1664                 ret = do_cpu_down(dev->id, target);
2783         else if (WARN_ON(st->target != target << 
2784                 st->target = target;          << 
2785 out:                                             1665 out:
2786         unlock_device_hotplug();                 1666         unlock_device_hotplug();
2787         return ret ? ret : count;                1667         return ret ? ret : count;
2788 }                                                1668 }
2789                                                  1669 
2790 static ssize_t target_show(struct device *dev !! 1670 static ssize_t show_cpuhp_target(struct device *dev,
2791                            struct device_attr !! 1671                                  struct device_attribute *attr, char *buf)
2792 {                                                1672 {
2793         struct cpuhp_cpu_state *st = per_cpu_    1673         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2794                                                  1674 
2795         return sprintf(buf, "%d\n", st->targe    1675         return sprintf(buf, "%d\n", st->target);
2796 }                                                1676 }
2797 static DEVICE_ATTR_RW(target);                !! 1677 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
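
The state/target attributes above land under /sys/devices/system/cpu/cpuN/hotplug/ because the attribute group is named "hotplug". A userspace sketch, assuming that sysfs layout, root privileges and an existing CPU1, which reads the current state and then requests CPUHP_OFFLINE (state 0), i.e. a plain offline; other state numbers are only accepted with CONFIG_CPU_HOTPLUG_STATE_CONTROL:

#include <stdio.h>

int main(void)
{
        char buf[32];
        FILE *f;

        /* current hotplug state number of CPU1 */
        f = fopen("/sys/devices/system/cpu/cpu1/hotplug/state", "r");
        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("cpu1 state: %s", buf);
        fclose(f);

        /* request a target state; 0 == CPUHP_OFFLINE takes the CPU down */
        f = fopen("/sys/devices/system/cpu/cpu1/hotplug/target", "w");
        if (!f)
                return 1;
        fprintf(f, "0\n");
        return fclose(f) ? 1 : 0;
}
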
2798                                               << 
2799 static ssize_t fail_store(struct device *dev, << 
2800                           const char *buf, si << 
2801 {                                             << 
2802         struct cpuhp_cpu_state *st = per_cpu_ << 
2803         struct cpuhp_step *sp;                << 
2804         int fail, ret;                        << 
2805                                               << 
2806         ret = kstrtoint(buf, 10, &fail);      << 
2807         if (ret)                              << 
2808                 return ret;                   << 
2809                                               << 
2810         if (fail == CPUHP_INVALID) {          << 
2811                 st->fail = fail;              << 
2812                 return count;                 << 
2813         }                                     << 
2814                                               << 
2815         if (fail < CPUHP_OFFLINE || fail > CP << 
2816                 return -EINVAL;               << 
2817                                               << 
2818         /*                                    << 
2819          * Cannot fail STARTING/DYING callbac << 
2820          */                                   << 
2821         if (cpuhp_is_atomic_state(fail))      << 
2822                 return -EINVAL;               << 
2823                                               << 
2824         /*                                    << 
2825          * DEAD callbacks cannot fail...      << 
2826          * ... neither can CPUHP_BRINGUP_CPU  << 
2827          * triggering STARTING callbacks, a f << 
2828          * hinder rollback.                   << 
2829          */                                   << 
2830         if (fail <= CPUHP_BRINGUP_CPU && st-> << 
2831                 return -EINVAL;               << 
2832                                               << 
2833         /*                                    << 
2834          * Cannot fail anything that doesn't  << 
2835          */                                   << 
2836         mutex_lock(&cpuhp_state_mutex);       << 
2837         sp = cpuhp_get_step(fail);            << 
2838         if (!sp->startup.single && !sp->teard << 
2839                 ret = -EINVAL;                << 
2840         mutex_unlock(&cpuhp_state_mutex);     << 
2841         if (ret)                              << 
2842                 return ret;                   << 
2843                                               << 
2844         st->fail = fail;                      << 
2845                                               << 
2846         return count;                         << 
2847 }                                             << 
2848                                               << 
2849 static ssize_t fail_show(struct device *dev,  << 
2850                          struct device_attrib << 
2851 {                                             << 
2852         struct cpuhp_cpu_state *st = per_cpu_ << 
2853                                               << 
2854         return sprintf(buf, "%d\n", st->fail) << 
2855 }                                             << 
2856                                               << 
2857 static DEVICE_ATTR_RW(fail);                  << 
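
The fail attribute is a fault-injection knob for exercising the rollback paths: writing a state number arms an injected failure at that (non-atomic) step for this CPU's next hotplug transition, and writing -1 (CPUHP_INVALID) clears it again. A userspace sketch under the same sysfs-layout assumptions as above; 200 is only a placeholder state number, real ones can be read from /sys/devices/system/cpu/hotplug/states:

#include <stdio.h>

/* Arm (or clear, with "clear" as the first argument) an injected failure
 * for CPU1's next hotplug transition. */
int main(int argc, char **argv)
{
        int state = (argc > 1 && argv[1][0] == 'c') ? -1 : 200;
        FILE *f = fopen("/sys/devices/system/cpu/cpu1/hotplug/fail", "w");

        if (!f)
                return 1;
        fprintf(f, "%d\n", state);
        return fclose(f) ? 1 : 0;
}
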
2858                                                  1678 
2859 static struct attribute *cpuhp_cpu_attrs[] =     1679 static struct attribute *cpuhp_cpu_attrs[] = {
2860         &dev_attr_state.attr,                    1680         &dev_attr_state.attr,
2861         &dev_attr_target.attr,                   1681         &dev_attr_target.attr,
2862         &dev_attr_fail.attr,                  << 
2863         NULL                                     1682         NULL
2864 };                                               1683 };
2865                                                  1684 
2866 static const struct attribute_group cpuhp_cpu !! 1685 static struct attribute_group cpuhp_cpu_attr_group = {
2867         .attrs = cpuhp_cpu_attrs,                1686         .attrs = cpuhp_cpu_attrs,
2868         .name = "hotplug",                       1687         .name = "hotplug",
2869         NULL                                     1688         NULL
2870 };                                               1689 };
2871                                                  1690 
2872 static ssize_t states_show(struct device *dev !! 1691 static ssize_t show_cpuhp_states(struct device *dev,
2873                                  struct devic    1692                                  struct device_attribute *attr, char *buf)
2874 {                                                1693 {
2875         ssize_t cur, res = 0;                    1694         ssize_t cur, res = 0;
2876         int i;                                   1695         int i;
2877                                                  1696 
2878         mutex_lock(&cpuhp_state_mutex);          1697         mutex_lock(&cpuhp_state_mutex);
2879         for (i = CPUHP_OFFLINE; i <= CPUHP_ON    1698         for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2880                 struct cpuhp_step *sp = cpuhp    1699                 struct cpuhp_step *sp = cpuhp_get_step(i);
2881                                                  1700 
2882                 if (sp->name) {                  1701                 if (sp->name) {
2883                         cur = sprintf(buf, "%    1702                         cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2884                         buf += cur;              1703                         buf += cur;
2885                         res += cur;              1704                         res += cur;
2886                 }                                1705                 }
2887         }                                        1706         }
2888         mutex_unlock(&cpuhp_state_mutex);        1707         mutex_unlock(&cpuhp_state_mutex);
2889         return res;                              1708         return res;
2890 }                                                1709 }
2891 static DEVICE_ATTR_RO(states);                !! 1710 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
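
Editor's note: states_show() builds the whole listing in the single page-sized buffer sysfs hands it: each sprintf() returns the number of bytes it produced, buf is advanced past them and res accumulates the total that is finally returned. A standalone sketch of that accumulate-and-advance pattern (illustration only; the state names below are made up):

#include <stdio.h>

int main(void)
{
	char page[4096], *buf = page;
	int res = 0, cur, i;
	const char *names[] = { "offline", "threads:create", "online" };

	for (i = 0; i < 3; i++) {
		cur = sprintf(buf, "%3d: %s\n", i, names[i]);
		buf += cur;	/* append the next entry after this one */
		res += cur;	/* total number of bytes produced so far */
	}
	fwrite(page, 1, res, stdout);
	return 0;
}
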
2892                                                  1711 
2893 static struct attribute *cpuhp_cpu_root_attrs    1712 static struct attribute *cpuhp_cpu_root_attrs[] = {
2894         &dev_attr_states.attr,                   1713         &dev_attr_states.attr,
2895         NULL                                     1714         NULL
2896 };                                               1715 };
2897                                                  1716 
2898 static const struct attribute_group cpuhp_cpu !! 1717 static struct attribute_group cpuhp_cpu_root_attr_group = {
2899         .attrs = cpuhp_cpu_root_attrs,           1718         .attrs = cpuhp_cpu_root_attrs,
2900         .name = "hotplug",                       1719         .name = "hotplug",
2901         NULL                                     1720         NULL
2902 };                                               1721 };
2903                                                  1722 
2904 #ifdef CONFIG_HOTPLUG_SMT                     << 
2905                                               << 
2906 static bool cpu_smt_num_threads_valid(unsigne << 
2907 {                                             << 
2908         if (IS_ENABLED(CONFIG_SMT_NUM_THREADS << 
2909                 return threads >= 1 && thread << 
2910         return threads == 1 || threads == cpu << 
2911 }                                             << 
2912                                               << 
2913 static ssize_t                                << 
2914 __store_smt_control(struct device *dev, struc << 
2915                     const char *buf, size_t c << 
2916 {                                             << 
2917         int ctrlval, ret, num_threads, orig_t << 
2918         bool force_off;                       << 
2919                                               << 
2920         if (cpu_smt_control == CPU_SMT_FORCE_ << 
2921                 return -EPERM;                << 
2922                                               << 
2923         if (cpu_smt_control == CPU_SMT_NOT_SU << 
2924                 return -ENODEV;               << 
2925                                               << 
2926         if (sysfs_streq(buf, "on")) {         << 
2927                 ctrlval = CPU_SMT_ENABLED;    << 
2928                 num_threads = cpu_smt_max_thr << 
2929         } else if (sysfs_streq(buf, "off")) { << 
2930                 ctrlval = CPU_SMT_DISABLED;   << 
2931                 num_threads = 1;              << 
2932         } else if (sysfs_streq(buf, "forceoff << 
2933                 ctrlval = CPU_SMT_FORCE_DISAB << 
2934                 num_threads = 1;              << 
2935         } else if (kstrtoint(buf, 10, &num_th << 
2936                 if (num_threads == 1)         << 
2937                         ctrlval = CPU_SMT_DIS << 
2938                 else if (cpu_smt_num_threads_ << 
2939                         ctrlval = CPU_SMT_ENA << 
2940                 else                          << 
2941                         return -EINVAL;       << 
2942         } else {                              << 
2943                 return -EINVAL;               << 
2944         }                                     << 
2945                                               << 
2946         ret = lock_device_hotplug_sysfs();    << 
2947         if (ret)                              << 
2948                 return ret;                   << 
2949                                               << 
2950         orig_threads = cpu_smt_num_threads;   << 
2951         cpu_smt_num_threads = num_threads;    << 
2952                                               << 
2953         force_off = ctrlval != cpu_smt_contro << 
2954                                               << 
2955         if (num_threads > orig_threads)       << 
2956                 ret = cpuhp_smt_enable();     << 
2957         else if (num_threads < orig_threads | << 
2958                 ret = cpuhp_smt_disable(ctrlv << 
2959                                               << 
2960         unlock_device_hotplug();              << 
2961         return ret ? ret : count;             << 
2962 }                                             << 
2963                                               << 
2964 #else /* !CONFIG_HOTPLUG_SMT */               << 
2965 static ssize_t                                << 
2966 __store_smt_control(struct device *dev, struc << 
2967                     const char *buf, size_t c << 
2968 {                                             << 
2969         return -ENODEV;                       << 
2970 }                                             << 
2971 #endif /* CONFIG_HOTPLUG_SMT */               << 
2972                                               << 
2973 static const char *smt_states[] = {           << 
2974         [CPU_SMT_ENABLED]               = "on << 
2975         [CPU_SMT_DISABLED]              = "of << 
2976         [CPU_SMT_FORCE_DISABLED]        = "fo << 
2977         [CPU_SMT_NOT_SUPPORTED]         = "no << 
2978         [CPU_SMT_NOT_IMPLEMENTED]       = "no << 
2979 };                                            << 
2980                                               << 
2981 static ssize_t control_show(struct device *de << 
2982                             struct device_att << 
2983 {                                             << 
2984         const char *state = smt_states[cpu_sm << 
2985                                               << 
2986 #ifdef CONFIG_HOTPLUG_SMT                     << 
2987         /*                                    << 
2988          * If SMT is enabled but not all thre << 
2989          * number of threads. If all threads  << 
2990          * show the state name.               << 
2991          */                                   << 
2992         if (cpu_smt_control == CPU_SMT_ENABLE << 
2993             cpu_smt_num_threads != cpu_smt_ma << 
2994                 return sysfs_emit(buf, "%d\n" << 
2995 #endif                                        << 
2996                                               << 
2997         return sysfs_emit(buf, "%s\n", state) << 
2998 }                                             << 
2999                                               << 
3000 static ssize_t control_store(struct device *d << 
3001                              const char *buf, << 
3002 {                                             << 
3003         return __store_smt_control(dev, attr, << 
3004 }                                             << 
3005 static DEVICE_ATTR_RW(control);               << 
3006                                               << 
3007 static ssize_t active_show(struct device *dev << 
3008                            struct device_attr << 
3009 {                                             << 
3010         return sysfs_emit(buf, "%d\n", sched_ << 
3011 }                                             << 
3012 static DEVICE_ATTR_RO(active);                << 
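
Editor's note: __store_smt_control() accepts "on", "off", "forceoff" or, where CONFIG_SMT_NUM_THREADS_DYNAMIC allows it, a plain thread count, while control_show() reports either the state name or the partial thread count. A hedged user-space sketch of driving that interface (illustration only; the path assumes the "smt" attribute group registered on the cpu subsystem root, i.e. /sys/devices/system/cpu/smt/control):

#include <stdio.h>

int main(void)
{
	char state[32] = "";
	FILE *f = fopen("/sys/devices/system/cpu/smt/control", "r+");

	if (!f) {
		perror("smt/control");
		return 1;
	}
	if (fgets(state, sizeof(state), f))
		printf("current SMT control: %s", state);

	/* Request SMT off; fails with EPERM after "forceoff", ENODEV if unsupported. */
	rewind(f);
	if (fputs("off", f) == EOF || fflush(f) == EOF)
		perror("write smt/control");

	fclose(f);
	return 0;
}
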
3013                                               << 
3014 static struct attribute *cpuhp_smt_attrs[] =  << 
3015         &dev_attr_control.attr,               << 
3016         &dev_attr_active.attr,                << 
3017         NULL                                  << 
3018 };                                            << 
3019                                               << 
3020 static const struct attribute_group cpuhp_smt << 
3021         .attrs = cpuhp_smt_attrs,             << 
3022         .name = "smt",                        << 
3023         NULL                                  << 
3024 };                                            << 
3025                                               << 
3026 static int __init cpu_smt_sysfs_init(void)    << 
3027 {                                             << 
3028         struct device *dev_root;              << 
3029         int ret = -ENODEV;                    << 
3030                                               << 
3031         dev_root = bus_get_dev_root(&cpu_subs << 
3032         if (dev_root) {                       << 
3033                 ret = sysfs_create_group(&dev << 
3034                 put_device(dev_root);         << 
3035         }                                     << 
3036         return ret;                           << 
3037 }                                             << 
3038                                               << 
3039 static int __init cpuhp_sysfs_init(void)         1723 static int __init cpuhp_sysfs_init(void)
3040 {                                                1724 {
3041         struct device *dev_root;              << 
3042         int cpu, ret;                            1725         int cpu, ret;
3043                                                  1726 
3044         ret = cpu_smt_sysfs_init();           !! 1727         ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                                   >> 1728                                  &cpuhp_cpu_root_attr_group);
3045         if (ret)                                 1729         if (ret)
3046                 return ret;                      1730                 return ret;
3047                                                  1731 
3048         dev_root = bus_get_dev_root(&cpu_subs << 
3049         if (dev_root) {                       << 
3050                 ret = sysfs_create_group(&dev << 
3051                 put_device(dev_root);         << 
3052                 if (ret)                      << 
3053                         return ret;           << 
3054         }                                     << 
3055                                               << 
3056         for_each_possible_cpu(cpu) {             1732         for_each_possible_cpu(cpu) {
3057                 struct device *dev = get_cpu_    1733                 struct device *dev = get_cpu_device(cpu);
3058                                                  1734 
3059                 if (!dev)                        1735                 if (!dev)
3060                         continue;                1736                         continue;
3061                 ret = sysfs_create_group(&dev    1737                 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
3062                 if (ret)                         1738                 if (ret)
3063                         return ret;              1739                         return ret;
3064         }                                        1740         }
3065         return 0;                                1741         return 0;
3066 }                                                1742 }
3067 device_initcall(cpuhp_sysfs_init);               1743 device_initcall(cpuhp_sysfs_init);
3068 #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU  !! 1744 #endif
3069                                                  1745 
3070 /*                                               1746 /*
3071  * cpu_bit_bitmap[] is a special, "compressed    1747  * cpu_bit_bitmap[] is a special, "compressed" data structure that
3072  * represents all NR_CPUS bits binary values     1748  * represents all NR_CPUS bits binary values of 1<<nr.
3073  *                                               1749  *
3074  * It is used by cpumask_of() to get a consta    1750  * It is used by cpumask_of() to get a constant address to a CPU
3075  * mask value that has a single bit set only.    1751  * mask value that has a single bit set only.
3076  */                                              1752  */
3077                                                  1753 
3078 /* cpu_bit_bitmap[0] is empty - so we can bac    1754 /* cpu_bit_bitmap[0] is empty - so we can back into it */
3079 #define MASK_DECLARE_1(x)       [x+1][0] = (1    1755 #define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
3080 #define MASK_DECLARE_2(x)       MASK_DECLARE_    1756 #define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
3081 #define MASK_DECLARE_4(x)       MASK_DECLARE_    1757 #define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
3082 #define MASK_DECLARE_8(x)       MASK_DECLARE_    1758 #define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
3083                                                  1759 
3084 const unsigned long cpu_bit_bitmap[BITS_PER_L    1760 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
3085                                                  1761 
3086         MASK_DECLARE_8(0),      MASK_DECLARE_    1762         MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
3087         MASK_DECLARE_8(16),     MASK_DECLARE_    1763         MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
3088 #if BITS_PER_LONG > 32                           1764 #if BITS_PER_LONG > 32
3089         MASK_DECLARE_8(32),     MASK_DECLARE_    1765         MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
3090         MASK_DECLARE_8(48),     MASK_DECLARE_    1766         MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
3091 #endif                                           1767 #endif
3092 };                                               1768 };
3093 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);               1769 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
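
Editor's note: to make the "compressed" layout concrete, here is a standalone user-space sketch (illustration only) that rebuilds the same table and reproduces, from memory, the pointer trick used by get_cpu_mask() in <linux/cpumask.h>: pick the row whose word 0 has the wanted bit set, then step the pointer back so that bit lands in the right word of the returned mask, relying on the zero-filled preceding rows (including the empty row 0) as padding. NR_CPUS and the demo CPU number are arbitrary.

#include <limits.h>
#include <stdio.h>

#define NR_CPUS			128
#define BITS_PER_LONG		(8 * sizeof(long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

static const unsigned long bitmap[BITS_PER_LONG + 1][BITS_TO_LONGS(NR_CPUS)] = {
	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if ULONG_MAX > 0xffffffffUL
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};

/* Return a pointer to an NR_CPUS-bit mask with only bit 'cpu' set. */
static const unsigned long *single_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = bitmap[1 + cpu % BITS_PER_LONG];

	/*
	 * Word cpu/BITS_PER_LONG of the returned mask is word 0 of the
	 * selected row.  Backing the pointer up is safe because the
	 * preceding rows (row 0 is entirely empty) supply zero-filled
	 * storage for the lower words.
	 */
	return p - cpu / BITS_PER_LONG;
}

int main(void)
{
	const unsigned long *m = single_cpu_mask(70);
	unsigned int i;

	for (i = 0; i < BITS_TO_LONGS(NR_CPUS); i++)
		printf("word %u: %#lx\n", i, m[i]);
	return 0;
}
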
3094                                                  1770 
3095 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) =    1771 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
3096 EXPORT_SYMBOL(cpu_all_bits);                     1772 EXPORT_SYMBOL(cpu_all_bits);
3097                                                  1773 
3098 #ifdef CONFIG_INIT_ALL_POSSIBLE                  1774 #ifdef CONFIG_INIT_ALL_POSSIBLE
3099 struct cpumask __cpu_possible_mask __ro_after !! 1775 struct cpumask __cpu_possible_mask __read_mostly
3100         = {CPU_BITS_ALL};                        1776         = {CPU_BITS_ALL};
3101 #else                                            1777 #else
3102 struct cpumask __cpu_possible_mask __ro_after !! 1778 struct cpumask __cpu_possible_mask __read_mostly;
3103 #endif                                           1779 #endif
3104 EXPORT_SYMBOL(__cpu_possible_mask);              1780 EXPORT_SYMBOL(__cpu_possible_mask);
3105                                                  1781 
3106 struct cpumask __cpu_online_mask __read_mostl    1782 struct cpumask __cpu_online_mask __read_mostly;
3107 EXPORT_SYMBOL(__cpu_online_mask);                1783 EXPORT_SYMBOL(__cpu_online_mask);
3108                                                  1784 
3109 struct cpumask __cpu_enabled_mask __read_most << 
3110 EXPORT_SYMBOL(__cpu_enabled_mask);            << 
3111                                               << 
3112 struct cpumask __cpu_present_mask __read_most    1785 struct cpumask __cpu_present_mask __read_mostly;
3113 EXPORT_SYMBOL(__cpu_present_mask);               1786 EXPORT_SYMBOL(__cpu_present_mask);
3114                                                  1787 
3115 struct cpumask __cpu_active_mask __read_mostl    1788 struct cpumask __cpu_active_mask __read_mostly;
3116 EXPORT_SYMBOL(__cpu_active_mask);                1789 EXPORT_SYMBOL(__cpu_active_mask);
3117                                                  1790 
3118 struct cpumask __cpu_dying_mask __read_mostly << 
3119 EXPORT_SYMBOL(__cpu_dying_mask);              << 
3120                                               << 
3121 atomic_t __num_online_cpus __read_mostly;     << 
3122 EXPORT_SYMBOL(__num_online_cpus);             << 
3123                                               << 
3124 void init_cpu_present(const struct cpumask *s    1791 void init_cpu_present(const struct cpumask *src)
3125 {                                                1792 {
3126         cpumask_copy(&__cpu_present_mask, src    1793         cpumask_copy(&__cpu_present_mask, src);
3127 }                                                1794 }
3128                                                  1795 
3129 void init_cpu_possible(const struct cpumask *    1796 void init_cpu_possible(const struct cpumask *src)
3130 {                                                1797 {
3131         cpumask_copy(&__cpu_possible_mask, sr    1798         cpumask_copy(&__cpu_possible_mask, src);
3132 }                                                1799 }
3133                                                  1800 
3134 void init_cpu_online(const struct cpumask *sr    1801 void init_cpu_online(const struct cpumask *src)
3135 {                                                1802 {
3136         cpumask_copy(&__cpu_online_mask, src)    1803         cpumask_copy(&__cpu_online_mask, src);
3137 }                                                1804 }
3138                                                  1805 
3139 void set_cpu_online(unsigned int cpu, bool on << 
3140 {                                             << 
3141         /*                                    << 
3142          * atomic_inc/dec() is required to ha << 
3143          * function by the reboot and kexec c << 
3144          * IPI/NMI broadcasts when shutting d << 
3145          * regular CPU hotplug is properly se << 
3146          *                                    << 
3147          * Note, that the fact that __num_onl << 
3148          * does not protect readers which are << 
3149          * concurrent hotplug operations.     << 
3150          */                                   << 
3151         if (online) {                         << 
3152                 if (!cpumask_test_and_set_cpu << 
3153                         atomic_inc(&__num_onl << 
3154         } else {                              << 
3155                 if (cpumask_test_and_clear_cp << 
3156                         atomic_dec(&__num_onl << 
3157         }                                     << 
3158 }                                             << 
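
Editor's note: the comment above is the whole trick: the counter only moves when the test-and-set/clear on the online mask reports a real transition, so a duplicate call from the reboot/kexec path cannot double-count a CPU. A standalone sketch of that idempotent-update pattern using C11 atomics (illustration only; the kernel uses cpumask and atomic_t primitives, and set_cpu_online_demo() is a hypothetical stand-in):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong online_bits;	/* stand-in for __cpu_online_mask */
static atomic_int num_online;		/* stand-in for __num_online_cpus */

static void set_cpu_online_demo(unsigned int cpu, bool online)
{
	unsigned long bit = 1UL << cpu;
	unsigned long old;

	if (online) {
		old = atomic_fetch_or(&online_bits, bit);
		if (!(old & bit))	/* bit was clear: a real online transition */
			atomic_fetch_add(&num_online, 1);
	} else {
		old = atomic_fetch_and(&online_bits, ~bit);
		if (old & bit)		/* bit was set: a real offline transition */
			atomic_fetch_sub(&num_online, 1);
	}
}

int main(void)
{
	set_cpu_online_demo(0, true);
	set_cpu_online_demo(1, true);
	set_cpu_online_demo(1, true);	/* duplicate call, e.g. kexec path */
	set_cpu_online_demo(1, false);
	printf("online count: %d\n", atomic_load(&num_online));	/* prints 1 */
	return 0;
}
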
3159                                               << 
3160 /*                                               1806 /*
3161  * Activate the first processor.                 1807  * Activate the first processor.
3162  */                                              1808  */
3163 void __init boot_cpu_init(void)                  1809 void __init boot_cpu_init(void)
3164 {                                                1810 {
3165         int cpu = smp_processor_id();            1811         int cpu = smp_processor_id();
3166                                                  1812 
3167         /* Mark the boot cpu "present", "onli    1813         /* Mark the boot cpu "present", "online" etc for SMP and UP case */
3168         set_cpu_online(cpu, true);               1814         set_cpu_online(cpu, true);
3169         set_cpu_active(cpu, true);               1815         set_cpu_active(cpu, true);
3170         set_cpu_present(cpu, true);              1816         set_cpu_present(cpu, true);
3171         set_cpu_possible(cpu, true);             1817         set_cpu_possible(cpu, true);
3172                                               << 
3173 #ifdef CONFIG_SMP                             << 
3174         __boot_cpu_id = cpu;                  << 
3175 #endif                                        << 
3176 }                                                1818 }
3177                                                  1819 
3178 /*                                               1820 /*
3179  * Must be called _AFTER_ setting up the per_    1821  * Must be called _AFTER_ setting up the per_cpu areas
3180  */                                              1822  */
3181 void __init boot_cpu_hotplug_init(void)       !! 1823 void __init boot_cpu_state_init(void)
3182 {                                             << 
3183 #ifdef CONFIG_SMP                             << 
3184         cpumask_set_cpu(smp_processor_id(), & << 
3185         atomic_set(this_cpu_ptr(&cpuhp_state. << 
3186 #endif                                        << 
3187         this_cpu_write(cpuhp_state.state, CPU << 
3188         this_cpu_write(cpuhp_state.target, CP << 
3189 }                                             << 
3190                                               << 
3191 #ifdef CONFIG_CPU_MITIGATIONS                 << 
3192 /*                                            << 
3193  * These are used for a global "mitigations=" << 
3194  * optional CPU mitigations.                  << 
3195  */                                           << 
3196 enum cpu_mitigations {                        << 
3197         CPU_MITIGATIONS_OFF,                  << 
3198         CPU_MITIGATIONS_AUTO,                 << 
3199         CPU_MITIGATIONS_AUTO_NOSMT,           << 
3200 };                                            << 
3201                                               << 
3202 static enum cpu_mitigations cpu_mitigations _ << 
3203                                               << 
3204 static int __init mitigations_parse_cmdline(c << 
3205 {                                                1824 {
3206         if (!strcmp(arg, "off"))              !! 1825         per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
3207                 cpu_mitigations = CPU_MITIGAT << 
3208         else if (!strcmp(arg, "auto"))        << 
3209                 cpu_mitigations = CPU_MITIGAT << 
3210         else if (!strcmp(arg, "auto,nosmt"))  << 
3211                 cpu_mitigations = CPU_MITIGAT << 
3212         else                                  << 
3213                 pr_crit("Unsupported mitigati << 
3214                         arg);                 << 
3215                                               << 
3216         return 0;                             << 
3217 }                                             << 
3218                                               << 
3219 /* mitigations=off */                         << 
3220 bool cpu_mitigations_off(void)                << 
3221 {                                             << 
3222         return cpu_mitigations == CPU_MITIGAT << 
3223 }                                             << 
3224 EXPORT_SYMBOL_GPL(cpu_mitigations_off);       << 
3225                                               << 
3226 /* mitigations=auto,nosmt */                  << 
3227 bool cpu_mitigations_auto_nosmt(void)         << 
3228 {                                             << 
3229         return cpu_mitigations == CPU_MITIGAT << 
3230 }                                                1826 }
3231 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt) << 
3232 #else                                         << 
3233 static int __init mitigations_parse_cmdline(c << 
3234 {                                             << 
3235         pr_crit("Kernel compiled without miti << 
3236         return 0;                             << 
3237 }                                             << 
3238 #endif                                        << 
3239 early_param("mitigations", mitigations_parse_ << 
3240                                                  1827 
