
TOMOYO Linux Cross Reference
Linux/kernel/cpu.c


Diff markup

Differences between /kernel/cpu.c (Version linux-6.12-rc7) and /kernel/cpu.c (Version linux-4.15.18)


  1 /* CPU control.                                     1 /* CPU control.
  2  * (C) 2001, 2002, 2003, 2004 Rusty Russell         2  * (C) 2001, 2002, 2003, 2004 Rusty Russell
  3  *                                                  3  *
  4  * This code is licenced under the GPL.             4  * This code is licenced under the GPL.
  5  */                                                 5  */
  6 #include <linux/sched/mm.h>                    << 
  7 #include <linux/proc_fs.h>                          6 #include <linux/proc_fs.h>
  8 #include <linux/smp.h>                              7 #include <linux/smp.h>
  9 #include <linux/init.h>                             8 #include <linux/init.h>
 10 #include <linux/notifier.h>                         9 #include <linux/notifier.h>
 11 #include <linux/sched/signal.h>                    10 #include <linux/sched/signal.h>
 12 #include <linux/sched/hotplug.h>                   11 #include <linux/sched/hotplug.h>
 13 #include <linux/sched/isolation.h>             << 
 14 #include <linux/sched/task.h>                      12 #include <linux/sched/task.h>
 15 #include <linux/sched/smt.h>                   << 
 16 #include <linux/unistd.h>                          13 #include <linux/unistd.h>
 17 #include <linux/cpu.h>                             14 #include <linux/cpu.h>
 18 #include <linux/oom.h>                             15 #include <linux/oom.h>
 19 #include <linux/rcupdate.h>                        16 #include <linux/rcupdate.h>
 20 #include <linux/delay.h>                       << 
 21 #include <linux/export.h>                          17 #include <linux/export.h>
 22 #include <linux/bug.h>                             18 #include <linux/bug.h>
 23 #include <linux/kthread.h>                         19 #include <linux/kthread.h>
 24 #include <linux/stop_machine.h>                    20 #include <linux/stop_machine.h>
 25 #include <linux/mutex.h>                           21 #include <linux/mutex.h>
 26 #include <linux/gfp.h>                             22 #include <linux/gfp.h>
 27 #include <linux/suspend.h>                         23 #include <linux/suspend.h>
 28 #include <linux/lockdep.h>                         24 #include <linux/lockdep.h>
 29 #include <linux/tick.h>                            25 #include <linux/tick.h>
 30 #include <linux/irq.h>                             26 #include <linux/irq.h>
 31 #include <linux/nmi.h>                             27 #include <linux/nmi.h>
 32 #include <linux/smpboot.h>                         28 #include <linux/smpboot.h>
 33 #include <linux/relay.h>                           29 #include <linux/relay.h>
 34 #include <linux/slab.h>                            30 #include <linux/slab.h>
 35 #include <linux/scs.h>                         << 
 36 #include <linux/percpu-rwsem.h>                    31 #include <linux/percpu-rwsem.h>
 37 #include <linux/cpuset.h>                      << 
 38 #include <linux/random.h>                      << 
 39 #include <linux/cc_platform.h>                 << 
 40                                                    32 
 41 #include <trace/events/power.h>                    33 #include <trace/events/power.h>
 42 #define CREATE_TRACE_POINTS                        34 #define CREATE_TRACE_POINTS
 43 #include <trace/events/cpuhp.h>                    35 #include <trace/events/cpuhp.h>
 44                                                    36 
 45 #include "smpboot.h"                               37 #include "smpboot.h"
 46                                                    38 
 47 /**                                                39 /**
 48  * struct cpuhp_cpu_state - Per cpu hotplug state storage                 !!  40  * cpuhp_cpu_state - Per cpu hotplug state storage
 49  * @state:      The current cpu state                                         41  * @state:      The current cpu state
 50  * @target:     The target state                                              42  * @target:     The target state
 51  * @fail:       Current CPU hotplug callback state                        <<
 52  * @thread:     Pointer to the hotplug thread                                 43  * @thread:     Pointer to the hotplug thread
 53  * @should_run: Thread should execute                                         44  * @should_run: Thread should execute
 54  * @rollback:   Perform a rollback                                            45  * @rollback:   Perform a rollback
 55  * @single:     Single callback invocation                                    46  * @single:     Single callback invocation
 56  * @bringup:    Single callback bringup or teardown selector                  47  * @bringup:    Single callback bringup or teardown selector
 57  * @node:       Remote CPU node; for multi-instance, do a                 <<
 58  *              single entry callback for install/remove                  <<
 59  * @last:       For multi-instance rollback, remember how far we got      <<
 60  * @cb_state:   The state for a single callback (install/uninstall)           48  * @cb_state:   The state for a single callback (install/uninstall)
 61  * @result:     Result of the operation                                       49  * @result:     Result of the operation
 62  * @ap_sync_state:      State for AP synchronization                      <<
 63  * @done_up:    Signal completion to the issuer of the task for cpu-up        50  * @done_up:    Signal completion to the issuer of the task for cpu-up
 64  * @done_down:  Signal completion to the issuer of the task for cpu-down      51  * @done_down:  Signal completion to the issuer of the task for cpu-down
 65  */                                                52  */
 66 struct cpuhp_cpu_state {                           53 struct cpuhp_cpu_state {
 67         enum cpuhp_state        state;             54         enum cpuhp_state        state;
 68         enum cpuhp_state        target;            55         enum cpuhp_state        target;
 69         enum cpuhp_state        fail;              56         enum cpuhp_state        fail;
 70 #ifdef CONFIG_SMP                                  57 #ifdef CONFIG_SMP
 71         struct task_struct      *thread;           58         struct task_struct      *thread;
 72         bool                    should_run;        59         bool                    should_run;
 73         bool                    rollback;          60         bool                    rollback;
 74         bool                    single;            61         bool                    single;
 75         bool                    bringup;           62         bool                    bringup;
 76         struct hlist_node       *node;             63         struct hlist_node       *node;
 77         struct hlist_node       *last;             64         struct hlist_node       *last;
 78         enum cpuhp_state        cb_state;          65         enum cpuhp_state        cb_state;
 79         int                     result;            66         int                     result;
 80         atomic_t                ap_sync_state; << 
 81         struct completion       done_up;           67         struct completion       done_up;
 82         struct completion       done_down;         68         struct completion       done_down;
 83 #endif                                             69 #endif
 84 };                                                 70 };
 85                                                    71 
 86 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {                72 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
 87         .fail = CPUHP_INVALID,                     73         .fail = CPUHP_INVALID,
 88 };                                                 74 };
 89                                                    75 
 90 #ifdef CONFIG_SMP                              << 
 91 cpumask_t cpus_booted_once_mask;               << 
 92 #endif                                         << 
 93                                                << 
 94 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)                            76 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
 95 static struct lockdep_map cpuhp_state_up_map =                                 77 static struct lockdep_map cpuhp_state_up_map =
 96         STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);       78         STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
 97 static struct lockdep_map cpuhp_state_down_map =                               79 static struct lockdep_map cpuhp_state_down_map =
 98         STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);   80         STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
 99                                                    81 
100                                                    82 
101 static inline void cpuhp_lock_acquire(bool bringup)                            83 static inline void cpuhp_lock_acquire(bool bringup)
102 {                                                                              84 {
103         lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);      85         lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
104 }                                                                              86 }
105                                                                                87 
106 static inline void cpuhp_lock_release(bool bringup)                            88 static inline void cpuhp_lock_release(bool bringup)
107 {                                                                              89 {
108         lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);      90         lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
109 }                                                                              91 }
110 #else                                                                          92 #else
111                                                                                93 
112 static inline void cpuhp_lock_acquire(bool bringup) { }                        94 static inline void cpuhp_lock_acquire(bool bringup) { }
113 static inline void cpuhp_lock_release(bool bringup) { }                        95 static inline void cpuhp_lock_release(bool bringup) { }
114                                                    96 
115 #endif                                             97 #endif
116                                                    98 
117 /**                                                99 /**
118  * struct cpuhp_step - Hotplug state machine step                         !! 100  * cpuhp_step - Hotplug state machine step
119  * @name:       Name of the step                                             101  * @name:       Name of the step
120  * @startup:    Startup function of the step                                 102  * @startup:    Startup function of the step
121  * @teardown:   Teardown function of the step                                103  * @teardown:   Teardown function of the step
                                                                              >> 104  * @skip_onerr: Do not invoke the functions on error rollback
                                                                              >> 105  *              Will go away once the notifiers are gone
122  * @cant_stop:  Bringup/teardown can't be stopped at this step               106  * @cant_stop:  Bringup/teardown can't be stopped at this step
123  * @multi_instance:     State has multiple instances which get added afterwards  <<
124  */                                                                          107  */
125 struct cpuhp_step {                                                          108 struct cpuhp_step {
126         const char              *name;                                       109         const char              *name;
127         union {                                                              110         union {
128                 int             (*single)(unsigned int cpu);                 111                 int             (*single)(unsigned int cpu);
129                 int             (*multi)(unsigned int cpu,                   112                 int             (*multi)(unsigned int cpu,
130                                          struct hlist_node *node);           113                                          struct hlist_node *node);
131         } startup;                                                           114         } startup;
132         union {                                                              115         union {
133                 int             (*single)(unsigned int cpu);                 116                 int             (*single)(unsigned int cpu);
134                 int             (*multi)(unsigned int cpu,                   117                 int             (*multi)(unsigned int cpu,
135                                          struct hlist_node *node);           118                                          struct hlist_node *node);
136         } teardown;                                                          119         } teardown;
137         /* private: */                                                    <<
138         struct hlist_head       list;                                        120         struct hlist_head       list;
139         /* public: */                                                     !! 121         bool                    skip_onerr;
140         bool                    cant_stop;                                   122         bool                    cant_stop;
141         bool                    multi_instance;                              123         bool                    multi_instance;
142 };                                                124 };
143                                                   125 
144 static DEFINE_MUTEX(cpuhp_state_mutex);           126 static DEFINE_MUTEX(cpuhp_state_mutex);
145 static struct cpuhp_step cpuhp_hp_states[];    !! 127 static struct cpuhp_step cpuhp_bp_states[];
                                                   >> 128 static struct cpuhp_step cpuhp_ap_states[];
146                                                   129 
147 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)          !! 130 static bool cpuhp_is_ap_state(enum cpuhp_state state)
148 {                                                 131 {
149         return cpuhp_hp_states + state;        !! 132         /*
                                                   >> 133          * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
                                                   >> 134          * purposes as that state is handled explicitly in cpu_down.
                                                   >> 135          */
                                                   >> 136         return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
150 }                                                 137 }
151                                                   138 
152 static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)       !! 139 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
153 {                                                                            140 {
154         return bringup ? !step->startup.single : !step->teardown.single;  !! 141         struct cpuhp_step *sp;
                                                   >> 142 
                                                   >> 143         sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
                                                   >> 144         return sp + state;
155 }                                                 145 }
156                                                   146 
157 /**                                               147 /**
158  * cpuhp_invoke_callback - Invoke the callbacks for a given state         !! 148  * cpuhp_invoke_callback _ Invoke the callbacks for a given state
159  * @cpu:        The cpu for which the callback should be invoked             149  * @cpu:        The cpu for which the callback should be invoked
160  * @state:      The state to do callbacks for                                150  * @state:      The state to do callbacks for
161  * @bringup:    True if the bringup callback should be invoked               151  * @bringup:    True if the bringup callback should be invoked
162  * @node:       For multi-instance, do a single entry callback for install/remove    152  * @node:       For multi-instance, do a single entry callback for install/remove
163  * @lastp:      For multi-instance rollback, remember how far we got         153  * @lastp:      For multi-instance rollback, remember how far we got
164  *                                                                           154  *
165  * Called from cpu hotplug and from the state register machinery.            155  * Called from cpu hotplug and from the state register machinery.
166  *                                                                        <<
167  * Return: %0 on success or a negative errno code                         <<
168  */                                                                          156  */
169 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,   157 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
170                                  bool bringup, struct hlist_node *node,      158                                  bool bringup, struct hlist_node *node,
171                                  struct hlist_node **lastp)                  159                                  struct hlist_node **lastp)
172 {                                                                            160 {
173         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);         161         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
174         struct cpuhp_step *step = cpuhp_get_step(state);                     162         struct cpuhp_step *step = cpuhp_get_step(state);
175         int (*cbm)(unsigned int cpu, struct hlist_node *node);               163         int (*cbm)(unsigned int cpu, struct hlist_node *node);
176         int (*cb)(unsigned int cpu);              164         int (*cb)(unsigned int cpu);
177         int ret, cnt;                             165         int ret, cnt;
178                                                   166 
179         if (st->fail == state) {                  167         if (st->fail == state) {
180                 st->fail = CPUHP_INVALID;         168                 st->fail = CPUHP_INVALID;
181                 return -EAGAIN;                << 
182         }                                      << 
183                                                   169 
184         if (cpuhp_step_empty(bringup, step)) { !! 170                 if (!(bringup ? step->startup.single : step->teardown.single))
185                 WARN_ON_ONCE(1);               !! 171                         return 0;
186                 return 0;                      !! 172 
                                                   >> 173                 return -EAGAIN;
187         }                                         174         }
188                                                   175 
189         if (!step->multi_instance) {              176         if (!step->multi_instance) {
190                 WARN_ON_ONCE(lastp && *lastp);    177                 WARN_ON_ONCE(lastp && *lastp);
191                 cb = bringup ? step->startup.single : step->teardown.single;     178                 cb = bringup ? step->startup.single : step->teardown.single;
192                                                                           !! 179                 if (!cb)
                                                                              >> 180                         return 0;
193                 trace_cpuhp_enter(cpu, st->target, state, cb);               181                 trace_cpuhp_enter(cpu, st->target, state, cb);
194                 ret = cb(cpu);                                               182                 ret = cb(cpu);
195                 trace_cpuhp_exit(cpu, st->state, state, ret);                183                 trace_cpuhp_exit(cpu, st->state, state, ret);
196                 return ret;                                                  184                 return ret;
197         }                                                                    185         }
198         cbm = bringup ? step->startup.multi : step->teardown.multi;          186         cbm = bringup ? step->startup.multi : step->teardown.multi;
                                                   >> 187         if (!cbm)
                                                   >> 188                 return 0;
199                                                   189 
200         /* Single invocation for instance add/    190         /* Single invocation for instance add/remove */
201         if (node) {                               191         if (node) {
202                 WARN_ON_ONCE(lastp && *lastp);    192                 WARN_ON_ONCE(lastp && *lastp);
203                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);     193                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
204                 ret = cbm(cpu, node);                                        194                 ret = cbm(cpu, node);
205                 trace_cpuhp_exit(cpu, st->state, state, ret);                195                 trace_cpuhp_exit(cpu, st->state, state, ret);
206                 return ret;                       196                 return ret;
207         }                                         197         }
208                                                   198 
209         /* State transition. Invoke on all ins    199         /* State transition. Invoke on all instances */
210         cnt = 0;                                  200         cnt = 0;
211         hlist_for_each(node, &step->list) {       201         hlist_for_each(node, &step->list) {
212                 if (lastp && node == *lastp)      202                 if (lastp && node == *lastp)
213                         break;                    203                         break;
214                                                   204 
215                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);     205                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
216                 ret = cbm(cpu, node);                                        206                 ret = cbm(cpu, node);
217                 trace_cpuhp_exit(cpu, st->state, state, ret);                207                 trace_cpuhp_exit(cpu, st->state, state, ret);
218                 if (ret) {                        208                 if (ret) {
219                         if (!lastp)               209                         if (!lastp)
220                                 goto err;         210                                 goto err;
221                                                   211 
222                         *lastp = node;            212                         *lastp = node;
223                         return ret;               213                         return ret;
224                 }                                 214                 }
225                 cnt++;                            215                 cnt++;
226         }                                         216         }
227         if (lastp)                                217         if (lastp)
228                 *lastp = NULL;                    218                 *lastp = NULL;
229         return 0;                                 219         return 0;
230 err:                                              220 err:
231         /* Rollback the instances if one faile    221         /* Rollback the instances if one failed */
232         cbm = !bringup ? step->startup.multi : step->teardown.multi;         222         cbm = !bringup ? step->startup.multi : step->teardown.multi;
233         if (!cbm)                                 223         if (!cbm)
234                 return ret;                       224                 return ret;
235                                                   225 
236         hlist_for_each(node, &step->list) {       226         hlist_for_each(node, &step->list) {
237                 if (!cnt--)                       227                 if (!cnt--)
238                         break;                    228                         break;
239                                                   229 
240                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);     230                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
241                 ret = cbm(cpu, node);                                        231                 ret = cbm(cpu, node);
242                 trace_cpuhp_exit(cpu, st->state, state, ret);                232                 trace_cpuhp_exit(cpu, st->state, state, ret);
243                 /*                                233                 /*
244                  * Rollback must not fail,        234                  * Rollback must not fail,
245                  */                               235                  */
246                 WARN_ON_ONCE(ret);                236                 WARN_ON_ONCE(ret);
247         }                                         237         }
248         return ret;                               238         return ret;
249 }                                                 239 }
250                                                   240 
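
For orientation, a minimal sketch of how a subsystem typically feeds instances into this machinery from the public side (linux/cpuhotplug.h): cpuhp_setup_state_multi() registers the startup/teardown multi callbacks for a state, and cpuhp_state_add_instance() hooks an hlist node into the step's list, which cpuhp_invoke_callback() above then walks on every state transition. The demo_* names are hypothetical, not part of this file:

    #include <linux/cpuhotplug.h>

    static enum cpuhp_state demo_state;        /* dynamically allocated state */
    static struct hlist_node demo_node;        /* hypothetical instance */

    static int demo_prepare(unsigned int cpu, struct hlist_node *node)
    {
            /* per-instance bringup work for @cpu */
            return 0;
    }

    static int demo_dead(unsigned int cpu, struct hlist_node *node)
    {
            /* per-instance teardown work for @cpu */
            return 0;
    }

    static int __init demo_init(void)
    {
            int ret;

            ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "demo:prepare",
                                          demo_prepare, demo_dead);
            if (ret < 0)
                    return ret;
            demo_state = ret;                  /* dynamic states return the slot */
            /* Ends up in cpuhp_invoke_callback() with @node set */
            return cpuhp_state_add_instance(demo_state, &demo_node);
    }
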
251 #ifdef CONFIG_SMP                                 241 #ifdef CONFIG_SMP
252 static bool cpuhp_is_ap_state(enum cpuhp_state state)                     <<
253 {                                                                         <<
254         /*                                                                <<
255          * The extra check for CPUHP_TEARDOWN_CPU is only for documentation  <<
256          * purposes as that state is handled explicitly in cpu_down.      <<
257          */                                                               <<
258         return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;  <<
259 }                                              << 
260                                                << 
261 static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)    242 static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
262 {                                                                            243 {
263         struct completion *done = bringup ? &st->done_up : &st->done_down;   244         struct completion *done = bringup ? &st->done_up : &st->done_down;
264         wait_for_completion(done);                245         wait_for_completion(done);
265 }                                                 246 }
266                                                   247 
267 static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)    248 static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
268 {                                                                            249 {
269         struct completion *done = bringup ? &st->done_up : &st->done_down;   250         struct completion *done = bringup ? &st->done_up : &st->done_down;
270         complete(done);                           251         complete(done);
271 }                                                 252 }
272                                                   253 
273 /*                                                254 /*
274  * The former STARTING/DYING states, ran with IRQs disabled and must not fail.    255  * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
275  */                                               256  */
276 static bool cpuhp_is_atomic_state(enum cpuhp_state state)                    257 static bool cpuhp_is_atomic_state(enum cpuhp_state state)
277 {                                                 258 {
278         return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;       259         return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
279 }                                                 260 }
280                                                   261 
281 /* Synchronization state management */         << 
282 enum cpuhp_sync_state {                        << 
283         SYNC_STATE_DEAD,                       << 
284         SYNC_STATE_KICKED,                     << 
285         SYNC_STATE_SHOULD_DIE,                 << 
286         SYNC_STATE_ALIVE,                      << 
287         SYNC_STATE_SHOULD_ONLINE,              << 
288         SYNC_STATE_ONLINE,                     << 
289 };                                             << 
290                                                << 
291 #ifdef CONFIG_HOTPLUG_CORE_SYNC                << 
292 /**                                            << 
293  * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown  <<
294  * @state:      The synchronization state to set                          <<
295  *                                                                        <<
296  * No synchronization point. Just update of the synchronization state, but implies  <<
297  * a full barrier so that the AP changes are visible before the control CPU proceeds.  <<
298  */                                                                       <<
299 static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)  <<
300 {                                                                         <<
301         atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);          <<
302                                                << 
303         (void)atomic_xchg(st, state);          << 
304 }                                              << 
305                                                << 
306 void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); }             <<
307                                                                           <<
308 static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,  <<
309                                       enum cpuhp_sync_state next_state)   <<
310 {                                                                         <<
311         atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);      <<
312         ktime_t now, end, start = ktime_get(); << 
313         int sync;                              << 
314                                                << 
315         end = start + 10ULL * NSEC_PER_SEC;    << 
316                                                << 
317         sync = atomic_read(st);                << 
318         while (1) {                            << 
319                 if (sync == state) {           << 
320                         if (!atomic_try_cmpxchg(st, &sync, next_state))   <<
321                                 continue;                                 <<
322                         return true;                                      <<
323                 }                                                         <<
324                                                                           <<
325                 now = ktime_get();                                        <<
326                 if (now > end) {                                          <<
327                         /* Timeout. Leave the state as is */              <<
328                         return false;                                     <<
329                 } else if (now - start < NSEC_PER_MSEC) {                 <<
330                         /* Poll for one millisecond */                    <<
331                         arch_cpuhp_sync_state_poll();                     <<
332                 } else {                                                  <<
333                         usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);   <<
334                 }                              << 
335                 sync = atomic_read(st);        << 
336         }                                      << 
337         return true;                           << 
338 }                                              << 
339 #else  /* CONFIG_HOTPLUG_CORE_SYNC */          << 
340 static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { }  <<
341 #endif /* !CONFIG_HOTPLUG_CORE_SYNC */         << 
342                                                << 
343 #ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD           << 
344 /**                                            << 
345  * cpuhp_ap_report_dead - Update synchronization state to DEAD            <<
346  *                                                                        <<
347  * No synchronization point. Just update of the synchronization state.    <<
348  */                                            << 
349 void cpuhp_ap_report_dead(void)                << 
350 {                                              << 
351         cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);                      <<
352 }                                              << 
353                                                << 
354 void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }             <<
355                                                << 
356 /*                                             << 
357  * Late CPU shutdown synchronization point. Cannot use wait_for_completion()  <<
358  * because the AP cannot issue complete() at this stage.                  <<
359  */                                            << 
360 static void cpuhp_bp_sync_dead(unsigned int cpu)                          <<
361 {                                                                         <<
362         atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);      <<
363         int sync = atomic_read(st);            << 
364                                                << 
365         do {                                   << 
366                 /* CPU can have reported dead already. Don't overwrite that! */  <<
367                 if (sync == SYNC_STATE_DEAD)   << 
368                         break;                 << 
369         } while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));  <<
370                                                << 
371         if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {  <<
372                 /* CPU reached dead state. Invoke the cleanup function */ <<
373                 arch_cpuhp_cleanup_dead_cpu(cpu);                         <<
374                 return;                        << 
375         }                                      << 
376                                                << 
377         /* No further action possible. Emit message and give up. */       <<
378         pr_err("CPU%u failed to report dead state\n", cpu);               <<
379 }                                              << 
380 #else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */      << 
381 static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }               <<
382 #endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */    << 
383                                                << 
384 #ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL           << 
385 /**                                            << 
386  * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive  <<
387  *                                                                        <<
388  * Updates the AP synchronization state to SYNC_STATE_ALIVE and waits     <<
389  * for the BP to release it.                   << 
390  */                                            << 
391 void cpuhp_ap_sync_alive(void)                 << 
392 {                                              << 
393         atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);          <<
394                                                << 
395         cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);                     <<
396                                                << 
397         /* Wait for the control CPU to release it. */                     <<
398         while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)               <<
399                 cpu_relax();                   << 
400 }                                              << 
401                                                << 
402 static bool cpuhp_can_boot_ap(unsigned int cpu)                           <<
403 {                                                                         <<
404         atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);      <<
405         int sync = atomic_read(st);            << 
406                                                << 
407 again:                                         << 
408         switch (sync) {                        << 
409         case SYNC_STATE_DEAD:                  << 
410                 /* CPU is properly dead */     << 
411                 break;                         << 
412         case SYNC_STATE_KICKED:                << 
413                 /* CPU did not come up in previous attempt */             <<
414                 break;                         << 
415         case SYNC_STATE_ALIVE:                 << 
416                 /* CPU is stuck in cpuhp_ap_sync_alive(). */              <<
417                 break;                         << 
418         default:                               << 
419                 /* CPU failed to report online or dead and is in limbo state. */  <<
420                 return false;                  << 
421         }                                      << 
422                                                << 
423         /* Prepare for booting */              << 
424         if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))            <<
425                 goto again;                    << 
426                                                << 
427         return true;                           << 
428 }                                              << 
429                                                << 
430 void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { }             <<
431                                                << 
432 /*                                             << 
433  * Early CPU bringup synchronization point. Cannot use wait_for_completion()  <<
434  * because the AP cannot issue complete() so early in the bringup.        <<
435  */                                                                       <<
436 static int cpuhp_bp_sync_alive(unsigned int cpu)                          <<
437 {                                              << 
438         int ret = 0;                           << 
439                                                << 
440         if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL))                   <<
441                 return 0;                      << 
442                                                << 
443         if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {  <<
444                 pr_err("CPU%u failed to report alive state\n", cpu);      <<
445                 ret = -EIO;                    << 
446         }                                      << 
447                                                << 
448         /* Let the architecture cleanup the kick alive mechanics. */      <<
449         arch_cpuhp_cleanup_kick_cpu(cpu);      << 
450         return ret;                            << 
451 }                                              << 
452 #else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */      << 
453 static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }     <<
454 static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }   <<
455 #endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */    << 
456                                                << 
457 /* Serializes the updates to cpu_online_mask, cpu_present_mask */            262 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
458 static DEFINE_MUTEX(cpu_add_remove_lock);         263 static DEFINE_MUTEX(cpu_add_remove_lock);
459 bool cpuhp_tasks_frozen;                          264 bool cpuhp_tasks_frozen;
460 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);            265 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
461                                                   266 
462 /*                                                267 /*
463  * The following two APIs (cpu_maps_update_begin/done) must be used when     268  * The following two APIs (cpu_maps_update_begin/done) must be used when
464  * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.    269  * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
465  */                                               270  */
466 void cpu_maps_update_begin(void)                  271 void cpu_maps_update_begin(void)
467 {                                                 272 {
468         mutex_lock(&cpu_add_remove_lock);         273         mutex_lock(&cpu_add_remove_lock);
469 }                                                 274 }
470                                                   275 
471 void cpu_maps_update_done(void)                   276 void cpu_maps_update_done(void)
472 {                                                 277 {
473         mutex_unlock(&cpu_add_remove_lock);       278         mutex_unlock(&cpu_add_remove_lock);
474 }                                                 279 }
475                                                   280 
476 /*                                                281 /*
477  * If set, cpu_up and cpu_down will return -EBUSY and do nothing.            282  * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
478  * Should always be manipulated under cpu_add_remove_lock                    283  * Should always be manipulated under cpu_add_remove_lock
479  */                                               284  */
480 static int cpu_hotplug_disabled;                  285 static int cpu_hotplug_disabled;
481                                                   286 
482 #ifdef CONFIG_HOTPLUG_CPU                         287 #ifdef CONFIG_HOTPLUG_CPU
483                                                   288 
484 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);     289 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
485                                                   290 
486 static bool cpu_hotplug_offline_disabled __ro_after_init;                 <<
487                                                << 
488 void cpus_read_lock(void)                         291 void cpus_read_lock(void)
489 {                                                 292 {
490         percpu_down_read(&cpu_hotplug_lock);      293         percpu_down_read(&cpu_hotplug_lock);
491 }                                                 294 }
492 EXPORT_SYMBOL_GPL(cpus_read_lock);                295 EXPORT_SYMBOL_GPL(cpus_read_lock);
493                                                   296 
494 int cpus_read_trylock(void)                    << 
495 {                                              << 
496         return percpu_down_read_trylock(&cpu_hotplug_lock);               <<
497 }                                              << 
498 EXPORT_SYMBOL_GPL(cpus_read_trylock);          << 
499                                                << 
500 void cpus_read_unlock(void)                       297 void cpus_read_unlock(void)
501 {                                                 298 {
502         percpu_up_read(&cpu_hotplug_lock);        299         percpu_up_read(&cpu_hotplug_lock);
503 }                                                 300 }
504 EXPORT_SYMBOL_GPL(cpus_read_unlock);              301 EXPORT_SYMBOL_GPL(cpus_read_unlock);
505                                                   302 
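
As a usage sketch (not taken from this file), the read side pairs around any walk of the online mask that must not race with a hotplug writer holding cpus_write_lock(); the demo function name is hypothetical:

    static void demo_walk_online_cpus(void)
    {
            unsigned int cpu;

            cpus_read_lock();       /* excludes cpus_write_lock() holders */
            for_each_online_cpu(cpu) {
                    /* @cpu cannot be unplugged while the read lock is held */
            }
            cpus_read_unlock();
    }
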
506 void cpus_write_lock(void)                        303 void cpus_write_lock(void)
507 {                                                 304 {
508         percpu_down_write(&cpu_hotplug_lock);     305         percpu_down_write(&cpu_hotplug_lock);
509 }                                                 306 }
510                                                   307 
511 void cpus_write_unlock(void)                      308 void cpus_write_unlock(void)
512 {                                                 309 {
513         percpu_up_write(&cpu_hotplug_lock);       310         percpu_up_write(&cpu_hotplug_lock);
514 }                                                 311 }
515                                                   312 
516 void lockdep_assert_cpus_held(void)               313 void lockdep_assert_cpus_held(void)
517 {                                                 314 {
518         /*                                     << 
519          * We can't have hotplug operations before userspace starts running,  <<
520          * and some init codepaths will knowingly not take the hotplug lock.  <<
521          * This is all valid, so mute lockdep until it makes sense to report  <<
522          * unheld locks.                       << 
523          */                                    << 
524         if (system_state < SYSTEM_RUNNING)     << 
525                 return;                        << 
526                                                << 
527         percpu_rwsem_assert_held(&cpu_hotplug_lock);                         315         percpu_rwsem_assert_held(&cpu_hotplug_lock);
528 }                                                 316 }
529                                                   317 
530 #ifdef CONFIG_LOCKDEP                          << 
531 int lockdep_is_cpus_held(void)                 << 
532 {                                              << 
533         return percpu_rwsem_is_held(&cpu_hotplug_lock);                   <<
534 }                                              << 
535 #endif                                         << 
536                                                << 
537 static void lockdep_acquire_cpus_lock(void)    << 
538 {                                              << 
539         rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);        <<
540 }                                              << 
541                                                << 
542 static void lockdep_release_cpus_lock(void)    << 
543 {                                              << 
544         rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);              <<
545 }                                              << 
546                                                << 
547 /* Declare CPU offlining not supported */      << 
548 void cpu_hotplug_disable_offlining(void)       << 
549 {                                              << 
550         cpu_maps_update_begin();               << 
551         cpu_hotplug_offline_disabled = true;   << 
552         cpu_maps_update_done();                << 
553 }                                              << 
554                                                << 
555 /*                                                318 /*
556  * Wait for currently running CPU hotplug operations to complete (if any) and    319  * Wait for currently running CPU hotplug operations to complete (if any) and
557  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects   320  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
558  * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the        321  * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
559  * hotplug path before performing hotplug operations. So acquiring that lock     322  * hotplug path before performing hotplug operations. So acquiring that lock
560  * guarantees mutual exclusion from any currently running hotplug operations.    323  * guarantees mutual exclusion from any currently running hotplug operations.
561  */                                               324  */
562 void cpu_hotplug_disable(void)                    325 void cpu_hotplug_disable(void)
563 {                                                 326 {
564         cpu_maps_update_begin();                  327         cpu_maps_update_begin();
565         cpu_hotplug_disabled++;                   328         cpu_hotplug_disabled++;
566         cpu_maps_update_done();                   329         cpu_maps_update_done();
567 }                                                 330 }
568 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);           331 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
569                                                   332 
570 static void __cpu_hotplug_enable(void)            333 static void __cpu_hotplug_enable(void)
571 {                                                 334 {
572         if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))    335         if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
573                 return;                           336                 return;
574         cpu_hotplug_disabled--;                   337         cpu_hotplug_disabled--;
575 }                                                 338 }
576                                                   339 
577 void cpu_hotplug_enable(void)                     340 void cpu_hotplug_enable(void)
578 {                                                 341 {
579         cpu_maps_update_begin();                  342         cpu_maps_update_begin();
580         __cpu_hotplug_enable();                   343         __cpu_hotplug_enable();
581         cpu_maps_update_done();                   344         cpu_maps_update_done();
582 }                                                 345 }
583 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);            346 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
584                                                << 
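
A usage note, sketched rather than taken from this file: because cpu_hotplug_disabled is a counter, cpu_hotplug_disable()/cpu_hotplug_enable() nest but must stay strictly balanced; an unbalanced enable trips the WARN_ONCE above.

    cpu_hotplug_disable();  /* cpu_up()/cpu_down() now fail with -EBUSY */
    /* ... section that must not observe CPUs coming or going ... */
    cpu_hotplug_enable();
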
585 #else                                          << 
586                                                << 
587 static void lockdep_acquire_cpus_lock(void)    << 
588 {                                              << 
589 }                                              << 
590                                                << 
591 static void lockdep_release_cpus_lock(void)    << 
592 {                                              << 
593 }                                              << 
594                                                << 
595 #endif  /* CONFIG_HOTPLUG_CPU */                  347 #endif  /* CONFIG_HOTPLUG_CPU */
596                                                   348 
597 /*                                             << 
598  * Architectures that need SMT-specific errata handling during SMT hotplug  <<
599  * should override this.                       << 
600  */                                            << 
601 void __weak arch_smt_update(void) { }          << 
602                                                << 
603 #ifdef CONFIG_HOTPLUG_SMT                      << 
604                                                << 
605 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;   <<
606 static unsigned int cpu_smt_max_threads __ro_after_init;                  <<
607 unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;                <<
608                                                << 
609 void __init cpu_smt_disable(bool force)        << 
610 {                                              << 
611         if (!cpu_smt_possible())               << 
612                 return;                        << 
613                                                << 
614         if (force) {                           << 
615                 pr_info("SMT: Force disabled\n");                         <<
616                 cpu_smt_control = CPU_SMT_FORCE_DISABLED;                 <<
617         } else {                                                          <<
618                 pr_info("SMT: disabled\n");                               <<
619                 cpu_smt_control = CPU_SMT_DISABLED;                       <<
620         }                                      << 
621         cpu_smt_num_threads = 1;               << 
622 }                                              << 
623                                                << 
624 /*                                             << 
625  * The decision whether SMT is supported can only be done after the full  <<
626  * CPU identification. Called from architecture code.                     <<
627  */                                            << 
628 void __init cpu_smt_set_num_threads(unsigned int num_threads,             <<
629                                     unsigned int max_threads)             <<
630 {                                                                         <<
631         WARN_ON(!num_threads || (num_threads > max_threads));             <<
632                                                << 
633         if (max_threads == 1)                  << 
634                 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;                  <<
635                                                << 
636         cpu_smt_max_threads = max_threads;     << 
637                                                << 
638         /*                                     << 
639          * If SMT has been disabled via the kernel command line or SMT is <<
640          * not supported, set cpu_smt_num_threads to 1 for consistency.   <<
641          * If enabled, take the architecture requested number of threads  <<
642          * to bring up into account.                                      <<
643          */                                                               <<
644         if (cpu_smt_control != CPU_SMT_ENABLED)                           <<
645                 cpu_smt_num_threads = 1;                                  <<
646         else if (num_threads < cpu_smt_num_threads)                       <<
647                 cpu_smt_num_threads = num_threads;                        <<
648 }                                              << 
649                                                << 
650 static int __init smt_cmdline_disable(char *str)                          <<
651 {                                                                         <<
652         cpu_smt_disable(str && !strcmp(str, "force"));                    <<
653         return 0;                              << 
654 }                                              << 
655 early_param("nosmt", smt_cmdline_disable);     << 
656                                                << 
657 /*                                             << 
658  * For architectures supporting partial SMT states check if the thread is allowed.  <<
659  * Otherwise this has already been checked through cpu_smt_max_threads when  <<
660  * setting the SMT level.                                                 <<
661  */                                                                       <<
662 static inline bool cpu_smt_thread_allowed(unsigned int cpu)               <<
663 {                                              << 
664 #ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC          << 
665         return topology_smt_thread_allowed(cpu);                          <<
666 #else                                          << 
667         return true;                           << 
668 #endif                                         << 
669 }                                              << 
670                                                << 
671 static inline bool cpu_bootable(unsigned int cpu)                         <<
672 {                                                                         <<
673         if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))  <<
674                 return true;                                              <<
675                                                                           <<
676         /* All CPUs are bootable if controls are not configured */        <<
677         if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)                   <<
678                 return true;                                              <<
679                                                                           <<
680         /* All CPUs are bootable if CPU is not SMT capable */             <<
681         if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)                     <<
682                 return true;                   << 
683                                                << 
684         if (topology_is_primary_thread(cpu))   << 
685                 return true;                   << 
686                                                << 
687         /*                                     << 
688          * On x86 it's required to boot all logical CPUs at least once so <<
689          * that the init code can get a chance to set CR4.MCE on each     <<
690          * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any  <<
691          * core will shutdown the machine.                                <<
692          */                                                               <<
693         return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);            <<
694 }                                              << 
695                                                << 
696 /* Returns true if SMT is supported and not forcefully (irreversibly) disabled */  <<
697 bool cpu_smt_possible(void)                    << 
698 {                                              << 
699         return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&               <<
700                 cpu_smt_control != CPU_SMT_NOT_SUPPORTED;                 <<
701 }                                              << 
702 EXPORT_SYMBOL_GPL(cpu_smt_possible);           << 
703                                                << 
704 #else                                          << 
705 static inline bool cpu_bootable(unsigned int cpu) { return true; }        <<
706 #endif                                         << 
707                                                << 
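
A brief usage sketch for the exported query above (the caller shown is hypothetical; on x86, KVM consults it similarly): cpu_smt_possible() tells modules whether SMT siblings can ever come online on this system.

    /* hypothetical caller, e.g. during module init */
    if (cpu_smt_possible())
            pr_info("SMT siblings may be brought online\n");
    else
            pr_info("SMT force-disabled or not supported\n");
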
708 static inline enum cpuhp_state                    349 static inline enum cpuhp_state
709 cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)  !! 350 cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
710 {                                                                            351 {
711         enum cpuhp_state prev_state = st->state;                             352         enum cpuhp_state prev_state = st->state;
712         bool bringup = st->state < target;     << 
713                                                   353 
714         st->rollback = false;                     354         st->rollback = false;
715         st->last = NULL;                          355         st->last = NULL;
716                                                   356 
717         st->target = target;                      357         st->target = target;
718         st->single = false;                       358         st->single = false;
719         st->bringup = bringup;                 !! 359         st->bringup = st->state < target;
720         if (cpu_dying(cpu) != !bringup)        << 
721                 set_cpu_dying(cpu, !bringup);  << 
722                                                   360 
723         return prev_state;                        361         return prev_state;
724 }                                                 362 }
725                                                   363 
726 static inline void                                364 static inline void
727 cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,                    !! 365 cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
728                   enum cpuhp_state prev_state) << 
729 {                                                 366 {
730         bool bringup = !st->bringup;           << 
731                                                << 
732         st->target = prev_state;               << 
733                                                << 
734         /*                                     << 
735          * Already rolling back. No need invert the bringup value or to change  <<
736          * the current state.                  << 
737          */                                    << 
738         if (st->rollback)                      << 
739                 return;                        << 
740                                                << 
741         st->rollback = true;                      367         st->rollback = true;
742                                                   368 
743         /*                                        369         /*
744          * If we have st->last we need to undo partial multi_instance of this    370          * If we have st->last we need to undo partial multi_instance of this
745          * state first. Otherwise start undo at the previous state.              371          * state first. Otherwise start undo at the previous state.
746          */                                       372          */
747         if (!st->last) {                          373         if (!st->last) {
748                 if (st->bringup)                  374                 if (st->bringup)
749                         st->state--;              375                         st->state--;
750                 else                              376                 else
751                         st->state++;              377                         st->state++;
752         }                                         378         }
753                                                   379 
754         st->bringup = bringup;                 !! 380         st->target = prev_state;
755         if (cpu_dying(cpu) != !bringup)        !! 381         st->bringup = !st->bringup;
756                 set_cpu_dying(cpu, !bringup);  << 
757 }                                                 382 }
758                                                   383 
759 /* Regular hotplug invocation of the AP hotplu    384 /* Regular hotplug invocation of the AP hotplug thread */
760 static void __cpuhp_kick_ap(struct cpuhp_cpu_s    385 static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
761 {                                                 386 {
762         if (!st->single && st->state == st->target)                          387         if (!st->single && st->state == st->target)
763                 return;                           388                 return;
764                                                   389 
765         st->result = 0;                           390         st->result = 0;
766         /*                                        391         /*
767          * Make sure the above stores are visi    392          * Make sure the above stores are visible before should_run becomes
768          * true. Paired with the mb() above in    393          * true. Paired with the mb() above in cpuhp_thread_fun()
769          */                                       394          */
770         smp_mb();                                 395         smp_mb();
771         st->should_run = true;                    396         st->should_run = true;
772         wake_up_process(st->thread);              397         wake_up_process(st->thread);
773         wait_for_ap_thread(st, st->bringup);      398         wait_for_ap_thread(st, st->bringup);
774 }                                                 399 }
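
/*
 * Illustrative sketch (editor's example, not part of cpu.c): the
 * smp_mb() pairing above, reduced to a minimal publisher/consumer.
 * All names below are invented. The publisher's barrier orders "fill
 * in the request" before "set the flag"; the consumer's barrier (the
 * smp_mb() in cpuhp_thread_fun()) orders "flag observed true" before
 * "read the request".
 */
#include <linux/compiler.h>
#include <linux/bug.h>
#include <asm/barrier.h>

static int example_payload;
static bool example_flag;

static void example_publish(void)
{
        example_payload = 42;           /* stores describing the work ...  */
        smp_mb();                       /* ... must be visible before flag */
        WRITE_ONCE(example_flag, true);
}

static void example_consume(void)
{
        if (!READ_ONCE(example_flag))
                return;
        smp_mb();                       /* pairs with example_publish()    */
        WARN_ON(example_payload != 42); /* payload guaranteed visible here */
}
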
775                                                   400 
776 static int cpuhp_kick_ap(int cpu, struct cpuhp !! 401 static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
777                          enum cpuhp_state targ << 
778 {                                                 402 {
779         enum cpuhp_state prev_state;              403         enum cpuhp_state prev_state;
780         int ret;                                  404         int ret;
781                                                   405 
782         prev_state = cpuhp_set_state(cpu, st,  !! 406         prev_state = cpuhp_set_state(st, target);
783         __cpuhp_kick_ap(st);                      407         __cpuhp_kick_ap(st);
784         if ((ret = st->result)) {                 408         if ((ret = st->result)) {
785                 cpuhp_reset_state(cpu, st, pre !! 409                 cpuhp_reset_state(st, prev_state);
786                 __cpuhp_kick_ap(st);              410                 __cpuhp_kick_ap(st);
787         }                                         411         }
788                                                   412 
789         return ret;                               413         return ret;
790 }                                                 414 }
791                                                   415 
792 static int bringup_wait_for_ap_online(unsigned !! 416 static int bringup_wait_for_ap(unsigned int cpu)
793 {                                                 417 {
794         struct cpuhp_cpu_state *st = per_cpu_p    418         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
795                                                   419 
796         /* Wait for the CPU to reach CPUHP_AP_    420         /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
797         wait_for_ap_thread(st, true);             421         wait_for_ap_thread(st, true);
798         if (WARN_ON_ONCE((!cpu_online(cpu))))     422         if (WARN_ON_ONCE((!cpu_online(cpu))))
799                 return -ECANCELED;                423                 return -ECANCELED;
800                                                   424 
801         /* Unpark the hotplug thread of the ta !! 425         /* Unpark the stopper thread and the hotplug thread of the target cpu */
                                                   >> 426         stop_machine_unpark(cpu);
802         kthread_unpark(st->thread);               427         kthread_unpark(st->thread);
803                                                   428 
804         /*                                     << 
805          * SMT soft disabling on X86 requires  << 
806          * BIOS 'wait for SIPI' state in order << 
807          * CPU marked itself as booted_once in << 
808          * cpu_bootable() check will now retur << 
809          * primary sibling.                    << 
810          */                                    << 
811         if (!cpu_bootable(cpu))                << 
812                 return -ECANCELED;             << 
813         return 0;                              << 
814 }                                              << 
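
/*
 * Illustrative sketch (editor's example, not part of cpu.c):
 * wait_for_ap_thread()/complete_ap_thread() are thin wrappers around
 * the st->done_up/st->done_down completions set up in cpuhp_init_state()
 * further below; the underlying primitive in its minimal form, with
 * invented names:
 */
#include <linux/completion.h>

static DECLARE_COMPLETION(example_done);

static void example_waiter(void)        /* e.g. the control (BP) side */
{
        wait_for_completion(&example_done);     /* sleeps until complete() */
}

static void example_finisher(void)      /* e.g. the AP hotplug thread */
{
        complete(&example_done);        /* wakes one waiter */
}
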
815                                                << 
816 #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP            << 
817 static int cpuhp_kick_ap_alive(unsigned int cp << 
818 {                                              << 
819         if (!cpuhp_can_boot_ap(cpu))           << 
820                 return -EAGAIN;                << 
821                                                << 
822         return arch_cpuhp_kick_ap_alive(cpu, i << 
823 }                                              << 
824                                                << 
825 static int cpuhp_bringup_ap(unsigned int cpu)  << 
826 {                                              << 
827         struct cpuhp_cpu_state *st = per_cpu_p << 
828         int ret;                               << 
829                                                << 
830         /*                                     << 
831          * Some architectures have to walk the << 
832          * setup the vector space for the cpu  << 
833          * Prevent irq alloc/free across the b << 
834          */                                    << 
835         irq_lock_sparse();                     << 
836                                                << 
837         ret = cpuhp_bp_sync_alive(cpu);        << 
838         if (ret)                               << 
839                 goto out_unlock;               << 
840                                                << 
841         ret = bringup_wait_for_ap_online(cpu); << 
842         if (ret)                               << 
843                 goto out_unlock;               << 
844                                                << 
845         irq_unlock_sparse();                   << 
846                                                << 
847         if (st->target <= CPUHP_AP_ONLINE_IDLE    429         if (st->target <= CPUHP_AP_ONLINE_IDLE)
848                 return 0;                         430                 return 0;
849                                                   431 
850         return cpuhp_kick_ap(cpu, st, st->targ !! 432         return cpuhp_kick_ap(st, st->target);
851                                                << 
852 out_unlock:                                    << 
853         irq_unlock_sparse();                   << 
854         return ret;                            << 
855 }                                                 433 }
856 #else                                          !! 434 
857 static int bringup_cpu(unsigned int cpu)          435 static int bringup_cpu(unsigned int cpu)
858 {                                                 436 {
859         struct cpuhp_cpu_state *st = per_cpu_p << 
860         struct task_struct *idle = idle_thread    437         struct task_struct *idle = idle_thread_get(cpu);
861         int ret;                                  438         int ret;
862                                                   439 
863         if (!cpuhp_can_boot_ap(cpu))           << 
864                 return -EAGAIN;                << 
865                                                << 
866         /*                                        440         /*
867          * Some architectures have to walk the    441          * Some architectures have to walk the irq descriptors to
868          * setup the vector space for the cpu     442          * setup the vector space for the cpu which comes online.
869          *                                     !! 443          * Prevent irq alloc/free across the bringup.
870          * Prevent irq alloc/free across the b << 
871          * sparse irq lock. Hold it until the  << 
872          * startup in cpuhp_online_idle() whic << 
873          * intermediate synchronization points << 
874          */                                       444          */
875         irq_lock_sparse();                        445         irq_lock_sparse();
876                                                   446 
                                                   >> 447         /* Arch-specific enabling code. */
877         ret = __cpu_up(cpu, idle);                448         ret = __cpu_up(cpu, idle);
878         if (ret)                               << 
879                 goto out_unlock;               << 
880                                                << 
881         ret = cpuhp_bp_sync_alive(cpu);        << 
882         if (ret)                               << 
883                 goto out_unlock;               << 
884                                                << 
885         ret = bringup_wait_for_ap_online(cpu); << 
886         if (ret)                               << 
887                 goto out_unlock;               << 
888                                                << 
889         irq_unlock_sparse();                      449         irq_unlock_sparse();
890                                                !! 450         if (ret)
891         if (st->target <= CPUHP_AP_ONLINE_IDLE !! 451                 return ret;
892                 return 0;                      !! 452         return bringup_wait_for_ap(cpu);
893                                                << 
894         return cpuhp_kick_ap(cpu, st, st->targ << 
895                                                << 
896 out_unlock:                                    << 
897         irq_unlock_sparse();                   << 
898         return ret;                            << 
899 }                                              << 
900 #endif                                         << 
901                                                << 
902 static int finish_cpu(unsigned int cpu)        << 
903 {                                              << 
904         struct task_struct *idle = idle_thread << 
905         struct mm_struct *mm = idle->active_mm << 
906                                                << 
907         /*                                     << 
908          * idle_task_exit() will have switched << 
909          * clean up any remaining active_mm st << 
910          */                                    << 
911         if (mm != &init_mm)                    << 
912                 idle->active_mm = &init_mm;    << 
913         mmdrop_lazy_tlb(mm);                   << 
914         return 0;                              << 
915 }                                                 453 }
916                                                   454 
917 /*                                                455 /*
918  * Hotplug state machine related functions        456  * Hotplug state machine related functions
919  */                                               457  */
920                                                   458 
921 /*                                             !! 459 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
922  * Get the next state to run. Empty ones will  << 
923  * state must be run.                          << 
924  *                                             << 
925  * st->state will be modified ahead of time, t << 
926  * had already run.                            << 
927  */                                            << 
928 static bool cpuhp_next_state(bool bringup,     << 
929                              enum cpuhp_state  << 
930                              struct cpuhp_cpu_ << 
931                              enum cpuhp_state  << 
932 {                                              << 
933         do {                                   << 
934                 if (bringup) {                 << 
935                         if (st->state >= targe << 
936                                 return false;  << 
937                                                << 
938                         *state_to_run = ++st-> << 
939                 } else {                       << 
940                         if (st->state <= targe << 
941                                 return false;  << 
942                                                << 
943                         *state_to_run = st->st << 
944                 }                              << 
945                                                << 
946                 if (!cpuhp_step_empty(bringup, << 
947                         break;                 << 
948         } while (true);                        << 
949                                                << 
950         return true;                           << 
951 }                                              << 
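
/*
 * Illustrative sketch (editor's example, not part of cpu.c): the
 * stepping rule above in isolation. On bringup the cursor is advanced
 * *before* a step runs (++st->state); on teardown it is decremented
 * *after* the step is picked (st->state--). Either way the cursor ends
 * up naming the last step that was actually issued, which is where a
 * rollback starts from. Names below are invented.
 */
#include <linux/types.h>

static void example_run_step(int step)
{
        /* stand-in for cpuhp_invoke_callback() */
}

static void example_walk(int *state, int target, bool bringup)
{
        while (bringup ? *state < target : *state > target)
                example_run_step(bringup ? ++(*state) : (*state)--);
}
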
952                                                << 
953 static int __cpuhp_invoke_callback_range(bool  << 
954                                          unsig << 
955                                          struc << 
956                                          enum  << 
957                                          bool  << 
958 {                                                 460 {
959         enum cpuhp_state state;                !! 461         for (st->state--; st->state > st->target; st->state--) {
960         int ret = 0;                           !! 462                 struct cpuhp_step *step = cpuhp_get_step(st->state);
961                                                << 
962         while (cpuhp_next_state(bringup, &stat << 
963                 int err;                       << 
964                                                << 
965                 err = cpuhp_invoke_callback(cp << 
966                 if (!err)                      << 
967                         continue;              << 
968                                                   463 
969                 if (nofail) {                  !! 464                 if (!step->skip_onerr)
970                         pr_warn("CPU %u %s sta !! 465                         cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
971                                 cpu, bringup ? << 
972                                 cpuhp_get_step << 
973                                 st->state, err << 
974                         ret = -1;              << 
975                 } else {                       << 
976                         ret = err;             << 
977                         break;                 << 
978                 }                              << 
979         }                                         466         }
980                                                << 
981         return ret;                            << 
982 }                                              << 
983                                                << 
984 static inline int cpuhp_invoke_callback_range( << 
985                                                << 
986                                                << 
987                                                << 
988 {                                              << 
989         return __cpuhp_invoke_callback_range(b << 
990 }                                              << 
991                                                << 
992 static inline void cpuhp_invoke_callback_range << 
993                                                << 
994                                                << 
995                                                << 
996 {                                              << 
997         __cpuhp_invoke_callback_range(bringup, << 
998 }                                              << 
999                                                << 
1000 static inline bool can_rollback_cpu(struct cp << 
1001 {                                             << 
1002         if (IS_ENABLED(CONFIG_HOTPLUG_CPU))   << 
1003                 return true;                  << 
1004         /*                                    << 
1005          * When CPU hotplug is disabled, then << 
1006          * possible because takedown_cpu() an << 
1007          * subsystem specific mechanisms are  << 
1008          * which would be completely unplugge << 
1009          * in the current state.              << 
1010          */                                   << 
1011         return st->state <= CPUHP_BRINGUP_CPU << 
1012 }                                                467 }
1013                                                  468 
1014 static int cpuhp_up_callbacks(unsigned int cp    469 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1015                               enum cpuhp_stat    470                               enum cpuhp_state target)
1016 {                                                471 {
1017         enum cpuhp_state prev_state = st->sta    472         enum cpuhp_state prev_state = st->state;
1018         int ret = 0;                             473         int ret = 0;
1019                                                  474 
1020         ret = cpuhp_invoke_callback_range(tru !! 475         while (st->state < target) {
1021         if (ret) {                            !! 476                 st->state++;
1022                 pr_debug("CPU UP failed (%d)  !! 477                 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
1023                          ret, cpu, cpuhp_get_ !! 478                 if (ret) {
1024                          st->state);          !! 479                         st->target = prev_state;
1025                                               !! 480                         undo_cpu_up(cpu, st);
1026                 cpuhp_reset_state(cpu, st, pr !! 481                         break;
1027                 if (can_rollback_cpu(st))     !! 482                 }
1028                         WARN_ON(cpuhp_invoke_ << 
1029                                               << 
1030         }                                        483         }
1031         return ret;                              484         return ret;
1032 }                                                485 }
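
/*
 * Illustrative sketch (editor's example, not part of cpu.c): the
 * rollback above is what lets users of the public API treat
 * registration as all-or-nothing. A typical subsystem hooks the state
 * machine like this (invented names; CPUHP_AP_ONLINE_DYN requests a
 * dynamically allocated state):
 */
#include <linux/cpuhotplug.h>

static int example_cpu_online(unsigned int cpu)
{
        /* per-CPU setup; a nonzero return here triggers the rollback */
        return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
        /* undo example_cpu_online() for @cpu */
        return 0;
}

static int example_register(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                example_cpu_online, example_cpu_offline);
        /* ret < 0: failed and already rolled back; ret >= 0: state id */
        return ret < 0 ? ret : 0;
}
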
1033                                                  486 
1034 /*                                               487 /*
1035  * The cpu hotplug threads manage the bringup    488  * The cpu hotplug threads manage the bringup and teardown of the cpus
1036  */                                              489  */
                                                   >> 490 static void cpuhp_create(unsigned int cpu)
                                                   >> 491 {
                                                   >> 492         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                                                   >> 493 
                                                   >> 494         init_completion(&st->done_up);
                                                   >> 495         init_completion(&st->done_down);
                                                   >> 496 }
                                                   >> 497 
1037 static int cpuhp_should_run(unsigned int cpu)    498 static int cpuhp_should_run(unsigned int cpu)
1038 {                                                499 {
1039         struct cpuhp_cpu_state *st = this_cpu    500         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1040                                                  501 
1041         return st->should_run;                   502         return st->should_run;
1042 }                                                503 }
1043                                                  504 
1044 /*                                               505 /*
1045  * Execute teardown/startup callbacks on the     506  * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
1046  * callbacks when a state gets [un]installed     507  * callbacks when a state gets [un]installed at runtime.
1047  *                                               508  *
1048  * Each invocation of this function by the sm    509  * Each invocation of this function by the smpboot thread does a single AP
1049  * state callback.                               510  * state callback.
1050  *                                               511  *
1051  * It has 3 modes of operation:                  512  * It has 3 modes of operation:
1052  *  - single: runs st->cb_state                  513  *  - single: runs st->cb_state
1053  *  - up:     runs ++st->state, while st->sta    514  *  - up:     runs ++st->state, while st->state < st->target
1054  *  - down:   runs st->state--, while st->sta    515  *  - down:   runs st->state--, while st->state > st->target
1055  *                                               516  *
1056  * When complete or on error, should_run is c    517  * When complete or on error, should_run is cleared and the completion is fired.
1057  */                                              518  */
1058 static void cpuhp_thread_fun(unsigned int cpu    519 static void cpuhp_thread_fun(unsigned int cpu)
1059 {                                                520 {
1060         struct cpuhp_cpu_state *st = this_cpu    521         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1061         bool bringup = st->bringup;              522         bool bringup = st->bringup;
1062         enum cpuhp_state state;                  523         enum cpuhp_state state;
1063                                                  524 
1064         if (WARN_ON_ONCE(!st->should_run))    << 
1065                 return;                       << 
1066                                               << 
1067         /*                                       525         /*
1068          * ACQUIRE for the cpuhp_should_run()    526          * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
1069          * that if we see ->should_run we als    527          * that if we see ->should_run we also see the rest of the state.
1070          */                                      528          */
1071         smp_mb();                                529         smp_mb();
1072                                                  530 
1073         /*                                    !! 531         if (WARN_ON_ONCE(!st->should_run))
1074          * The BP holds the hotplug lock, but !! 532                 return;
1075          * ensure that anybody asserting the  !! 533 
1076          * it so.                             << 
1077          */                                   << 
1078         lockdep_acquire_cpus_lock();          << 
1079         cpuhp_lock_acquire(bringup);             534         cpuhp_lock_acquire(bringup);
1080                                                  535 
1081         if (st->single) {                        536         if (st->single) {
1082                 state = st->cb_state;            537                 state = st->cb_state;
1083                 st->should_run = false;          538                 st->should_run = false;
1084         } else {                                 539         } else {
1085                 st->should_run = cpuhp_next_s !! 540                 if (bringup) {
1086                 if (!st->should_run)          !! 541                         st->state++;
1087                         goto end;             !! 542                         state = st->state;
                                                   >> 543                         st->should_run = (st->state < st->target);
                                                   >> 544                         WARN_ON_ONCE(st->state > st->target);
                                                   >> 545                 } else {
                                                   >> 546                         state = st->state;
                                                   >> 547                         st->state--;
                                                   >> 548                         st->should_run = (st->state > st->target);
                                                   >> 549                         WARN_ON_ONCE(st->state < st->target);
                                                   >> 550                 }
1088         }                                        551         }
1089                                                  552 
1090         WARN_ON_ONCE(!cpuhp_is_ap_state(state    553         WARN_ON_ONCE(!cpuhp_is_ap_state(state));
1091                                                  554 
                                                   >> 555         if (st->rollback) {
                                                   >> 556                 struct cpuhp_step *step = cpuhp_get_step(state);
                                                   >> 557                 if (step->skip_onerr)
                                                   >> 558                         goto next;
                                                   >> 559         }
                                                   >> 560 
1092         if (cpuhp_is_atomic_state(state)) {      561         if (cpuhp_is_atomic_state(state)) {
1093                 local_irq_disable();             562                 local_irq_disable();
1094                 st->result = cpuhp_invoke_cal    563                 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
1095                 local_irq_enable();              564                 local_irq_enable();
1096                                                  565 
1097                 /*                               566                 /*
1098                  * STARTING/DYING must not fa    567                  * STARTING/DYING must not fail!
1099                  */                              568                  */
1100                 WARN_ON_ONCE(st->result);        569                 WARN_ON_ONCE(st->result);
1101         } else {                                 570         } else {
1102                 st->result = cpuhp_invoke_cal    571                 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
1103         }                                        572         }
1104                                                  573 
1105         if (st->result) {                        574         if (st->result) {
1106                 /*                               575                 /*
1107                  * If we fail on a rollback,     576                  * If we fail on a rollback, we're up a creek without a
1108                  * paddle, no way forward, no    577                  * paddle, no way forward, no way back. We lose, thanks for
1109                  * playing.                      578                  * playing.
1110                  */                              579                  */
1111                 WARN_ON_ONCE(st->rollback);      580                 WARN_ON_ONCE(st->rollback);
1112                 st->should_run = false;          581                 st->should_run = false;
1113         }                                        582         }
1114                                                  583 
1115 end:                                          !! 584 next:
1116         cpuhp_lock_release(bringup);             585         cpuhp_lock_release(bringup);
1117         lockdep_release_cpus_lock();          << 
1118                                                  586 
1119         if (!st->should_run)                     587         if (!st->should_run)
1120                 complete_ap_thread(st, bringu    588                 complete_ap_thread(st, bringup);
1121 }                                                589 }
1122                                                  590 
1123 /* Invoke a single callback on a remote cpu *    591 /* Invoke a single callback on a remote cpu */
1124 static int                                       592 static int
1125 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_    593 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
1126                          struct hlist_node *n    594                          struct hlist_node *node)
1127 {                                                595 {
1128         struct cpuhp_cpu_state *st = per_cpu_    596         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1129         int ret;                                 597         int ret;
1130                                                  598 
1131         if (!cpu_online(cpu))                    599         if (!cpu_online(cpu))
1132                 return 0;                        600                 return 0;
1133                                                  601 
1134         cpuhp_lock_acquire(false);               602         cpuhp_lock_acquire(false);
1135         cpuhp_lock_release(false);               603         cpuhp_lock_release(false);
1136                                                  604 
1137         cpuhp_lock_acquire(true);                605         cpuhp_lock_acquire(true);
1138         cpuhp_lock_release(true);                606         cpuhp_lock_release(true);
1139                                                  607 
1140         /*                                       608         /*
1141          * If we are up and running, use the     609          * If we are up and running, use the hotplug thread. For early calls
1142          * we invoke the thread function dire    610          * we invoke the thread function directly.
1143          */                                      611          */
1144         if (!st->thread)                         612         if (!st->thread)
1145                 return cpuhp_invoke_callback(    613                 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1146                                                  614 
1147         st->rollback = false;                    615         st->rollback = false;
1148         st->last = NULL;                         616         st->last = NULL;
1149                                                  617 
1150         st->node = node;                         618         st->node = node;
1151         st->bringup = bringup;                   619         st->bringup = bringup;
1152         st->cb_state = state;                    620         st->cb_state = state;
1153         st->single = true;                       621         st->single = true;
1154                                                  622 
1155         __cpuhp_kick_ap(st);                     623         __cpuhp_kick_ap(st);
1156                                                  624 
1157         /*                                       625         /*
1158          * If we failed and did a partial, do    626          * If we failed and did a partial, do a rollback.
1159          */                                      627          */
1160         if ((ret = st->result) && st->last) {    628         if ((ret = st->result) && st->last) {
1161                 st->rollback = true;             629                 st->rollback = true;
1162                 st->bringup = !bringup;          630                 st->bringup = !bringup;
1163                                                  631 
1164                 __cpuhp_kick_ap(st);             632                 __cpuhp_kick_ap(st);
1165         }                                        633         }
1166                                                  634 
1167         /*                                       635         /*
1168          * Clean up the leftovers so the next    636          * Clean up the leftovers so the next hotplug operation won't use stale
1169          * data.                                 637          * data.
1170          */                                      638          */
1171         st->node = st->last = NULL;              639         st->node = st->last = NULL;
1172         return ret;                              640         return ret;
1173 }                                                641 }
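
/*
 * Illustrative sketch (editor's example, not part of cpu.c): the
 * st->node/st->last bookkeeping above serves the multi-instance flavour
 * of the API, where one state carries a callback pair per registered
 * object and a partially failed instance is rolled back from st->last.
 * Invented names throughout:
 */
#include <linux/cpuhotplug.h>
#include <linux/list.h>

struct example_dev {
        struct hlist_node node; /* linked into the state's instance list */
        /* ... per-device data ... */
};

static int example_state;

static int example_inst_online(unsigned int cpu, struct hlist_node *node)
{
        struct example_dev *dev = hlist_entry(node, struct example_dev, node);

        (void)dev;              /* set up @dev for @cpu */
        return 0;
}

static int example_inst_offline(unsigned int cpu, struct hlist_node *node)
{
        return 0;               /* undo example_inst_online() */
}

static int example_init(void)
{
        example_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                                "example:online",
                                                example_inst_online,
                                                example_inst_offline);
        return example_state < 0 ? example_state : 0;
}

static int example_add_device(struct example_dev *dev)
{
        /* runs example_inst_online(cpu, &dev->node) on each online CPU */
        return cpuhp_state_add_instance(example_state, &dev->node);
}
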
1174                                                  642 
1175 static int cpuhp_kick_ap_work(unsigned int cp    643 static int cpuhp_kick_ap_work(unsigned int cpu)
1176 {                                                644 {
1177         struct cpuhp_cpu_state *st = per_cpu_    645         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1178         enum cpuhp_state prev_state = st->sta    646         enum cpuhp_state prev_state = st->state;
1179         int ret;                                 647         int ret;
1180                                                  648 
1181         cpuhp_lock_acquire(false);               649         cpuhp_lock_acquire(false);
1182         cpuhp_lock_release(false);               650         cpuhp_lock_release(false);
1183                                                  651 
1184         cpuhp_lock_acquire(true);                652         cpuhp_lock_acquire(true);
1185         cpuhp_lock_release(true);                653         cpuhp_lock_release(true);
1186                                                  654 
1187         trace_cpuhp_enter(cpu, st->target, pr    655         trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
1188         ret = cpuhp_kick_ap(cpu, st, st->targ !! 656         ret = cpuhp_kick_ap(st, st->target);
1189         trace_cpuhp_exit(cpu, st->state, prev    657         trace_cpuhp_exit(cpu, st->state, prev_state, ret);
1190                                                  658 
1191         return ret;                              659         return ret;
1192 }                                                660 }
1193                                                  661 
1194 static struct smp_hotplug_thread cpuhp_thread    662 static struct smp_hotplug_thread cpuhp_threads = {
1195         .store                  = &cpuhp_stat    663         .store                  = &cpuhp_state.thread,
                                                   >> 664         .create                 = &cpuhp_create,
1196         .thread_should_run      = cpuhp_shoul    665         .thread_should_run      = cpuhp_should_run,
1197         .thread_fn              = cpuhp_threa    666         .thread_fn              = cpuhp_thread_fun,
1198         .thread_comm            = "cpuhp/%u",    667         .thread_comm            = "cpuhp/%u",
1199         .selfparking            = true,          668         .selfparking            = true,
1200 };                                               669 };
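
/*
 * Illustrative sketch (editor's example, not part of cpu.c): the
 * smpboot pattern used above, for a hypothetical per-CPU worker.
 * smpboot creates one thread per CPU, parks/unparks it across hotplug
 * and calls thread_fn whenever thread_should_run returns true;
 * cpuhp_threads additionally sets .selfparking because the hotplug
 * thread manages its own park/unpark. Invented names below:
 */
#include <linux/percpu.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, example_worker);

static int example_should_run(unsigned int cpu)
{
        return 0;       /* return true when there is work for @cpu */
}

static void example_work_fn(unsigned int cpu)
{
        /* runs in the per-CPU thread's context, bound to @cpu */
}

static struct smp_hotplug_thread example_workers = {
        .store                  = &example_worker,
        .thread_should_run      = example_should_run,
        .thread_fn              = example_work_fn,
        .thread_comm            = "example/%u",
};

/* at init time: BUG_ON(smpboot_register_percpu_thread(&example_workers)); */
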
1201                                                  670 
1202 static __init void cpuhp_init_state(void)     << 
1203 {                                             << 
1204         struct cpuhp_cpu_state *st;           << 
1205         int cpu;                              << 
1206                                               << 
1207         for_each_possible_cpu(cpu) {          << 
1208                 st = per_cpu_ptr(&cpuhp_state << 
1209                 init_completion(&st->done_up) << 
1210                 init_completion(&st->done_dow << 
1211         }                                     << 
1212 }                                             << 
1213                                               << 
1214 void __init cpuhp_threads_init(void)             671 void __init cpuhp_threads_init(void)
1215 {                                                672 {
1216         cpuhp_init_state();                   << 
1217         BUG_ON(smpboot_register_percpu_thread    673         BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
1218         kthread_unpark(this_cpu_read(cpuhp_st    674         kthread_unpark(this_cpu_read(cpuhp_state.thread));
1219 }                                                675 }
1220                                                  676 
1221 #ifdef CONFIG_HOTPLUG_CPU                        677 #ifdef CONFIG_HOTPLUG_CPU
1222 #ifndef arch_clear_mm_cpumask_cpu             << 
1223 #define arch_clear_mm_cpumask_cpu(cpu, mm) cp << 
1224 #endif                                        << 
1225                                               << 
1226 /**                                              678 /**
1227  * clear_tasks_mm_cpumask - Safely clear task    679  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
1228  * @cpu: a CPU id                                680  * @cpu: a CPU id
1229  *                                               681  *
1230  * This function walks all processes, finds a    682  * This function walks all processes, finds a valid mm struct for each one and
1231  * then clears a corresponding bit in mm's cp    683  * then clears a corresponding bit in mm's cpumask.  While this all sounds
1232  * trivial, there are various non-obvious cor    684  * trivial, there are various non-obvious corner cases, which this function
1233  * tries to solve in a safe manner.              685  * tries to solve in a safe manner.
1234  *                                               686  *
1235  * Also note that the function uses a somewha    687  * Also note that the function uses a somewhat relaxed locking scheme, so it may
1236  * be called only for an already offlined CPU    688  * be called only for an already offlined CPU.
1237  */                                              689  */
1238 void clear_tasks_mm_cpumask(int cpu)             690 void clear_tasks_mm_cpumask(int cpu)
1239 {                                                691 {
1240         struct task_struct *p;                   692         struct task_struct *p;
1241                                                  693 
1242         /*                                       694         /*
1243          * This function is called after the     695          * This function is called after the cpu is taken down and marked
1244          * offline, so it's not like new tasks   696          * offline, so it's not like new tasks will ever get this cpu set in
1245          * their mm mask. -- Peter Zijlstra      697          * their mm mask. -- Peter Zijlstra
1246          * Thus, we may use rcu_read_lock() h    698          * Thus, we may use rcu_read_lock() here, instead of grabbing
1247          * full-fledged tasklist_lock.           699          * full-fledged tasklist_lock.
1248          */                                      700          */
1249         WARN_ON(cpu_online(cpu));                701         WARN_ON(cpu_online(cpu));
1250         rcu_read_lock();                         702         rcu_read_lock();
1251         for_each_process(p) {                    703         for_each_process(p) {
1252                 struct task_struct *t;           704                 struct task_struct *t;
1253                                                  705 
1254                 /*                               706                 /*
1255                  * Main thread might exit, bu    707                  * Main thread might exit, but other threads may still have
1256                  * a valid mm. Find one.         708                  * a valid mm. Find one.
1257                  */                              709                  */
1258                 t = find_lock_task_mm(p);        710                 t = find_lock_task_mm(p);
1259                 if (!t)                          711                 if (!t)
1260                         continue;                712                         continue;
1261                 arch_clear_mm_cpumask_cpu(cpu !! 713                 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
1262                 task_unlock(t);                  714                 task_unlock(t);
1263         }                                        715         }
1264         rcu_read_unlock();                       716         rcu_read_unlock();
1265 }                                                717 }
1266                                                  718 
1267 /* Take this CPU down. */                        719 /* Take this CPU down. */
1268 static int take_cpu_down(void *_param)           720 static int take_cpu_down(void *_param)
1269 {                                                721 {
1270         struct cpuhp_cpu_state *st = this_cpu    722         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1271         enum cpuhp_state target = max((int)st    723         enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
1272         int err, cpu = smp_processor_id();       724         int err, cpu = smp_processor_id();
                                                   >> 725         int ret;
1273                                                  726 
1274         /* Ensure this CPU doesn't handle any    727         /* Ensure this CPU doesn't handle any more interrupts. */
1275         err = __cpu_disable();                   728         err = __cpu_disable();
1276         if (err < 0)                             729         if (err < 0)
1277                 return err;                      730                 return err;
1278                                                  731 
1279         /*                                       732         /*
1280          * Must be called from CPUHP_TEARDOWN !! 733          * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
1281          * down, that the current state is CP !! 734          * do this step again.
1282          */                                      735          */
1283         WARN_ON(st->state != (CPUHP_TEARDOWN_ !! 736         WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
1284                                               !! 737         st->state--;
1285         /*                                    !! 738         /* Invoke the former CPU_DYING callbacks */
1286          * Invoke the former CPU_DYING callba !! 739         for (; st->state > target; st->state--) {
1287          */                                   !! 740                 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
1288         cpuhp_invoke_callback_range_nofail(fa !! 741                 /*
                                                   >> 742                  * DYING must not fail!
                                                   >> 743                  */
                                                   >> 744                 WARN_ON_ONCE(ret);
                                                   >> 745         }
1289                                                  746 
                                                   >> 747         /* Give up timekeeping duties */
                                                   >> 748         tick_handover_do_timer();
1290         /* Park the stopper thread */            749         /* Park the stopper thread */
1291         stop_machine_park(cpu);                  750         stop_machine_park(cpu);
1292         return 0;                                751         return 0;
1293 }                                                752 }
1294                                                  753 
1295 static int takedown_cpu(unsigned int cpu)        754 static int takedown_cpu(unsigned int cpu)
1296 {                                                755 {
1297         struct cpuhp_cpu_state *st = per_cpu_    756         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1298         int err;                                 757         int err;
1299                                                  758 
1300         /* Park the smpboot threads */           759         /* Park the smpboot threads */
1301         kthread_park(st->thread);             !! 760         kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
                                                   >> 761         smpboot_park_threads(cpu);
1302                                                  762 
1303         /*                                       763         /*
1304          * Prevent irq alloc/free while the d    764          * Prevent irq alloc/free while the dying cpu reorganizes the
1305          * interrupt affinities.                 765          * interrupt affinities.
1306          */                                      766          */
1307         irq_lock_sparse();                       767         irq_lock_sparse();
1308                                                  768 
1309         /*                                       769         /*
1310          * So now all preempt/rcu users must     770          * So now all preempt/rcu users must observe !cpu_active().
1311          */                                      771          */
1312         err = stop_machine_cpuslocked(take_cp    772         err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
1313         if (err) {                               773         if (err) {
1314                 /* CPU refused to die */         774                 /* CPU refused to die */
1315                 irq_unlock_sparse();             775                 irq_unlock_sparse();
1316                 /* Unpark the hotplug thread     776                 /* Unpark the hotplug thread so we can rollback there */
1317                 kthread_unpark(st->thread);   !! 777                 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
1318                 return err;                      778                 return err;
1319         }                                        779         }
1320         BUG_ON(cpu_online(cpu));                 780         BUG_ON(cpu_online(cpu));
1321                                                  781 
1322         /*                                       782         /*
1323          * The teardown callback for CPUHP_AP    783          * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
1324          * all runnable tasks from the CPU, t    784          * all runnable tasks from the CPU, there's only the idle task left now
1325          * that the migration thread is done     785          * that the migration thread is done doing the stop_machine thing.
1326          *                                       786          *
1327          * Wait for the stop thread to go awa    787          * Wait for the stop thread to go away.
1328          */                                      788          */
1329         wait_for_ap_thread(st, false);           789         wait_for_ap_thread(st, false);
1330         BUG_ON(st->state != CPUHP_AP_IDLE_DEA    790         BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1331                                                  791 
1332         /* Interrupts are moved away from the    792         /* Interrupts are moved away from the dying cpu, reenable alloc/free */
1333         irq_unlock_sparse();                     793         irq_unlock_sparse();
1334                                                  794 
1335         hotplug_cpu__broadcast_tick_pull(cpu)    795         hotplug_cpu__broadcast_tick_pull(cpu);
1336         /* This actually kills the CPU. */       796         /* This actually kills the CPU. */
1337         __cpu_die(cpu);                          797         __cpu_die(cpu);
1338                                                  798 
1339         cpuhp_bp_sync_dead(cpu);              << 
1340                                               << 
1341         tick_cleanup_dead_cpu(cpu);              799         tick_cleanup_dead_cpu(cpu);
1342                                               << 
1343         /*                                    << 
1344          * Callbacks must be re-integrated ri << 
1345          * Otherwise an RCU callback could bl << 
1346          * waiting for its completion.        << 
1347          */                                   << 
1348         rcutree_migrate_callbacks(cpu);          800         rcutree_migrate_callbacks(cpu);
1349                                               << 
1350         return 0;                                801         return 0;
1351 }                                                802 }
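
/*
 * Illustrative sketch (editor's example, not part of cpu.c): the
 * stop_machine() contract that take_cpu_down() relies on, in its
 * generic form. @fn runs with interrupts disabled while every other
 * online CPU spins in its stopper thread, so otherwise-racy global
 * changes become atomic. Invented names below:
 */
#include <linux/stop_machine.h>

static int example_critical(void *data)
{
        /* no other CPU executes kernel code while we are here */
        return 0;
}

static int example_freeze_and_change(void)
{
        /* NULL cpumask: run example_critical() on any one online CPU */
        return stop_machine(example_critical, NULL, NULL);
}
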
1352                                                  803 
1353 static void cpuhp_complete_idle_dead(void *ar    804 static void cpuhp_complete_idle_dead(void *arg)
1354 {                                                805 {
1355         struct cpuhp_cpu_state *st = arg;        806         struct cpuhp_cpu_state *st = arg;
1356                                                  807 
1357         complete_ap_thread(st, false);           808         complete_ap_thread(st, false);
1358 }                                                809 }
1359                                                  810 
1360 void cpuhp_report_idle_dead(void)                811 void cpuhp_report_idle_dead(void)
1361 {                                                812 {
1362         struct cpuhp_cpu_state *st = this_cpu    813         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1363                                                  814 
1364         BUG_ON(st->state != CPUHP_AP_OFFLINE)    815         BUG_ON(st->state != CPUHP_AP_OFFLINE);
1365         tick_assert_timekeeping_handover();   !! 816         rcu_report_dead(smp_processor_id());
1366         rcutree_report_cpu_dead();            << 
1367         st->state = CPUHP_AP_IDLE_DEAD;          817         st->state = CPUHP_AP_IDLE_DEAD;
1368         /*                                       818         /*
1369          * We cannot call complete after rcut !! 819          * We cannot call complete after rcu_report_dead() so we delegate it
1370          * to an online cpu.                     820          * to an online cpu.
1371          */                                      821          */
1372         smp_call_function_single(cpumask_firs    822         smp_call_function_single(cpumask_first(cpu_online_mask),
1373                                  cpuhp_comple    823                                  cpuhp_complete_idle_dead, st, 0);
1374 }                                                824 }
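
/*
 * Illustrative sketch (editor's example, not part of cpu.c): the
 * cross-call used above in its minimal form. With @wait == 0, as in
 * cpuhp_report_idle_dead(), the caller does not block until @func has
 * run on the other CPU. Invented names below:
 */
#include <linux/smp.h>

static void example_remote_fn(void *info)
{
        /* runs in IPI context on the chosen CPU */
}

static void example_cross_call(int cpu)
{
        smp_call_function_single(cpu, example_remote_fn, NULL, 0);
}
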
1375                                                  825 
                                                   >> 826 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
                                                   >> 827 {
                                                   >> 828         for (st->state++; st->state < st->target; st->state++) {
                                                   >> 829                 struct cpuhp_step *step = cpuhp_get_step(st->state);
                                                   >> 830 
                                                   >> 831                 if (!step->skip_onerr)
                                                   >> 832                         cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
                                                   >> 833         }
                                                   >> 834 }
                                                   >> 835 
1376 static int cpuhp_down_callbacks(unsigned int     836 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1377                                 enum cpuhp_st    837                                 enum cpuhp_state target)
1378 {                                                838 {
1379         enum cpuhp_state prev_state = st->sta    839         enum cpuhp_state prev_state = st->state;
1380         int ret = 0;                             840         int ret = 0;
1381                                                  841 
1382         ret = cpuhp_invoke_callback_range(fal !! 842         for (; st->state > target; st->state--) {
1383         if (ret) {                            !! 843                 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
1384                 pr_debug("CPU DOWN failed (%d !! 844                 if (ret) {
1385                          ret, cpu, cpuhp_get_ !! 845                         st->target = prev_state;
1386                          st->state);          !! 846                         undo_cpu_down(cpu, st);
1387                                               !! 847                         break;
1388                 cpuhp_reset_state(cpu, st, pr !! 848                 }
1389                                               << 
1390                 if (st->state < prev_state)   << 
1391                         WARN_ON(cpuhp_invoke_ << 
1392                                               << 
1393         }                                        849         }
1394                                               << 
1395         return ret;                              850         return ret;
1396 }                                                851 }
1397                                                  852 
1398 /* Requires cpu_add_remove_lock to be held */    853 /* Requires cpu_add_remove_lock to be held */
1399 static int __ref _cpu_down(unsigned int cpu,     854 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1400                            enum cpuhp_state t    855                            enum cpuhp_state target)
1401 {                                                856 {
1402         struct cpuhp_cpu_state *st = per_cpu_    857         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1403         int prev_state, ret = 0;                 858         int prev_state, ret = 0;
1404                                                  859 
1405         if (num_online_cpus() == 1)              860         if (num_online_cpus() == 1)
1406                 return -EBUSY;                   861                 return -EBUSY;
1407                                                  862 
1408         if (!cpu_present(cpu))                   863         if (!cpu_present(cpu))
1409                 return -EINVAL;                  864                 return -EINVAL;
1410                                                  865 
1411         cpus_write_lock();                       866         cpus_write_lock();
1412                                                  867 
1413         cpuhp_tasks_frozen = tasks_frozen;       868         cpuhp_tasks_frozen = tasks_frozen;
1414                                                  869 
1415         prev_state = cpuhp_set_state(cpu, st, !! 870         prev_state = cpuhp_set_state(st, target);
1416         /*                                       871         /*
1417          * If the current CPU state is in the    872          * If the current CPU state is in the range of the AP hotplug thread,
1418          * then we need to kick the thread.      873          * then we need to kick the thread.
1419          */                                      874          */
1420         if (st->state > CPUHP_TEARDOWN_CPU) {    875         if (st->state > CPUHP_TEARDOWN_CPU) {
1421                 st->target = max((int)target,    876                 st->target = max((int)target, CPUHP_TEARDOWN_CPU);
1422                 ret = cpuhp_kick_ap_work(cpu)    877                 ret = cpuhp_kick_ap_work(cpu);
1423                 /*                               878                 /*
1424                  * The AP side has done the e    879                  * The AP side has done the error rollback already. Just
1425                  * return the error code..       880                  * return the error code..
1426                  */                              881                  */
1427                 if (ret)                         882                 if (ret)
1428                         goto out;                883                         goto out;
1429                                                  884 
1430                 /*                               885                 /*
1431                  * We might have stopped stil    886                  * We might have stopped still in the range of the AP hotplug
1432                  * thread. Nothing to do anym    887                  * thread. Nothing to do anymore.
1433                  */                              888                  */
1434                 if (st->state > CPUHP_TEARDOW    889                 if (st->state > CPUHP_TEARDOWN_CPU)
1435                         goto out;                890                         goto out;
1436                                                  891 
1437                 st->target = target;             892                 st->target = target;
1438         }                                        893         }
1439         /*                                       894         /*
1440          * The AP brought itself down to CPUH    895          * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1441          * to do the further cleanups.           896          * to do the further cleanups.
1442          */                                      897          */
1443         ret = cpuhp_down_callbacks(cpu, st, t    898         ret = cpuhp_down_callbacks(cpu, st, target);
1444         if (ret && st->state < prev_state) {  !! 899         if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
1445                 if (st->state == CPUHP_TEARDO !! 900                 cpuhp_reset_state(st, prev_state);
1446                         cpuhp_reset_state(cpu !! 901                 __cpuhp_kick_ap(st);
1447                         __cpuhp_kick_ap(st);  << 
1448                 } else {                      << 
1449                         WARN(1, "DEAD callbac << 
1450                 }                             << 
1451         }                                        902         }
1452                                                  903 
1453 out:                                             904 out:
1454         cpus_write_unlock();                     905         cpus_write_unlock();
1455         /*                                       906         /*
1456          * Do post unplug cleanup. This is st    907          * Do post unplug cleanup. This is still protected against
1457          * concurrent CPU hotplug via cpu_add    908          * concurrent CPU hotplug via cpu_add_remove_lock.
1458          */                                      909          */
1459         lockup_detector_cleanup();               910         lockup_detector_cleanup();
1460         arch_smt_update();                    << 
1461         return ret;                              911         return ret;
1462 }                                                912 }
1463                                                  913 
1464 struct cpu_down_work {                        !! 914 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
1465         unsigned int            cpu;          << 
1466         enum cpuhp_state        target;       << 
1467 };                                            << 
1468                                               << 
1469 static long __cpu_down_maps_locked(void *arg) << 
1470 {                                                915 {
1471         struct cpu_down_work *work = arg;     !! 916         int err;
1472                                               << 
1473         return _cpu_down(work->cpu, 0, work-> << 
1474 }                                             << 
1475                                               << 
1476 static int cpu_down_maps_locked(unsigned int  << 
1477 {                                             << 
1478         struct cpu_down_work work = { .cpu =  << 
1479                                                  917 
1480         /*                                    !! 918         cpu_maps_update_begin();
1481          * If the platform does not support h << 
1482          * differentiate it from a transient  << 
1483          */                                   << 
1484         if (cpu_hotplug_offline_disabled)     << 
1485                 return -EOPNOTSUPP;           << 
1486         if (cpu_hotplug_disabled)             << 
1487                 return -EBUSY;                << 
1488                                                  919 
1489         /*                                    !! 920         if (cpu_hotplug_disabled) {
1490          * Ensure that the control task does  !! 921                 err = -EBUSY;
1491          * CPU to prevent a deadlock against  !! 922                 goto out;
1492          * Also keep at least one housekeepin << 
1493          * an empty sched_domain span.        << 
1494          */                                   << 
1495         for_each_cpu_and(cpu, cpu_online_mask << 
1496                 if (cpu != work.cpu)          << 
1497                         return work_on_cpu(cp << 
1498         }                                        923         }
1499         return -EBUSY;                        << 
1500 }                                             << 
1501                                                  924 
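The work_on_cpu() trick above is general purpose: it queues a function on a CPU-bound kworker and sleeps until it completes, which is why cpu_down_maps_locked() can use it to keep the control task off the dying CPU. A minimal, hypothetical use (example_fn/example_run_on are illustrative, not from this file):

	#include <linux/printk.h>
	#include <linux/smp.h>
	#include <linux/workqueue.h>

	/* Payload: runs in a kworker pinned to the chosen CPU. */
	static long example_fn(void *arg)
	{
		pr_info("example_fn on CPU%d\n", raw_smp_processor_id());
		return 0;
	}

	/* Caller: sleeps until example_fn() has finished on @cpu. */
	static long example_run_on(unsigned int cpu)
	{
		return work_on_cpu(cpu, example_fn, NULL);
	}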
1502 static int cpu_down(unsigned int cpu, enum cp !! 925         err = _cpu_down(cpu, 0, target);
1503 {                                             << 
1504         int err;                              << 
1505                                                  926 
1506         cpu_maps_update_begin();              !! 927 out:
1507         err = cpu_down_maps_locked(cpu, targe << 
1508         cpu_maps_update_done();                  928         cpu_maps_update_done();
1509         return err;                              929         return err;
1510 }                                                930 }
1511                                                  931 
1512 /**                                           !! 932 int cpu_down(unsigned int cpu)
1513  * cpu_device_down - Bring down a cpu device  << 
1514  * @dev: Pointer to the cpu device to offline << 
1515  *                                            << 
1516  * This function is meant to be used by devic << 
1517  *                                            << 
1518  * Other subsystems should use remove_cpu() i << 
1519  *                                            << 
1520  * Return: %0 on success or a negative errno  << 
1521  */                                           << 
1522 int cpu_device_down(struct device *dev)       << 
1523 {                                             << 
1524         return cpu_down(dev->id, CPUHP_OFFLIN << 
1525 }                                             << 
1526                                               << 
1527 int remove_cpu(unsigned int cpu)              << 
1528 {                                                933 {
1529         int ret;                              !! 934         return do_cpu_down(cpu, CPUHP_OFFLINE);
1530                                               << 
1531         lock_device_hotplug();                << 
1532         ret = device_offline(get_cpu_device(c << 
1533         unlock_device_hotplug();              << 
1534                                               << 
1535         return ret;                           << 
1536 }                                             << 
1537 EXPORT_SYMBOL_GPL(remove_cpu);                << 
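Per the kerneldoc above, device core code uses cpu_device_down() while other subsystems use the exported remove_cpu()/add_cpu() pair (add_cpu() is defined further down). A hedged sketch of an external caller cycling one CPU:

	#include <linux/cpu.h>

	/* Illustrative only: offline @cpu, then bring it back online.
	 * Both helpers take lock_device_hotplug() internally and return
	 * 0 or a negative errno.
	 */
	static int example_cycle_cpu(unsigned int cpu)
	{
		int ret;

		ret = remove_cpu(cpu);
		if (ret)
			return ret;

		return add_cpu(cpu);
	}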
1538                                               << 
1539 void smp_shutdown_nonboot_cpus(unsigned int p << 
1540 {                                             << 
1541         unsigned int cpu;                     << 
1542         int error;                            << 
1543                                               << 
1544         cpu_maps_update_begin();              << 
1545                                               << 
1546         /*                                    << 
1547          * Make certain the cpu I'm about to  << 
1548          *                                    << 
1549          * This is in line with what migrate_to_ << 
1550          */                                   << 
1551         if (!cpu_online(primary_cpu))         << 
1552                 primary_cpu = cpumask_first(c << 
1553                                               << 
1554         for_each_online_cpu(cpu) {            << 
1555                 if (cpu == primary_cpu)       << 
1556                         continue;             << 
1557                                               << 
1558                 error = cpu_down_maps_locked( << 
1559                 if (error) {                  << 
1560                         pr_err("Failed to off << 
1561                                 cpu, error);  << 
1562                         break;                << 
1563                 }                             << 
1564         }                                     << 
1565                                               << 
1566         /*                                    << 
1567          * Ensure all but the reboot CPU are  << 
1568          */                                   << 
1569         BUG_ON(num_online_cpus() > 1);        << 
1570                                               << 
1571         /*                                    << 
1572          * Make sure the CPUs won't be enable << 
1573          * point. Kexec will reboot to a new  << 
1574          * everything along the way.          << 
1575          */                                   << 
1576         cpu_hotplug_disabled++;               << 
1577                                               << 
1578         cpu_maps_update_done();               << 
1579 }                                                935 }
                                                   >> 936 EXPORT_SYMBOL(cpu_down);
1580                                                  937 
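smp_shutdown_nonboot_cpus() exists for arch reboot/kexec paths rather than ordinary hotplug. Sketch of a typical arch hook (modeled on arm64's machine_shutdown(); exact call sites vary by architecture):

	#include <linux/cpu.h>
	#include <linux/reboot.h>	/* reboot_cpu */

	void machine_shutdown(void)
	{
		/* Park every CPU except the one chosen to perform the reboot */
		smp_shutdown_nonboot_cpus(reboot_cpu);
	}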
1581 #else                                            938 #else
1582 #define takedown_cpu            NULL             939 #define takedown_cpu            NULL
1583 #endif /*CONFIG_HOTPLUG_CPU*/                    940 #endif /*CONFIG_HOTPLUG_CPU*/
1584                                                  941 
1585 /**                                              942 /**
1586  * notify_cpu_starting(cpu) - Invoke the call    943  * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1587  * @cpu: cpu that just started                   944  * @cpu: cpu that just started
1588  *                                               945  *
1589  * It must be called by the arch code on the     946  * It must be called by the arch code on the new cpu, before the new cpu
1590  * enables interrupts and before the "boot" c    947  * enables interrupts and before the "boot" cpu returns from __cpu_up().
1591  */                                              948  */
1592 void notify_cpu_starting(unsigned int cpu)       949 void notify_cpu_starting(unsigned int cpu)
1593 {                                                950 {
1594         struct cpuhp_cpu_state *st = per_cpu_    951         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1595         enum cpuhp_state target = min((int)st    952         enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
                                                   >> 953         int ret;
1596                                                  954 
1597         rcutree_report_cpu_starting(cpu);     !! 955         rcu_cpu_starting(cpu);  /* Enables RCU usage on this CPU. */
1598         cpumask_set_cpu(cpu, &cpus_booted_onc !! 956         while (st->state < target) {
1599                                               !! 957                 st->state++;
1600         /*                                    !! 958                 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
1601          * STARTING must not fail!            !! 959                 /*
1602          */                                   !! 960                  * STARTING must not fail!
1603         cpuhp_invoke_callback_range_nofail(tr !! 961                  */
                                                   >> 962                 WARN_ON_ONCE(ret);
                                                   >> 963         }
1604 }                                                964 }
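A sketch of the arch-side contract described in the kerneldoc: the STARTING-section callbacks must run on the new CPU with interrupts still disabled, before the boot CPU returns from __cpu_up(). Hypothetical secondary entry (arch details elided):

	#include <linux/cpu.h>
	#include <linux/cpuhotplug.h>
	#include <linux/cpumask.h>
	#include <linux/irqflags.h>

	static void example_secondary_start(unsigned int cpu)
	{
		/* ... arch-specific low-level init, interrupts still off ... */
		notify_cpu_starting(cpu);	/* STARTING callbacks */
		set_cpu_online(cpu, true);
		local_irq_enable();
		cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);	/* does not return */
	}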
1605                                                  965 
1606 /*                                               966 /*
1607  * Called from the idle task. Wake up the con    967  * Called from the idle task. Wake up the controlling task which brings the
1608  * hotplug thread of the upcoming CPU up and  !! 968  * stopper and the hotplug thread of the upcoming CPU up and then delegates
1609  * online bringup to the hotplug thread.      !! 969  * the rest of the online bringup to the hotplug thread.
1610  */                                              970  */
1611 void cpuhp_online_idle(enum cpuhp_state state    971 void cpuhp_online_idle(enum cpuhp_state state)
1612 {                                                972 {
1613         struct cpuhp_cpu_state *st = this_cpu    973         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1614                                                  974 
1615         /* Happens for the boot cpu */           975         /* Happens for the boot cpu */
1616         if (state != CPUHP_AP_ONLINE_IDLE)       976         if (state != CPUHP_AP_ONLINE_IDLE)
1617                 return;                          977                 return;
1618                                                  978 
1619         cpuhp_ap_update_sync_state(SYNC_STATE << 
1620                                               << 
1621         /*                                    << 
1622          * Unpark the stopper thread before w << 
1623          * scheduling); this ensures the stop << 
1624          */                                   << 
1625         stop_machine_unpark(smp_processor_id( << 
1626                                               << 
1627         st->state = CPUHP_AP_ONLINE_IDLE;        979         st->state = CPUHP_AP_ONLINE_IDLE;
1628         complete_ap_thread(st, true);            980         complete_ap_thread(st, true);
1629 }                                                981 }
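For context, the call site of cpuhp_online_idle() is the generic idle entry; simplified from kernel/sched/idle.c (do_idle() is local to that file):

	void cpu_startup_entry(enum cpuhp_state state)
	{
		current->flags |= PF_IDLE;
		arch_cpu_idle_prepare();
		cpuhp_online_idle(state);	/* no-op except for CPUHP_AP_ONLINE_IDLE */
		while (1)
			do_idle();
	}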
1630                                                  982 
1631 /* Requires cpu_add_remove_lock to be held */    983 /* Requires cpu_add_remove_lock to be held */
1632 static int _cpu_up(unsigned int cpu, int task    984 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1633 {                                                985 {
1634         struct cpuhp_cpu_state *st = per_cpu_    986         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1635         struct task_struct *idle;                987         struct task_struct *idle;
1636         int ret = 0;                             988         int ret = 0;
1637                                                  989 
1638         cpus_write_lock();                       990         cpus_write_lock();
1639                                                  991 
1640         if (!cpu_present(cpu)) {                 992         if (!cpu_present(cpu)) {
1641                 ret = -EINVAL;                   993                 ret = -EINVAL;
1642                 goto out;                        994                 goto out;
1643         }                                        995         }
1644                                                  996 
1645         /*                                       997         /*
1646          * The caller of cpu_up() might have  !! 998          * The caller of do_cpu_up might have raced with another
1647          * caller. Nothing to do.             !! 999          * caller. Ignore it for now.
1648          */                                      1000          */
1649         if (st->state >= target)                 1001         if (st->state >= target)
1650                 goto out;                        1002                 goto out;
1651                                                  1003 
1652         if (st->state == CPUHP_OFFLINE) {        1004         if (st->state == CPUHP_OFFLINE) {
1653                 /* Let it fail before we try     1005                 /* Let it fail before we try to bring the cpu up */
1654                 idle = idle_thread_get(cpu);     1006                 idle = idle_thread_get(cpu);
1655                 if (IS_ERR(idle)) {              1007                 if (IS_ERR(idle)) {
1656                         ret = PTR_ERR(idle);     1008                         ret = PTR_ERR(idle);
1657                         goto out;                1009                         goto out;
1658                 }                                1010                 }
1659                                               << 
1660                 /*                            << 
1661                  * Reset stale stack state fr << 
1662                  */                           << 
1663                 scs_task_reset(idle);         << 
1664                 kasan_unpoison_task_stack(idl << 
1665         }                                        1011         }
1666                                                  1012 
1667         cpuhp_tasks_frozen = tasks_frozen;       1013         cpuhp_tasks_frozen = tasks_frozen;
1668                                                  1014 
1669         cpuhp_set_state(cpu, st, target);     !! 1015         cpuhp_set_state(st, target);
1670         /*                                       1016         /*
1671          * If the current CPU state is in the    1017          * If the current CPU state is in the range of the AP hotplug thread,
1672          * then we need to kick the thread on    1018          * then we need to kick the thread once more.
1673          */                                      1019          */
1674         if (st->state > CPUHP_BRINGUP_CPU) {     1020         if (st->state > CPUHP_BRINGUP_CPU) {
1675                 ret = cpuhp_kick_ap_work(cpu)    1021                 ret = cpuhp_kick_ap_work(cpu);
1676                 /*                               1022                 /*
1677                  * The AP side has done the e    1023                  * The AP side has done the error rollback already. Just
1678                  * return the error code.        1024                  * return the error code.
1679                  */                              1025                  */
1680                 if (ret)                         1026                 if (ret)
1681                         goto out;                1027                         goto out;
1682         }                                        1028         }
1683                                                  1029 
1684         /*                                       1030         /*
1685          * Try to reach the target state. We     1031          * Try to reach the target state. We max out on the BP at
1686          * CPUHP_BRINGUP_CPU. After that the     1032          * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1687          * responsible for bringing it up to     1033          * responsible for bringing it up to the target state.
1688          */                                      1034          */
1689         target = min((int)target, CPUHP_BRING    1035         target = min((int)target, CPUHP_BRINGUP_CPU);
1690         ret = cpuhp_up_callbacks(cpu, st, tar    1036         ret = cpuhp_up_callbacks(cpu, st, target);
1691 out:                                             1037 out:
1692         cpus_write_unlock();                     1038         cpus_write_unlock();
1693         arch_smt_update();                    << 
1694         return ret;                              1039         return ret;
1695 }                                                1040 }
1696                                                  1041 
1697 static int cpu_up(unsigned int cpu, enum cpuh !! 1042 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
1698 {                                                1043 {
1699         int err = 0;                             1044         int err = 0;
1700                                                  1045 
1701         if (!cpu_possible(cpu)) {                1046         if (!cpu_possible(cpu)) {
1702                 pr_err("can't online cpu %d b    1047                 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1703                        cpu);                     1048                        cpu);
                                                   >> 1049 #if defined(CONFIG_IA64)
                                                   >> 1050                 pr_err("please check additional_cpus= boot parameter\n");
                                                   >> 1051 #endif
1704                 return -EINVAL;                  1052                 return -EINVAL;
1705         }                                        1053         }
1706                                                  1054 
1707         err = try_online_node(cpu_to_node(cpu    1055         err = try_online_node(cpu_to_node(cpu));
1708         if (err)                                 1056         if (err)
1709                 return err;                      1057                 return err;
1710                                                  1058 
1711         cpu_maps_update_begin();                 1059         cpu_maps_update_begin();
1712                                                  1060 
1713         if (cpu_hotplug_disabled) {              1061         if (cpu_hotplug_disabled) {
1714                 err = -EBUSY;                    1062                 err = -EBUSY;
1715                 goto out;                        1063                 goto out;
1716         }                                        1064         }
1717         if (!cpu_bootable(cpu)) {             << 
1718                 err = -EPERM;                 << 
1719                 goto out;                     << 
1720         }                                     << 
1721                                                  1065 
1722         err = _cpu_up(cpu, 0, target);           1066         err = _cpu_up(cpu, 0, target);
1723 out:                                             1067 out:
1724         cpu_maps_update_done();                  1068         cpu_maps_update_done();
1725         return err;                              1069         return err;
1726 }                                                1070 }
1727                                                  1071 
1728 /**                                           !! 1072 int cpu_up(unsigned int cpu)
1729  * cpu_device_up - Bring up a cpu device      << 
1730  * @dev: Pointer to the cpu device to online  << 
1731  *                                            << 
1732  * This function is meant to be used by devic << 
1733  *                                            << 
1734  * Other subsystems should use add_cpu() inst << 
1735  *                                            << 
1736  * Return: %0 on success or a negative errno  << 
1737  */                                           << 
1738 int cpu_device_up(struct device *dev)         << 
1739 {                                             << 
1740         return cpu_up(dev->id, CPUHP_ONLINE); << 
1741 }                                             << 
1742                                               << 
1743 int add_cpu(unsigned int cpu)                 << 
1744 {                                             << 
1745         int ret;                              << 
1746                                               << 
1747         lock_device_hotplug();                << 
1748         ret = device_online(get_cpu_device(cp << 
1749         unlock_device_hotplug();              << 
1750                                               << 
1751         return ret;                           << 
1752 }                                             << 
1753 EXPORT_SYMBOL_GPL(add_cpu);                   << 
1754                                               << 
1755 /**                                           << 
1756  * bringup_hibernate_cpu - Bring up the CPU t << 
1757  * @sleep_cpu: The cpu we hibernated on and s << 
1758  *                                            << 
1759  * On some architectures like arm64, we can h << 
1760  * wake up the CPU we hibernated on might be  << 
1761  * using maxcpus= for example.                << 
1762  *                                            << 
1763  * Return: %0 on success or a negative errno  << 
1764  */                                           << 
1765 int bringup_hibernate_cpu(unsigned int sleep_ << 
1766 {                                             << 
1767         int ret;                              << 
1768                                               << 
1769         if (!cpu_online(sleep_cpu)) {         << 
1770                 pr_info("Hibernated on a CPU  << 
1771                 ret = cpu_up(sleep_cpu, CPUHP << 
1772                 if (ret) {                    << 
1773                         pr_err("Failed to bri << 
1774                         return ret;           << 
1775                 }                             << 
1776         }                                     << 
1777         return 0;                             << 
1778 }                                             << 
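A hedged example of the scenario the kerneldoc describes (modeled on the arm64 hibernation-resume path, where the sleeping CPU's number comes from the image header):

	#include <linux/cpu.h>

	static int example_restore_sleep_cpu(unsigned int sleep_cpu)
	{
		/* If maxcpus= left the hibernation CPU offline, online it now */
		return bringup_hibernate_cpu(sleep_cpu);
	}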
1779                                               << 
1780 static void __init cpuhp_bringup_mask(const s << 
1781                                       enum cp << 
1782 {                                             << 
1783         unsigned int cpu;                     << 
1784                                               << 
1785         for_each_cpu(cpu, mask) {             << 
1786                 struct cpuhp_cpu_state *st =  << 
1787                                               << 
1788                 if (cpu_up(cpu, target) && ca << 
1789                         /*                    << 
1790                          * If this failed the << 
1791                          * rolled back to CPU << 
1792                          * online. Clean it u << 
1793                          */                   << 
1794                         WARN_ON(cpuhp_invoke_ << 
1795                 }                             << 
1796                                               << 
1797                 if (!--ncpus)                 << 
1798                         break;                << 
1799         }                                     << 
1800 }                                             << 
1801                                               << 
1802 #ifdef CONFIG_HOTPLUG_PARALLEL                << 
1803 static bool __cpuhp_parallel_bringup __ro_aft << 
1804                                               << 
1805 static int __init parallel_bringup_parse_para << 
1806 {                                             << 
1807         return kstrtobool(arg, &__cpuhp_paral << 
1808 }                                             << 
1809 early_param("cpuhp.parallel", parallel_bringu << 
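Since the handler is just kstrtobool(), the optimization can be toggled from the kernel command line: booting with cpuhp.parallel=0 skips cpuhp_bringup_cpus_parallel() and falls back to the serialized loop at the end of bringup_nonboot_cpus() below.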
1810                                               << 
1811 #ifdef CONFIG_HOTPLUG_SMT                     << 
1812 static inline bool cpuhp_smt_aware(void)      << 
1813 {                                             << 
1814         return cpu_smt_max_threads > 1;       << 
1815 }                                             << 
1816                                               << 
1817 static inline const struct cpumask *cpuhp_get << 
1818 {                                             << 
1819         return cpu_primary_thread_mask;       << 
1820 }                                             << 
1821 #else                                         << 
1822 static inline bool cpuhp_smt_aware(void)      << 
1823 {                                             << 
1824         return false;                         << 
1825 }                                             << 
1826 static inline const struct cpumask *cpuhp_get << 
1827 {                                             << 
1828         return cpu_none_mask;                 << 
1829 }                                             << 
1830 #endif                                        << 
1831                                               << 
1832 bool __weak arch_cpuhp_init_parallel_bringup( << 
1833 {                                                1073 {
1834         return true;                          !! 1074         return do_cpu_up(cpu, CPUHP_ONLINE);
1835 }                                             << 
1836                                               << 
1837 /*                                            << 
1838  * On architectures which have enabled parall << 
1839  * prepare states for each of the to be onlin << 
1840  * sends the startup IPI to the APs. The APs  << 
1841  * bringup code in parallel and then wait for << 
1842  * them one by one for the final onlining pro << 
1843  *                                            << 
1844  * This avoids waiting for each AP to respond << 
1845  * CPUHP_BRINGUP_CPU.                         << 
1846  */                                           << 
1847 static bool __init cpuhp_bringup_cpus_paralle << 
1848 {                                             << 
1849         const struct cpumask *mask = cpu_pres << 
1850                                               << 
1851         if (__cpuhp_parallel_bringup)         << 
1852                 __cpuhp_parallel_bringup = ar << 
1853         if (!__cpuhp_parallel_bringup)        << 
1854                 return false;                 << 
1855                                               << 
1856         if (cpuhp_smt_aware()) {              << 
1857                 const struct cpumask *pmask = << 
1858                 static struct cpumask tmp_mas << 
1859                                               << 
1860                 /*                            << 
1861                  * X86 requires to prevent th << 
1862                  * the primary thread does a  << 
1863                  * reasons. Bring the primary << 
1864                  */                           << 
1865                 cpumask_and(&tmp_mask, mask,  << 
1866                 cpuhp_bringup_mask(&tmp_mask, << 
1867                 cpuhp_bringup_mask(&tmp_mask, << 
1868                 /* Account for the online CPU << 
1869                 ncpus -= num_online_cpus();   << 
1870                 if (!ncpus)                   << 
1871                         return true;          << 
1872                 /* Create the mask for second << 
1873                 cpumask_andnot(&tmp_mask, mas << 
1874                 mask = &tmp_mask;             << 
1875         }                                     << 
1876                                               << 
1877         /* Bring the not-yet started CPUs up  << 
1878         cpuhp_bringup_mask(mask, ncpus, CPUHP << 
1879         cpuhp_bringup_mask(mask, ncpus, CPUHP << 
1880         return true;                          << 
1881 }                                             << 
1882 #else                                         << 
1883 static inline bool cpuhp_bringup_cpus_paralle << 
1884 #endif /* CONFIG_HOTPLUG_PARALLEL */          << 
1885                                               << 
1886 void __init bringup_nonboot_cpus(unsigned int << 
1887 {                                             << 
1888         if (!max_cpus)                        << 
1889                 return;                       << 
1890                                               << 
1891         /* Try parallel bringup optimization  << 
1892         if (cpuhp_bringup_cpus_parallel(max_c << 
1893                 return;                       << 
1894                                               << 
1895         /* Full per CPU serialized bringup */ << 
1896         cpuhp_bringup_mask(cpu_present_mask,  << 
1897 }                                                1075 }
                                                   >> 1076 EXPORT_SYMBOL_GPL(cpu_up);
1898                                                  1077 
1899 #ifdef CONFIG_PM_SLEEP_SMP                       1078 #ifdef CONFIG_PM_SLEEP_SMP
1900 static cpumask_var_t frozen_cpus;                1079 static cpumask_var_t frozen_cpus;
1901                                                  1080 
1902 int freeze_secondary_cpus(int primary)           1081 int freeze_secondary_cpus(int primary)
1903 {                                                1082 {
1904         int cpu, error = 0;                      1083         int cpu, error = 0;
1905                                                  1084 
1906         cpu_maps_update_begin();                 1085         cpu_maps_update_begin();
1907         if (primary == -1) {                  !! 1086         if (!cpu_online(primary))
1908                 primary = cpumask_first(cpu_o    1087                 primary = cpumask_first(cpu_online_mask);
1909                 if (!housekeeping_cpu(primary << 
1910                         primary = housekeepin << 
1911         } else {                              << 
1912                 if (!cpu_online(primary))     << 
1913                         primary = cpumask_fir << 
1914         }                                     << 
1915                                               << 
1916         /*                                       1088         /*
1917          * We take down all of the non-boot C    1089          * We take down all of the non-boot CPUs in one shot to avoid races
1918          * with the userspace trying to use t    1090          * with the userspace trying to use the CPU hotplug at the same time
1919          */                                      1091          */
1920         cpumask_clear(frozen_cpus);              1092         cpumask_clear(frozen_cpus);
1921                                                  1093 
1922         pr_info("Disabling non-boot CPUs ...\    1094         pr_info("Disabling non-boot CPUs ...\n");
1923         for (cpu = nr_cpu_ids - 1; cpu >= 0;  !! 1095         for_each_online_cpu(cpu) {
1924                 if (!cpu_online(cpu) || cpu = !! 1096                 if (cpu == primary)
1925                         continue;                1097                         continue;
1926                                               << 
1927                 if (pm_wakeup_pending()) {    << 
1928                         pr_info("Wakeup pendi << 
1929                         error = -EBUSY;       << 
1930                         break;                << 
1931                 }                             << 
1932                                               << 
1933                 trace_suspend_resume(TPS("CPU    1098                 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1934                 error = _cpu_down(cpu, 1, CPU    1099                 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1935                 trace_suspend_resume(TPS("CPU    1100                 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1936                 if (!error)                      1101                 if (!error)
1937                         cpumask_set_cpu(cpu,     1102                         cpumask_set_cpu(cpu, frozen_cpus);
1938                 else {                           1103                 else {
1939                         pr_err("Error taking     1104                         pr_err("Error taking CPU%d down: %d\n", cpu, error);
1940                         break;                   1105                         break;
1941                 }                                1106                 }
1942         }                                        1107         }
1943                                                  1108 
1944         if (!error)                              1109         if (!error)
1945                 BUG_ON(num_online_cpus() > 1)    1110                 BUG_ON(num_online_cpus() > 1);
1946         else                                     1111         else
1947                 pr_err("Non-boot CPUs are not    1112                 pr_err("Non-boot CPUs are not disabled\n");
1948                                                  1113 
1949         /*                                       1114         /*
1950          * Make sure the CPUs won't be enable    1115          * Make sure the CPUs won't be enabled by someone else. We need to do
1951          * this even in case of failure as al !! 1116          * this even in case of failure as all disable_nonboot_cpus() users are
1952          * supposed to do thaw_secondary_cpus !! 1117          * supposed to do enable_nonboot_cpus() on the failure path.
1953          */                                      1118          */
1954         cpu_hotplug_disabled++;                  1119         cpu_hotplug_disabled++;
1955                                                  1120 
1956         cpu_maps_update_done();                  1121         cpu_maps_update_done();
1957         return error;                            1122         return error;
1958 }                                                1123 }
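The suspend core reaches freeze_secondary_cpus() through a small wrapper that picks the primary argument; roughly, per include/linux/cpu.h in recent kernels:

	static inline int suspend_disable_secondary_cpus(void)
	{
		int cpu = 0;

		if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU))
			cpu = -1;	/* let freeze_secondary_cpus() pick a housekeeping CPU */

		return freeze_secondary_cpus(cpu);
	}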
1959                                                  1124 
1960 void __weak arch_thaw_secondary_cpus_begin(vo !! 1125 void __weak arch_enable_nonboot_cpus_begin(void)
1961 {                                                1126 {
1962 }                                                1127 }
1963                                                  1128 
1964 void __weak arch_thaw_secondary_cpus_end(void !! 1129 void __weak arch_enable_nonboot_cpus_end(void)
1965 {                                                1130 {
1966 }                                                1131 }
1967                                                  1132 
1968 void thaw_secondary_cpus(void)                !! 1133 void enable_nonboot_cpus(void)
1969 {                                                1134 {
1970         int cpu, error;                          1135         int cpu, error;
1971                                                  1136 
1972         /* Allow everyone to use the CPU hotp    1137         /* Allow everyone to use the CPU hotplug again */
1973         cpu_maps_update_begin();                 1138         cpu_maps_update_begin();
1974         __cpu_hotplug_enable();                  1139         __cpu_hotplug_enable();
1975         if (cpumask_empty(frozen_cpus))          1140         if (cpumask_empty(frozen_cpus))
1976                 goto out;                        1141                 goto out;
1977                                                  1142 
1978         pr_info("Enabling non-boot CPUs ...\n    1143         pr_info("Enabling non-boot CPUs ...\n");
1979                                                  1144 
1980         arch_thaw_secondary_cpus_begin();     !! 1145         arch_enable_nonboot_cpus_begin();
1981                                                  1146 
1982         for_each_cpu(cpu, frozen_cpus) {         1147         for_each_cpu(cpu, frozen_cpus) {
1983                 trace_suspend_resume(TPS("CPU    1148                 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1984                 error = _cpu_up(cpu, 1, CPUHP    1149                 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1985                 trace_suspend_resume(TPS("CPU    1150                 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1986                 if (!error) {                    1151                 if (!error) {
1987                         pr_info("CPU%d is up\    1152                         pr_info("CPU%d is up\n", cpu);
1988                         continue;                1153                         continue;
1989                 }                                1154                 }
1990                 pr_warn("Error taking CPU%d u    1155                 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1991         }                                        1156         }
1992                                                  1157 
1993         arch_thaw_secondary_cpus_end();       !! 1158         arch_enable_nonboot_cpus_end();
1994                                                  1159 
1995         cpumask_clear(frozen_cpus);              1160         cpumask_clear(frozen_cpus);
1996 out:                                             1161 out:
1997         cpu_maps_update_done();                  1162         cpu_maps_update_done();
1998 }                                                1163 }
1999                                                  1164 
2000 static int __init alloc_frozen_cpus(void)        1165 static int __init alloc_frozen_cpus(void)
2001 {                                                1166 {
2002         if (!alloc_cpumask_var(&frozen_cpus,     1167         if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
2003                 return -ENOMEM;                  1168                 return -ENOMEM;
2004         return 0;                                1169         return 0;
2005 }                                                1170 }
2006 core_initcall(alloc_frozen_cpus);                1171 core_initcall(alloc_frozen_cpus);
2007                                                  1172 
2008 /*                                               1173 /*
2009  * When callbacks for CPU hotplug notificatio    1174  * When callbacks for CPU hotplug notifications are being executed, we must
2010  * ensure that the state of the system with r    1175  * ensure that the state of the system with respect to the tasks being frozen
2011  * or not, as reported by the notification, r    1176  * or not, as reported by the notification, remains unchanged *throughout the
2012  * duration* of the execution of the callback    1177  * duration* of the execution of the callbacks.
2013  * Hence we need to prevent the freezer from     1178  * Hence we need to prevent the freezer from racing with regular CPU hotplug.
2014  *                                               1179  *
2015  * This synchronization is implemented by mut    1180  * This synchronization is implemented by mutually excluding regular CPU
2016  * hotplug and Suspend/Hibernate call paths b    1181  * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
2017  * Hibernate notifications.                      1182  * Hibernate notifications.
2018  */                                              1183  */
2019 static int                                       1184 static int
2020 cpu_hotplug_pm_callback(struct notifier_block    1185 cpu_hotplug_pm_callback(struct notifier_block *nb,
2021                         unsigned long action,    1186                         unsigned long action, void *ptr)
2022 {                                                1187 {
2023         switch (action) {                        1188         switch (action) {
2024                                                  1189 
2025         case PM_SUSPEND_PREPARE:                 1190         case PM_SUSPEND_PREPARE:
2026         case PM_HIBERNATION_PREPARE:             1191         case PM_HIBERNATION_PREPARE:
2027                 cpu_hotplug_disable();           1192                 cpu_hotplug_disable();
2028                 break;                           1193                 break;
2029                                                  1194 
2030         case PM_POST_SUSPEND:                    1195         case PM_POST_SUSPEND:
2031         case PM_POST_HIBERNATION:                1196         case PM_POST_HIBERNATION:
2032                 cpu_hotplug_enable();            1197                 cpu_hotplug_enable();
2033                 break;                           1198                 break;
2034                                                  1199 
2035         default:                                 1200         default:
2036                 return NOTIFY_DONE;              1201                 return NOTIFY_DONE;
2037         }                                        1202         }
2038                                                  1203 
2039         return NOTIFY_OK;                        1204         return NOTIFY_OK;
2040 }                                                1205 }
2041                                                  1206 
2042                                                  1207 
2043 static int __init cpu_hotplug_pm_sync_init(vo    1208 static int __init cpu_hotplug_pm_sync_init(void)
2044 {                                                1209 {
2045         /*                                       1210         /*
2046          * cpu_hotplug_pm_callback has higher    1211          * cpu_hotplug_pm_callback has higher priority than x86
2047          * bsp_pm_callback which depends on c    1212          * bsp_pm_callback which depends on cpu_hotplug_pm_callback
2048          * to disable cpu hotplug to avoid cp    1213          * to disable cpu hotplug to avoid cpu hotplug race.
2049          */                                      1214          */
2050         pm_notifier(cpu_hotplug_pm_callback,     1215         pm_notifier(cpu_hotplug_pm_callback, 0);
2051         return 0;                                1216         return 0;
2052 }                                                1217 }
2053 core_initcall(cpu_hotplug_pm_sync_init);         1218 core_initcall(cpu_hotplug_pm_sync_init);
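A driver can hook the same chain with the pm_notifier() convenience macro used above; a hypothetical notifier follows (the mydrv_* names are illustrative). Registered later at equal priority, it runs after cpu_hotplug_pm_callback on the PREPARE events:

	#include <linux/init.h>
	#include <linux/notifier.h>
	#include <linux/suspend.h>

	static int mydrv_pm_cb(struct notifier_block *nb, unsigned long action,
			       void *ptr)
	{
		switch (action) {
		case PM_SUSPEND_PREPARE:
		case PM_HIBERNATION_PREPARE:
			/* quiesce; regular CPU hotplug is already disabled */
			return NOTIFY_OK;
		case PM_POST_SUSPEND:
		case PM_POST_HIBERNATION:
			/* resume normal operation */
			return NOTIFY_OK;
		default:
			return NOTIFY_DONE;
		}
	}

	static int __init mydrv_pm_init(void)
	{
		pm_notifier(mydrv_pm_cb, 0);
		return 0;
	}
	late_initcall(mydrv_pm_init);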
2054                                                  1219 
2055 #endif /* CONFIG_PM_SLEEP_SMP */                 1220 #endif /* CONFIG_PM_SLEEP_SMP */
2056                                                  1221 
2057 int __boot_cpu_id;                               1222 int __boot_cpu_id;
2058                                                  1223 
2059 #endif /* CONFIG_SMP */                          1224 #endif /* CONFIG_SMP */
2060                                                  1225 
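The tables that follow enumerate the built-in states. Subsystems normally do not edit them; they attach callbacks with cpuhp_setup_state() instead. A minimal, hypothetical registration using a dynamic AP state (mydrv_* names are illustrative):

	#include <linux/cpuhotplug.h>
	#include <linux/init.h>

	static int mydrv_online(unsigned int cpu)
	{
		/* per-CPU setup; also called for CPUs that are already online */
		return 0;
	}

	static int mydrv_offline(unsigned int cpu)
	{
		/* mirror of mydrv_online() on the way down */
		return 0;
	}

	static int __init mydrv_hp_init(void)
	{
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
					mydrv_online, mydrv_offline);
		/* for dynamic states a positive state number is returned */
		return ret < 0 ? ret : 0;
	}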
2061 /* Boot processor state steps */                 1226 /* Boot processor state steps */
2062 static struct cpuhp_step cpuhp_hp_states[] =  !! 1227 static struct cpuhp_step cpuhp_bp_states[] = {
2063         [CPUHP_OFFLINE] = {                      1228         [CPUHP_OFFLINE] = {
2064                 .name                   = "of    1229                 .name                   = "offline",
2065                 .startup.single         = NUL    1230                 .startup.single         = NULL,
2066                 .teardown.single        = NUL    1231                 .teardown.single        = NULL,
2067         },                                       1232         },
2068 #ifdef CONFIG_SMP                                1233 #ifdef CONFIG_SMP
2069         [CPUHP_CREATE_THREADS]= {                1234         [CPUHP_CREATE_THREADS]= {
2070                 .name                   = "th    1235                 .name                   = "threads:prepare",
2071                 .startup.single         = smp    1236                 .startup.single         = smpboot_create_threads,
2072                 .teardown.single        = NUL    1237                 .teardown.single        = NULL,
2073                 .cant_stop              = tru    1238                 .cant_stop              = true,
2074         },                                       1239         },
2075         [CPUHP_PERF_PREPARE] = {                 1240         [CPUHP_PERF_PREPARE] = {
2076                 .name                   = "pe    1241                 .name                   = "perf:prepare",
2077                 .startup.single         = per    1242                 .startup.single         = perf_event_init_cpu,
2078                 .teardown.single        = per    1243                 .teardown.single        = perf_event_exit_cpu,
2079         },                                       1244         },
2080         [CPUHP_RANDOM_PREPARE] = {            << 
2081                 .name                   = "ra << 
2082                 .startup.single         = ran << 
2083                 .teardown.single        = NUL << 
2084         },                                    << 
2085         [CPUHP_WORKQUEUE_PREP] = {               1245         [CPUHP_WORKQUEUE_PREP] = {
2086                 .name                   = "wo    1246                 .name                   = "workqueue:prepare",
2087                 .startup.single         = wor    1247                 .startup.single         = workqueue_prepare_cpu,
2088                 .teardown.single        = NUL    1248                 .teardown.single        = NULL,
2089         },                                       1249         },
2090         [CPUHP_HRTIMERS_PREPARE] = {             1250         [CPUHP_HRTIMERS_PREPARE] = {
2091                 .name                   = "hr    1251                 .name                   = "hrtimers:prepare",
2092                 .startup.single         = hrt    1252                 .startup.single         = hrtimers_prepare_cpu,
2093                 .teardown.single        = NUL !! 1253                 .teardown.single        = hrtimers_dead_cpu,
2094         },                                       1254         },
2095         [CPUHP_SMPCFD_PREPARE] = {               1255         [CPUHP_SMPCFD_PREPARE] = {
2096                 .name                   = "sm    1256                 .name                   = "smpcfd:prepare",
2097                 .startup.single         = smp    1257                 .startup.single         = smpcfd_prepare_cpu,
2098                 .teardown.single        = smp    1258                 .teardown.single        = smpcfd_dead_cpu,
2099         },                                       1259         },
2100         [CPUHP_RELAY_PREPARE] = {                1260         [CPUHP_RELAY_PREPARE] = {
2101                 .name                   = "re    1261                 .name                   = "relay:prepare",
2102                 .startup.single         = rel    1262                 .startup.single         = relay_prepare_cpu,
2103                 .teardown.single        = NUL    1263                 .teardown.single        = NULL,
2104         },                                       1264         },
                                                   >> 1265         [CPUHP_SLAB_PREPARE] = {
                                                   >> 1266                 .name                   = "slab:prepare",
                                                   >> 1267                 .startup.single         = slab_prepare_cpu,
                                                   >> 1268                 .teardown.single        = slab_dead_cpu,
                                                   >> 1269         },
2105         [CPUHP_RCUTREE_PREP] = {                 1270         [CPUHP_RCUTREE_PREP] = {
2106                 .name                   = "RC    1271                 .name                   = "RCU/tree:prepare",
2107                 .startup.single         = rcu    1272                 .startup.single         = rcutree_prepare_cpu,
2108                 .teardown.single        = rcu    1273                 .teardown.single        = rcutree_dead_cpu,
2109         },                                       1274         },
2110         /*                                       1275         /*
2111          * On the tear-down path, timers_dead    1276          * On the tear-down path, timers_dead_cpu() must be invoked
2112          * before blk_mq_queue_reinit_notify(    1277          * before blk_mq_queue_reinit_notify() from notify_dead(),
2113          * otherwise a RCU stall occurs.         1278          * otherwise a RCU stall occurs.
2114          */                                      1279          */
2115         [CPUHP_TIMERS_PREPARE] = {               1280         [CPUHP_TIMERS_PREPARE] = {
2116                 .name                   = "ti !! 1281                 .name                   = "timers:dead",
2117                 .startup.single         = tim    1282                 .startup.single         = timers_prepare_cpu,
2118                 .teardown.single        = tim    1283                 .teardown.single        = timers_dead_cpu,
2119         },                                       1284         },
2120                                               !! 1285         /* Kicks the plugged cpu into life */
2121 #ifdef CONFIG_HOTPLUG_SPLIT_STARTUP           << 
2122         /*                                    << 
2123          * Kicks the AP alive. AP will wait i << 
2124          * the next step will release it.     << 
2125          */                                   << 
2126         [CPUHP_BP_KICK_AP] = {                << 
2127                 .name                   = "cp << 
2128                 .startup.single         = cpu << 
2129         },                                    << 
2130                                               << 
2131         /*                                    << 
2132          * Waits for the AP to reach cpuhp_ap << 
2133          * releases it for the complete bring << 
2134          */                                   << 
2135         [CPUHP_BRINGUP_CPU] = {                  1286         [CPUHP_BRINGUP_CPU] = {
2136                 .name                   = "cp    1287                 .name                   = "cpu:bringup",
2137                 .startup.single         = cpu !! 1288                 .startup.single         = bringup_cpu,
2138                 .teardown.single        = fin !! 1289                 .teardown.single        = NULL,
2139                 .cant_stop              = tru    1290                 .cant_stop              = true,
2140         },                                       1291         },
2141 #else                                         << 
2142         /*                                       1292         /*
2143          * All-in-one CPU bringup state which !! 1293          * Handled on control processor until the plugged processor manages
                                                   >> 1294          * this itself.
2144          */                                      1295          */
2145         [CPUHP_BRINGUP_CPU] = {               !! 1296         [CPUHP_TEARDOWN_CPU] = {
2146                 .name                   = "cp !! 1297                 .name                   = "cpu:teardown",
2147                 .startup.single         = bri !! 1298                 .startup.single         = NULL,
2148                 .teardown.single        = fin !! 1299                 .teardown.single        = takedown_cpu,
2149                 .cant_stop              = tru    1300                 .cant_stop              = true,
2150         },                                       1301         },
                                                   >> 1302 #else
                                                   >> 1303         [CPUHP_BRINGUP_CPU] = { },
2151 #endif                                           1304 #endif
                                                   >> 1305 };
                                                   >> 1306 
                                                   >> 1307 /* Application processor state steps */
                                                   >> 1308 static struct cpuhp_step cpuhp_ap_states[] = {
                                                   >> 1309 #ifdef CONFIG_SMP
2152         /* Final state before CPU kills itsel    1310         /* Final state before CPU kills itself */
2153         [CPUHP_AP_IDLE_DEAD] = {                 1311         [CPUHP_AP_IDLE_DEAD] = {
2154                 .name                   = "id    1312                 .name                   = "idle:dead",
2155         },                                       1313         },
2156         /*                                       1314         /*
2157          * Last state before CPU enters the i    1315          * Last state before CPU enters the idle loop to die. Transient state
2158          * for synchronization.                  1316          * for synchronization.
2159          */                                      1317          */
2160         [CPUHP_AP_OFFLINE] = {                   1318         [CPUHP_AP_OFFLINE] = {
2161                 .name                   = "ap    1319                 .name                   = "ap:offline",
2162                 .cant_stop              = tru    1320                 .cant_stop              = true,
2163         },                                       1321         },
2164         /* First state is scheduler control.     1322         /* First state is scheduler control. Interrupts are disabled */
2165         [CPUHP_AP_SCHED_STARTING] = {            1323         [CPUHP_AP_SCHED_STARTING] = {
2166                 .name                   = "sc    1324                 .name                   = "sched:starting",
2167                 .startup.single         = sch    1325                 .startup.single         = sched_cpu_starting,
2168                 .teardown.single        = sch    1326                 .teardown.single        = sched_cpu_dying,
2169         },                                       1327         },
2170         [CPUHP_AP_RCUTREE_DYING] = {             1328         [CPUHP_AP_RCUTREE_DYING] = {
2171                 .name                   = "RC    1329                 .name                   = "RCU/tree:dying",
2172                 .startup.single         = NUL    1330                 .startup.single         = NULL,
2173                 .teardown.single        = rcu    1331                 .teardown.single        = rcutree_dying_cpu,
2174         },                                       1332         },
2175         [CPUHP_AP_SMPCFD_DYING] = {              1333         [CPUHP_AP_SMPCFD_DYING] = {
2176                 .name                   = "sm    1334                 .name                   = "smpcfd:dying",
2177                 .startup.single         = NUL    1335                 .startup.single         = NULL,
2178                 .teardown.single        = smp    1336                 .teardown.single        = smpcfd_dying_cpu,
2179         },                                       1337         },
2180         [CPUHP_AP_HRTIMERS_DYING] = {                         << 
2181                 .name                   = "hrtimers:dying",   << 
2182                 .startup.single         = NULL,               << 
2183                 .teardown.single        = hrtimers_cpu_dying, << 
2184         },                                                    << 
2185         [CPUHP_AP_TICK_DYING] = {                             << 
2186                 .name                   = "tick:dying",       << 
2187                 .startup.single         = NULL,               << 
2188                 .teardown.single        = tick_cpu_dying,     << 
2189         },                                                    << 
2190         /* Entry state on starting. Interrupts enabled from here on. Transient    1338         /* Entry state on starting. Interrupts enabled from here on. Transient
2191          * state for synchronization */    1339          * state for synchronization */
2192         [CPUHP_AP_ONLINE] = {                    1340         [CPUHP_AP_ONLINE] = {
2193                 .name                   = "ap    1341                 .name                   = "ap:online",
2194         },                                       1342         },
2195         /*                                                            << 
2196          * Handled on control processor until the plugged processor manages  << 
2197          * this itself.                                                << 
2198          */                                                           << 
2199         [CPUHP_TEARDOWN_CPU] = {                                      << 
2200                 .name                   = "cpu:teardown",             << 
2201                 .startup.single         = NULL,                       << 
2202                 .teardown.single        = takedown_cpu,               << 
2203                 .cant_stop              = true,                       << 
2204         },                                                            << 
2205                                                                       << 
2206         [CPUHP_AP_SCHED_WAIT_EMPTY] = {                               << 
2207                 .name                   = "sched:waitempty",          << 
2208                 .startup.single         = NULL,                       << 
2209                 .teardown.single        = sched_cpu_wait_empty,       << 
2210         },                                                            << 
2211                                               << 
2212         /* Handle smpboot threads park/unpark    1343         /* Handle smpboot threads park/unpark */
2213         [CPUHP_AP_SMPBOOT_THREADS] = {           1344         [CPUHP_AP_SMPBOOT_THREADS] = {
2214                 .name                   = "sm    1345                 .name                   = "smpboot/threads:online",
2215                 .startup.single         = smp    1346                 .startup.single         = smpboot_unpark_threads,
2216                 .teardown.single        = smpboot_park_threads,  !!  1347                 .teardown.single        = NULL,
2217         },                                       1348         },
2218         [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {       1349         [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
2219                 .name                   = "ir    1350                 .name                   = "irq/affinity:online",
2220                 .startup.single         = irq    1351                 .startup.single         = irq_affinity_online_cpu,
2221                 .teardown.single        = NUL    1352                 .teardown.single        = NULL,
2222         },                                       1353         },
2223         [CPUHP_AP_PERF_ONLINE] = {               1354         [CPUHP_AP_PERF_ONLINE] = {
2224                 .name                   = "pe    1355                 .name                   = "perf:online",
2225                 .startup.single         = per    1356                 .startup.single         = perf_event_init_cpu,
2226                 .teardown.single        = per    1357                 .teardown.single        = perf_event_exit_cpu,
2227         },                                       1358         },
2228         [CPUHP_AP_WATCHDOG_ONLINE] = {                                 << 
2229                 .name                   = "lockup_detector:online",    << 
2230                 .startup.single         = lockup_detector_online_cpu,  << 
2231                 .teardown.single        = lockup_detector_offline_cpu, << 
2232         },                                                             << 
2233         [CPUHP_AP_WORKQUEUE_ONLINE] = {          1359         [CPUHP_AP_WORKQUEUE_ONLINE] = {
2234                 .name                   = "wo    1360                 .name                   = "workqueue:online",
2235                 .startup.single         = wor    1361                 .startup.single         = workqueue_online_cpu,
2236                 .teardown.single        = wor    1362                 .teardown.single        = workqueue_offline_cpu,
2237         },                                       1363         },
2238         [CPUHP_AP_RANDOM_ONLINE] = {                          << 
2239                 .name                   = "random:online",    << 
2240                 .startup.single         = random_online_cpu,  << 
2241                 .teardown.single        = NULL,               << 
2242         },                                                    << 
2243         [CPUHP_AP_RCUTREE_ONLINE] = {            1364         [CPUHP_AP_RCUTREE_ONLINE] = {
2244                 .name                   = "RC    1365                 .name                   = "RCU/tree:online",
2245                 .startup.single         = rcu    1366                 .startup.single         = rcutree_online_cpu,
2246                 .teardown.single        = rcu    1367                 .teardown.single        = rcutree_offline_cpu,
2247         },                                       1368         },
2248 #endif                                           1369 #endif
2249         /*                                       1370         /*
2250          * The dynamically registered state s    1371          * The dynamically registered state space is here
2251          */                                      1372          */
2252                                                  1373 
2253 #ifdef CONFIG_SMP                                1374 #ifdef CONFIG_SMP
2254         /* Last state is scheduler control se    1375         /* Last state is scheduler control setting the cpu active */
2255         [CPUHP_AP_ACTIVE] = {                    1376         [CPUHP_AP_ACTIVE] = {
2256                 .name                   = "sc    1377                 .name                   = "sched:active",
2257                 .startup.single         = sch    1378                 .startup.single         = sched_cpu_activate,
2258                 .teardown.single        = sch    1379                 .teardown.single        = sched_cpu_deactivate,
2259         },                                       1380         },
2260 #endif                                           1381 #endif
2261                                                  1382 
2262         /* CPU is fully up and running. */       1383         /* CPU is fully up and running. */
2263         [CPUHP_ONLINE] = {                       1384         [CPUHP_ONLINE] = {
2264                 .name                   = "on    1385                 .name                   = "online",
2265                 .startup.single         = NUL    1386                 .startup.single         = NULL,
2266                 .teardown.single        = NUL    1387                 .teardown.single        = NULL,
2267         },                                       1388         },
2268 };                                               1389 };
2269                                                  1390 
2270 /* Sanity check for callbacks */                 1391 /* Sanity check for callbacks */
2271 static int cpuhp_cb_check(enum cpuhp_state st    1392 static int cpuhp_cb_check(enum cpuhp_state state)
2272 {                                                1393 {
2273         if (state <= CPUHP_OFFLINE || state >    1394         if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
2274                 return -EINVAL;                  1395                 return -EINVAL;
2275         return 0;                                1396         return 0;
2276 }                                                1397 }
2277                                                  1398 
2278 /*                                               1399 /*
2279  * Returns a free slot for dynamic state assignment in the Online range. The states    1400  * Returns a free slot for dynamic state assignment in the Online range. The states
2280  * are protected by the cpuhp_state_mutex and an empty slot is identified    1401  * are protected by the cpuhp_state_mutex and an empty slot is identified
2281  * by having no name assigned.    1402  * by having no name assigned.
2282  */                                              1403  */
2283 static int cpuhp_reserve_state(enum cpuhp_sta    1404 static int cpuhp_reserve_state(enum cpuhp_state state)
2284 {                                                1405 {
2285         enum cpuhp_state i, end;                 1406         enum cpuhp_state i, end;
2286         struct cpuhp_step *step;                 1407         struct cpuhp_step *step;
2287                                                  1408 
2288         switch (state) {                         1409         switch (state) {
2289         case CPUHP_AP_ONLINE_DYN:                1410         case CPUHP_AP_ONLINE_DYN:
2290                 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;  !!  1411                 step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
2291                 end = CPUHP_AP_ONLINE_DYN_END    1412                 end = CPUHP_AP_ONLINE_DYN_END;
2292                 break;                           1413                 break;
2293         case CPUHP_BP_PREPARE_DYN:               1414         case CPUHP_BP_PREPARE_DYN:
2294                 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;  !!  1415                 step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
2295                 end = CPUHP_BP_PREPARE_DYN_EN    1416                 end = CPUHP_BP_PREPARE_DYN_END;
2296                 break;                           1417                 break;
2297         default:                                 1418         default:
2298                 return -EINVAL;                  1419                 return -EINVAL;
2299         }                                        1420         }
2300                                                  1421 
2301         for (i = state; i <= end; i++, step++    1422         for (i = state; i <= end; i++, step++) {
2302                 if (!step->name)                 1423                 if (!step->name)
2303                         return i;                1424                         return i;
2304         }                                        1425         }
2305         WARN(1, "No more dynamic states avail    1426         WARN(1, "No more dynamic states available for CPU hotplug\n");
2306         return -ENOSPC;                          1427         return -ENOSPC;
2307 }                                                1428 }
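Callers do not invoke cpuhp_reserve_state() directly: passing CPUHP_AP_ONLINE_DYN (or CPUHP_BP_PREPARE_DYN) to the setup API makes the core reserve the first unnamed slot and hand its number back as a positive return value. A minimal sketch of a client module; all foo_* names are hypothetical:

    #include <linux/cpuhotplug.h>
    #include <linux/module.h>

    static enum cpuhp_state foo_state;      /* the reserved dynamic slot */

    static int foo_cpu_online(unsigned int cpu)
    {
            /* set up per-CPU resources; may fail and trigger rollback */
            return 0;
    }

    static int foo_cpu_offline(unsigned int cpu)
    {
            /* quiesce per-CPU resources */
            return 0;
    }

    static int __init foo_init(void)
    {
            int ret;

            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
                                    foo_cpu_online, foo_cpu_offline);
            if (ret < 0)
                    return ret;
            foo_state = ret;        /* positive value == allocated state */
            return 0;
    }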
2308                                                  1429 
2309 static int cpuhp_store_callbacks(enum cpuhp_s    1430 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
2310                                  int (*startu    1431                                  int (*startup)(unsigned int cpu),
2311                                  int (*teardo    1432                                  int (*teardown)(unsigned int cpu),
2312                                  bool multi_i    1433                                  bool multi_instance)
2313 {                                                1434 {
2314         /* (Un)Install the callbacks for furt    1435         /* (Un)Install the callbacks for further cpu hotplug operations */
2315         struct cpuhp_step *sp;                   1436         struct cpuhp_step *sp;
2316         int ret = 0;                             1437         int ret = 0;
2317                                                  1438 
2318         /*                                       1439         /*
2319          * If name is NULL, then the state ge    1440          * If name is NULL, then the state gets removed.
2320          *                                       1441          *
2321          * CPUHP_AP_ONLINE_DYN and CPUHP_BP_P    1442          * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
2322          * the first allocation from these dy    1443          * the first allocation from these dynamic ranges, so the removal
2323          * would trigger a new allocation and    1444          * would trigger a new allocation and clear the wrong (already
2324          * empty) state, leaving the callback    1445          * empty) state, leaving the callbacks of the to be cleared state
2325          * dangling, which causes wreckage on    1446          * dangling, which causes wreckage on the next hotplug operation.
2326          */                                      1447          */
2327         if (name && (state == CPUHP_AP_ONLINE    1448         if (name && (state == CPUHP_AP_ONLINE_DYN ||
2328                      state == CPUHP_BP_PREPAR    1449                      state == CPUHP_BP_PREPARE_DYN)) {
2329                 ret = cpuhp_reserve_state(sta    1450                 ret = cpuhp_reserve_state(state);
2330                 if (ret < 0)                     1451                 if (ret < 0)
2331                         return ret;              1452                         return ret;
2332                 state = ret;                     1453                 state = ret;
2333         }                                        1454         }
2334         sp = cpuhp_get_step(state);              1455         sp = cpuhp_get_step(state);
2335         if (name && sp->name)                    1456         if (name && sp->name)
2336                 return -EBUSY;                   1457                 return -EBUSY;
2337                                                  1458 
2338         sp->startup.single = startup;            1459         sp->startup.single = startup;
2339         sp->teardown.single = teardown;          1460         sp->teardown.single = teardown;
2340         sp->name = name;                         1461         sp->name = name;
2341         sp->multi_instance = multi_instance;     1462         sp->multi_instance = multi_instance;
2342         INIT_HLIST_HEAD(&sp->list);              1463         INIT_HLIST_HEAD(&sp->list);
2343         return ret;                              1464         return ret;
2344 }                                                1465 }
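The comment above is why removal of a dynamically allocated state must pass the number saved at setup time; passing CPUHP_AP_ONLINE_DYN again would reserve, and then clear, a fresh slot. Continuing the hypothetical foo sketch:

    static void __exit foo_exit(void)
    {
            /* foo_state as saved in foo_init(), never CPUHP_AP_ONLINE_DYN */
            cpuhp_remove_state(foo_state);
    }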
2345                                                  1466 
2346 static void *cpuhp_get_teardown_cb(enum cpuhp    1467 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
2347 {                                                1468 {
2348         return cpuhp_get_step(state)->teardow    1469         return cpuhp_get_step(state)->teardown.single;
2349 }                                                1470 }
2350                                                  1471 
2351 /*                                               1472 /*
2352  * Call the startup/teardown function for a s    1473  * Call the startup/teardown function for a step either on the AP or
2353  * on the current CPU.                           1474  * on the current CPU.
2354  */                                              1475  */
2355 static int cpuhp_issue_call(int cpu, enum cpu    1476 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
2356                             struct hlist_node    1477                             struct hlist_node *node)
2357 {                                                1478 {
2358         struct cpuhp_step *sp = cpuhp_get_ste    1479         struct cpuhp_step *sp = cpuhp_get_step(state);
2359         int ret;                                 1480         int ret;
2360                                                  1481 
2361         /*                                       1482         /*
2362          * If there's nothing to do, we're done.    1483          * If there's nothing to do, we're done.
2363          * Relies on the union for multi_instance.    1484          * Relies on the union for multi_instance.
2364          */                                      1485          */
2365         if (cpuhp_step_empty(bringup, sp))    !! 1486         if ((bringup && !sp->startup.single) ||
                                                   >> 1487             (!bringup && !sp->teardown.single))
2366                 return 0;                        1488                 return 0;
2367         /*                                       1489         /*
2368          * The non AP bound callbacks can fail on bringup. On teardown,    1490          * The non AP bound callbacks can fail on bringup. On teardown,
2369          * e.g. module removal, we crash for now.    1491          * e.g. module removal, we crash for now.
2370          */                                      1492          */
2371 #ifdef CONFIG_SMP                                1493 #ifdef CONFIG_SMP
2372         if (cpuhp_is_ap_state(state))            1494         if (cpuhp_is_ap_state(state))
2373                 ret = cpuhp_invoke_ap_callbac    1495                 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
2374         else                                     1496         else
2375                 ret = cpuhp_invoke_callback(c    1497                 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2376 #else                                            1498 #else
2377         ret = cpuhp_invoke_callback(cpu, stat    1499         ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2378 #endif                                           1500 #endif
2379         BUG_ON(ret && !bringup);                 1501         BUG_ON(ret && !bringup);
2380         return ret;                              1502         return ret;
2381 }                                                1503 }
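The BUG_ON() above encodes the contract that teardown callbacks must not fail outside of bringup rollback. A defensive sketch of what that means for a client callback; bar_drain_queue() is a hypothetical stand-in for subsystem-specific cleanup:

    static int bar_cpu_offline(unsigned int cpu)
    {
            if (bar_drain_queue(cpu))
                    pr_warn("bar: drain failed on CPU%u, continuing\n", cpu);
            /* returning non-zero here would trip the BUG_ON() on teardown */
            return 0;
    }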
2382                                                  1504 
2383 /*                                               1505 /*
2384  * Called from __cpuhp_setup_state on a recov    1506  * Called from __cpuhp_setup_state on a recoverable failure.
2385  *                                               1507  *
2386  * Note: The teardown callbacks for rollback     1508  * Note: The teardown callbacks for rollback are not allowed to fail!
2387  */                                              1509  */
2388 static void cpuhp_rollback_install(int failed    1510 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
2389                                    struct hli    1511                                    struct hlist_node *node)
2390 {                                                1512 {
2391         int cpu;                                 1513         int cpu;
2392                                                  1514 
2393         /* Roll back the already executed ste    1515         /* Roll back the already executed steps on the other cpus */
2394         for_each_present_cpu(cpu) {              1516         for_each_present_cpu(cpu) {
2395                 struct cpuhp_cpu_state *st =     1517                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2396                 int cpustate = st->state;        1518                 int cpustate = st->state;
2397                                                  1519 
2398                 if (cpu >= failedcpu)            1520                 if (cpu >= failedcpu)
2399                         break;                   1521                         break;
2400                                                  1522 
2401                 /* Did we invoke the startup     1523                 /* Did we invoke the startup call on that cpu ? */
2402                 if (cpustate >= state)           1524                 if (cpustate >= state)
2403                         cpuhp_issue_call(cpu,    1525                         cpuhp_issue_call(cpu, state, false, node);
2404         }                                        1526         }
2405 }                                                1527 }
2406                                                  1528 
2407 int __cpuhp_state_add_instance_cpuslocked(enu    1529 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
2408                                           str    1530                                           struct hlist_node *node,
2409                                           boo    1531                                           bool invoke)
2410 {                                                1532 {
2411         struct cpuhp_step *sp;                   1533         struct cpuhp_step *sp;
2412         int cpu;                                 1534         int cpu;
2413         int ret;                                 1535         int ret;
2414                                                  1536 
2415         lockdep_assert_cpus_held();              1537         lockdep_assert_cpus_held();
2416                                                  1538 
2417         sp = cpuhp_get_step(state);              1539         sp = cpuhp_get_step(state);
2418         if (sp->multi_instance == false)         1540         if (sp->multi_instance == false)
2419                 return -EINVAL;                  1541                 return -EINVAL;
2420                                                  1542 
2421         mutex_lock(&cpuhp_state_mutex);          1543         mutex_lock(&cpuhp_state_mutex);
2422                                                  1544 
2423         if (!invoke || !sp->startup.multi)       1545         if (!invoke || !sp->startup.multi)
2424                 goto add_node;                   1546                 goto add_node;
2425                                                  1547 
2426         /*                                       1548         /*
2427          * Try to call the startup callback f    1549          * Try to call the startup callback for each present cpu
2428          * depending on the hotplug state of     1550          * depending on the hotplug state of the cpu.
2429          */                                      1551          */
2430         for_each_present_cpu(cpu) {              1552         for_each_present_cpu(cpu) {
2431                 struct cpuhp_cpu_state *st =     1553                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2432                 int cpustate = st->state;        1554                 int cpustate = st->state;
2433                                                  1555 
2434                 if (cpustate < state)            1556                 if (cpustate < state)
2435                         continue;                1557                         continue;
2436                                                  1558 
2437                 ret = cpuhp_issue_call(cpu, s    1559                 ret = cpuhp_issue_call(cpu, state, true, node);
2438                 if (ret) {                       1560                 if (ret) {
2439                         if (sp->teardown.mult    1561                         if (sp->teardown.multi)
2440                                 cpuhp_rollbac    1562                                 cpuhp_rollback_install(cpu, state, node);
2441                         goto unlock;             1563                         goto unlock;
2442                 }                                1564                 }
2443         }                                        1565         }
2444 add_node:                                        1566 add_node:
2445         ret = 0;                                 1567         ret = 0;
2446         hlist_add_head(node, &sp->list);         1568         hlist_add_head(node, &sp->list);
2447 unlock:                                          1569 unlock:
2448         mutex_unlock(&cpuhp_state_mutex);        1570         mutex_unlock(&cpuhp_state_mutex);
2449         return ret;                              1571         return ret;
2450 }                                                1572 }
2451                                                  1573 
2452 int __cpuhp_state_add_instance(enum cpuhp_sta    1574 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
2453                                bool invoke)      1575                                bool invoke)
2454 {                                                1576 {
2455         int ret;                                 1577         int ret;
2456                                                  1578 
2457         cpus_read_lock();                        1579         cpus_read_lock();
2458         ret = __cpuhp_state_add_instance_cpus    1580         ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
2459         cpus_read_unlock();                      1581         cpus_read_unlock();
2460         return ret;                              1582         return ret;
2461 }                                                1583 }
2462 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance)    1584 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
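For multi-instance states, the state itself is set up once and an hlist_node is added per instance; the callbacks then receive that node. A sketch with a hypothetical foo_dev:

    #include <linux/cpuhotplug.h>
    #include <linux/list.h>

    struct foo_dev {
            struct hlist_node node;         /* linked into the state's list */
            int id;
    };

    static enum cpuhp_state foo_multi_state;

    static int foo_inst_online(unsigned int cpu, struct hlist_node *node)
    {
            struct foo_dev *fdev = hlist_entry(node, struct foo_dev, node);

            pr_info("foo%d: CPU%u online\n", fdev->id, cpu);
            return 0;
    }

    static int __init foo_multi_init(void)          /* once per subsystem */
    {
            int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:multi",
                                              foo_inst_online, NULL);
            if (ret < 0)
                    return ret;
            foo_multi_state = ret;
            return 0;
    }

    static int foo_add_device(struct foo_dev *fdev) /* once per device */
    {
            /* runs foo_inst_online() on every CPU already >= the state */
            return cpuhp_state_add_instance(foo_multi_state, &fdev->node);
    }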
2463                                                  1585 
2464 /**                                              1586 /**
2465  * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state    1587  * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
2466  * @state:              The state to setup       1588  * @state:              The state to setup
2467  * @name:               Name of the step      << 
2468  * @invoke:             If true, the startup     1589  * @invoke:             If true, the startup function is invoked for cpus where
2469  *                      cpu state >= @state      1590  *                      cpu state >= @state
2470  * @startup:            startup callback func    1591  * @startup:            startup callback function
2471  * @teardown:           teardown callback fun    1592  * @teardown:           teardown callback function
2472  * @multi_instance:     State is set up for m    1593  * @multi_instance:     State is set up for multiple instances which get
2473  *                      added afterwards.        1594  *                      added afterwards.
2474  *                                               1595  *
2475  * The caller needs to hold cpus read locked     1596  * The caller needs to hold cpus read locked while calling this function.
2476  * Return:                                    !! 1597  * Returns:
2477  *   On success:                                 1598  *   On success:
2478  *      Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;  !!  1599  *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
2479  *      0 for all other states                   1600  *      0 for all other states
2480  *   On failure: proper (negative) error code    1601  *   On failure: proper (negative) error code
2481  */                                              1602  */
2482 int __cpuhp_setup_state_cpuslocked(enum cpuhp    1603 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
2483                                    const char    1604                                    const char *name, bool invoke,
2484                                    int (*star    1605                                    int (*startup)(unsigned int cpu),
2485                                    int (*tear    1606                                    int (*teardown)(unsigned int cpu),
2486                                    bool multi    1607                                    bool multi_instance)
2487 {                                                1608 {
2488         int cpu, ret = 0;                        1609         int cpu, ret = 0;
2489         bool dynstate;                           1610         bool dynstate;
2490                                                  1611 
2491         lockdep_assert_cpus_held();              1612         lockdep_assert_cpus_held();
2492                                                  1613 
2493         if (cpuhp_cb_check(state) || !name)      1614         if (cpuhp_cb_check(state) || !name)
2494                 return -EINVAL;                  1615                 return -EINVAL;
2495                                                  1616 
2496         mutex_lock(&cpuhp_state_mutex);          1617         mutex_lock(&cpuhp_state_mutex);
2497                                                  1618 
2498         ret = cpuhp_store_callbacks(state, na    1619         ret = cpuhp_store_callbacks(state, name, startup, teardown,
2499                                     multi_ins    1620                                     multi_instance);
2500                                                  1621 
2501         dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;  !!  1622         dynstate = state == CPUHP_AP_ONLINE_DYN;
2502         if (ret > 0 && dynstate) {               1623         if (ret > 0 && dynstate) {
2503                 state = ret;                     1624                 state = ret;
2504                 ret = 0;                         1625                 ret = 0;
2505         }                                        1626         }
2506                                                  1627 
2507         if (ret || !invoke || !startup)          1628         if (ret || !invoke || !startup)
2508                 goto out;                        1629                 goto out;
2509                                                  1630 
2510         /*                                       1631         /*
2511          * Try to call the startup callback f    1632          * Try to call the startup callback for each present cpu
2512          * depending on the hotplug state of     1633          * depending on the hotplug state of the cpu.
2513          */                                      1634          */
2514         for_each_present_cpu(cpu) {              1635         for_each_present_cpu(cpu) {
2515                 struct cpuhp_cpu_state *st =     1636                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2516                 int cpustate = st->state;        1637                 int cpustate = st->state;
2517                                                  1638 
2518                 if (cpustate < state)            1639                 if (cpustate < state)
2519                         continue;                1640                         continue;
2520                                                  1641 
2521                 ret = cpuhp_issue_call(cpu, s    1642                 ret = cpuhp_issue_call(cpu, state, true, NULL);
2522                 if (ret) {                       1643                 if (ret) {
2523                         if (teardown)            1644                         if (teardown)
2524                                 cpuhp_rollbac    1645                                 cpuhp_rollback_install(cpu, state, NULL);
2525                         cpuhp_store_callbacks    1646                         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2526                         goto out;                1647                         goto out;
2527                 }                                1648                 }
2528         }                                        1649         }
2529 out:                                             1650 out:
2530         mutex_unlock(&cpuhp_state_mutex);        1651         mutex_unlock(&cpuhp_state_mutex);
2531         /*                                       1652         /*
2532          * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,  !!  1653          * If the requested state is CPUHP_AP_ONLINE_DYN, return the
2533          * return the dynamically allocated state in case of success.  !!  1654          * dynamically allocated state in case of success.
2534          */                                      1655          */
2535         if (!ret && dynstate)                    1656         if (!ret && dynstate)
2536                 return state;                    1657                 return state;
2537         return ret;                              1658         return ret;
2538 }                                                1659 }
2539 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked)    1660 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
2540                                                  1661 
2541 int __cpuhp_setup_state(enum cpuhp_state stat    1662 int __cpuhp_setup_state(enum cpuhp_state state,
2542                         const char *name, boo    1663                         const char *name, bool invoke,
2543                         int (*startup)(unsign    1664                         int (*startup)(unsigned int cpu),
2544                         int (*teardown)(unsig    1665                         int (*teardown)(unsigned int cpu),
2545                         bool multi_instance)     1666                         bool multi_instance)
2546 {                                                1667 {
2547         int ret;                                 1668         int ret;
2548                                                  1669 
2549         cpus_read_lock();                        1670         cpus_read_lock();
2550         ret = __cpuhp_setup_state_cpuslocked(    1671         ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
2551                                                  1672                                              teardown, multi_instance);
2552         cpus_read_unlock();                      1673         cpus_read_unlock();
2553         return ret;                              1674         return ret;
2554 }                                                1675 }
2555 EXPORT_SYMBOL(__cpuhp_setup_state);              1676 EXPORT_SYMBOL(__cpuhp_setup_state);
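The unlocked wrapper above is the common entry point; a caller that already holds the CPU read lock must use the _cpuslocked variant, which only asserts that the lock is held. A sketch, reusing the hypothetical foo callbacks from the earlier example:

    static int foo_setup_locked(void)
    {
            int ret;

            cpus_read_lock();
            /* ... other work requiring the lock ... */
            ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN, "foo:locked",
                                               foo_cpu_online, foo_cpu_offline);
            cpus_read_unlock();
            return ret < 0 ? ret : 0;
    }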
2556                                                  1677 
2557 int __cpuhp_state_remove_instance(enum cpuhp_    1678 int __cpuhp_state_remove_instance(enum cpuhp_state state,
2558                                   struct hlis    1679                                   struct hlist_node *node, bool invoke)
2559 {                                                1680 {
2560         struct cpuhp_step *sp = cpuhp_get_ste    1681         struct cpuhp_step *sp = cpuhp_get_step(state);
2561         int cpu;                                 1682         int cpu;
2562                                                  1683 
2563         BUG_ON(cpuhp_cb_check(state));           1684         BUG_ON(cpuhp_cb_check(state));
2564                                                  1685 
2565         if (!sp->multi_instance)                 1686         if (!sp->multi_instance)
2566                 return -EINVAL;                  1687                 return -EINVAL;
2567                                                  1688 
2568         cpus_read_lock();                        1689         cpus_read_lock();
2569         mutex_lock(&cpuhp_state_mutex);          1690         mutex_lock(&cpuhp_state_mutex);
2570                                                  1691 
2571         if (!invoke || !cpuhp_get_teardown_cb    1692         if (!invoke || !cpuhp_get_teardown_cb(state))
2572                 goto remove;                     1693                 goto remove;
2573         /*                                       1694         /*
2574          * Call the teardown callback for eac    1695          * Call the teardown callback for each present cpu depending
2575          * on the hotplug state of the cpu. T    1696          * on the hotplug state of the cpu. This function is not
2576          * allowed to fail currently!            1697          * allowed to fail currently!
2577          */                                      1698          */
2578         for_each_present_cpu(cpu) {              1699         for_each_present_cpu(cpu) {
2579                 struct cpuhp_cpu_state *st =     1700                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2580                 int cpustate = st->state;        1701                 int cpustate = st->state;
2581                                                  1702 
2582                 if (cpustate >= state)           1703                 if (cpustate >= state)
2583                         cpuhp_issue_call(cpu,    1704                         cpuhp_issue_call(cpu, state, false, node);
2584         }                                        1705         }
2585                                                  1706 
2586 remove:                                          1707 remove:
2587         hlist_del(node);                         1708         hlist_del(node);
2588         mutex_unlock(&cpuhp_state_mutex);        1709         mutex_unlock(&cpuhp_state_mutex);
2589         cpus_read_unlock();                      1710         cpus_read_unlock();
2590                                                  1711 
2591         return 0;                                1712         return 0;
2592 }                                                1713 }
2593 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instan    1714 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
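Instance removal mirrors addition; the cpuhp_state_remove_instance() wrapper passes invoke=true, so the teardown callback runs on every CPU at or above the state before the node is unlinked. Continuing the foo_dev sketch:

    static void foo_remove_device(struct foo_dev *fdev)
    {
            cpuhp_state_remove_instance(foo_multi_state, &fdev->node);
    }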
2594                                                  1715 
2595 /**                                              1716 /**
2596  * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state    1717  * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
2597  * @state:      The state to remove              1718  * @state:      The state to remove
2598  * @invoke:     If true, the teardown functio    1719  * @invoke:     If true, the teardown function is invoked for cpus where
2599  *              cpu state >= @state              1720  *              cpu state >= @state
2600  *                                               1721  *
2601  * The caller needs to hold cpus read locked     1722  * The caller needs to hold cpus read locked while calling this function.
2602  * The teardown callback is currently not all    1723  * The teardown callback is currently not allowed to fail. Think
2603  * about module removal!                         1724  * about module removal!
2604  */                                              1725  */
2605 void __cpuhp_remove_state_cpuslocked(enum cpu    1726 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2606 {                                                1727 {
2607         struct cpuhp_step *sp = cpuhp_get_ste    1728         struct cpuhp_step *sp = cpuhp_get_step(state);
2608         int cpu;                                 1729         int cpu;
2609                                                  1730 
2610         BUG_ON(cpuhp_cb_check(state));           1731         BUG_ON(cpuhp_cb_check(state));
2611                                                  1732 
2612         lockdep_assert_cpus_held();              1733         lockdep_assert_cpus_held();
2613                                                  1734 
2614         mutex_lock(&cpuhp_state_mutex);          1735         mutex_lock(&cpuhp_state_mutex);
2615         if (sp->multi_instance) {                1736         if (sp->multi_instance) {
2616                 WARN(!hlist_empty(&sp->list),    1737                 WARN(!hlist_empty(&sp->list),
2617                      "Error: Removing state %    1738                      "Error: Removing state %d which has instances left.\n",
2618                      state);                     1739                      state);
2619                 goto remove;                     1740                 goto remove;
2620         }                                        1741         }
2621                                                  1742 
2622         if (!invoke || !cpuhp_get_teardown_cb    1743         if (!invoke || !cpuhp_get_teardown_cb(state))
2623                 goto remove;                     1744                 goto remove;
2624                                                  1745 
2625         /*                                       1746         /*
2626          * Call the teardown callback for eac    1747          * Call the teardown callback for each present cpu depending
2627          * on the hotplug state of the cpu. T    1748          * on the hotplug state of the cpu. This function is not
2628          * allowed to fail currently!            1749          * allowed to fail currently!
2629          */                                      1750          */
2630         for_each_present_cpu(cpu) {              1751         for_each_present_cpu(cpu) {
2631                 struct cpuhp_cpu_state *st =     1752                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2632                 int cpustate = st->state;        1753                 int cpustate = st->state;
2633                                                  1754 
2634                 if (cpustate >= state)           1755                 if (cpustate >= state)
2635                         cpuhp_issue_call(cpu,    1756                         cpuhp_issue_call(cpu, state, false, NULL);
2636         }                                        1757         }
2637 remove:                                          1758 remove:
2638         cpuhp_store_callbacks(state, NULL, NU    1759         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2639         mutex_unlock(&cpuhp_state_mutex);        1760         mutex_unlock(&cpuhp_state_mutex);
2640 }                                                1761 }
2641 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked    1762 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2642                                                  1763 
2643 void __cpuhp_remove_state(enum cpuhp_state st    1764 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2644 {                                                1765 {
2645         cpus_read_lock();                        1766         cpus_read_lock();
2646         __cpuhp_remove_state_cpuslocked(state    1767         __cpuhp_remove_state_cpuslocked(state, invoke);
2647         cpus_read_unlock();                      1768         cpus_read_unlock();
2648 }                                                1769 }
2649 EXPORT_SYMBOL(__cpuhp_remove_state);             1770 EXPORT_SYMBOL(__cpuhp_remove_state);
2650                                                  1771 
2651 #ifdef CONFIG_HOTPLUG_SMT                                             << 
2652 static void cpuhp_offline_cpu_device(unsigned int cpu)                << 
2653 {                                                                     << 
2654         struct device *dev = get_cpu_device(cpu);                     << 
2655                                                                       << 
2656         dev->offline = true;                                          << 
2657         /* Tell user space about the state change */                  << 
2658         kobject_uevent(&dev->kobj, KOBJ_OFFLINE);                     << 
2659 }                                                                     << 
2660                                                                       << 
2661 static void cpuhp_online_cpu_device(unsigned int cpu)                 << 
2662 {                                                                     << 
2663         struct device *dev = get_cpu_device(cpu);                     << 
2664                                                                       << 
2665         dev->offline = false;                                         << 
2666         /* Tell user space about the state change */                  << 
2667         kobject_uevent(&dev->kobj, KOBJ_ONLINE);                      << 
2668 }                                                                     << 
2669                                                                       << 
2670 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)                 << 
2671 {                                                                     << 
2672         int cpu, ret = 0;                                             << 
2673                                                                       << 
2674         cpu_maps_update_begin();                                      << 
2675         for_each_online_cpu(cpu) {                                    << 
2676                 if (topology_is_primary_thread(cpu))                  << 
2677                         continue;                                     << 
2678                 /*                                                    << 
2679                  * Disable can be called with CPU_SMT_ENABLED when changing  << 
2680                  * from a higher to lower number of SMT threads per core.    << 
2681                  */                                                   << 
2682                 if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))  << 
2683                         continue;                                     << 
2684                 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);       << 
2685                 if (ret)                                              << 
2686                         break;                                        << 
2687                 /*                                                    << 
2688                  * As this needs to hold the cpu maps lock it's impossible   << 
2689                  * to call device_offline() because that ends up calling     << 
2690                  * cpu_down() which takes cpu maps lock. cpu maps lock       << 
2691                  * needs to be held as this might race against in kernel     << 
2692                  * abusers of the hotplug machinery (thermal management).    << 
2693                  *                                                    << 
2694                  * So nothing would update device:offline state. That would  << 
2695                  * leave the sysfs entry stale and prevent onlining after    << 
2696                  * smt control has been changed to 'off' again. This is      << 
2697                  * called under the sysfs hotplug lock, so it is properly    << 
2698                  * serialized against the regular offline usage.             << 
2699                  */                                                   << 
2700                 cpuhp_offline_cpu_device(cpu);                        << 
2701         }                                                             << 
2702         if (!ret)                                                     << 
2703                 cpu_smt_control = ctrlval;                            << 
2704         cpu_maps_update_done();                                       << 
2705         return ret;                                                   << 
2706 }                                                                     << 
2707                                                                       << 
2708 /* Check if the core a CPU belongs to is online */                    << 
2709 #if !defined(topology_is_core_online)                                 << 
2710 static inline bool topology_is_core_online(unsigned int cpu)          << 
2711 {                                                                     << 
2712         return true;                                                  << 
2713 }                                                                     << 
2714 #endif                                                                << 
2715                                                                       << 
2716 int cpuhp_smt_enable(void)                                            << 
2717 {                                                                     << 
2718         int cpu, ret = 0;                                             << 
2719                                                                       << 
2720         cpu_maps_update_begin();                                      << 
2721         cpu_smt_control = CPU_SMT_ENABLED;                            << 
2722         for_each_present_cpu(cpu) {                                   << 
2723                 /* Skip online CPUs and CPUs on offline nodes */      << 
2724                 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))       << 
2725                         continue;                                     << 
2726                 if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu))  << 
2727                         continue;                                     << 
2728                 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);                  << 
2729                 if (ret)                                              << 
2730                         break;                                        << 
2731                 /* See comment in cpuhp_smt_disable() */              << 
2732                 cpuhp_online_cpu_device(cpu);                         << 
2733         }                                                             << 
2734         cpu_maps_update_done();                                       << 
2735         return ret;                                                   << 
2736 }                                                                     << 
2737 #endif                                                                << 
2738                                               << 
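cpuhp_smt_disable()/cpuhp_smt_enable() are normally reached from user space through the SMT control file. A minimal user-space sketch, assuming the standard sysfs layout (run as root):

    /* user space, not kernel code */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/devices/system/cpu/smt/control", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "off", 3) != 3)   /* "on" re-enables siblings */
                    perror("write");
            close(fd);
            return 0;
    }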
2739 #if defined(CONFIG_SYSFS) && defined(CONFIG_H    1772 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2740 static ssize_t state_show(struct device *dev, !! 1773 static ssize_t show_cpuhp_state(struct device *dev,
2741                           struct device_attribute *attr, char *buf)  !!  1774                                 struct device_attribute *attr, char *buf)
2742 {                                                1775 {
2743         struct cpuhp_cpu_state *st = per_cpu_    1776         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2744                                                  1777 
2745         return sprintf(buf, "%d\n", st->state    1778         return sprintf(buf, "%d\n", st->state);
2746 }                                                1779 }
2747 static DEVICE_ATTR_RO(state);                 !! 1780 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
2748                                                  1781 
2749 static ssize_t target_store(struct device *dev, struct device_attribute *attr,  !!  1782 static ssize_t write_cpuhp_target(struct device *dev,
2750                             const char *buf, size_t count)  !!  1783                                   struct device_attribute *attr,
                                                   >> 1784                                   const char *buf, size_t count)
2751 {                                                1785 {
2752         struct cpuhp_cpu_state *st = per_cpu_    1786         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2753         struct cpuhp_step *sp;                   1787         struct cpuhp_step *sp;
2754         int target, ret;                         1788         int target, ret;
2755                                                  1789 
2756         ret = kstrtoint(buf, 10, &target);       1790         ret = kstrtoint(buf, 10, &target);
2757         if (ret)                                 1791         if (ret)
2758                 return ret;                      1792                 return ret;
2759                                                  1793 
2760 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL          1794 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2761         if (target < CPUHP_OFFLINE || target     1795         if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2762                 return -EINVAL;                  1796                 return -EINVAL;
2763 #else                                            1797 #else
2764         if (target != CPUHP_OFFLINE && target    1798         if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2765                 return -EINVAL;                  1799                 return -EINVAL;
2766 #endif                                           1800 #endif
2767                                                  1801 
2768         ret = lock_device_hotplug_sysfs();       1802         ret = lock_device_hotplug_sysfs();
2769         if (ret)                                 1803         if (ret)
2770                 return ret;                      1804                 return ret;
2771                                                  1805 
2772         mutex_lock(&cpuhp_state_mutex);          1806         mutex_lock(&cpuhp_state_mutex);
2773         sp = cpuhp_get_step(target);             1807         sp = cpuhp_get_step(target);
2774         ret = !sp->name || sp->cant_stop ? -E    1808         ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2775         mutex_unlock(&cpuhp_state_mutex);        1809         mutex_unlock(&cpuhp_state_mutex);
2776         if (ret)                                 1810         if (ret)
2777                 goto out;                        1811                 goto out;
2778                                                  1812 
2779         if (st->state < target)                  1813         if (st->state < target)
2780                 ret = cpu_up(dev->id, target);  !!  1814                 ret = do_cpu_up(dev->id, target);
2781         else if (st->state > target)  !!  1815         else
2782                 ret = cpu_down(dev->id, target);  !!  1816                 ret = do_cpu_down(dev->id, target);
2783         else if (WARN_ON(st->target != target))  << 
2784                 st->target = target;  << 
2785 out:                                             1817 out:
2786         unlock_device_hotplug();                 1818         unlock_device_hotplug();
2787         return ret ? ret : count;                1819         return ret ? ret : count;
2788 }                                                1820 }
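target_store() lets user space drive a CPU to a named, non-cant_stop state; without CONFIG_CPU_HOTPLUG_STATE_CONTROL only CPUHP_OFFLINE and CPUHP_ONLINE are accepted. A user-space sketch that offlines CPU 1 by writing state 0 (CPUHP_OFFLINE), assuming the standard sysfs layout:

    /* user space, not kernel code */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/devices/system/cpu/cpu1/hotplug/target", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "0", 1) != 1)     /* 0 == CPUHP_OFFLINE */
                    perror("write");
            close(fd);
            return 0;
    }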
2789                                                  1821 
2790 static ssize_t target_show(struct device *dev,  !!  1822 static ssize_t show_cpuhp_target(struct device *dev,
2791                            struct device_attribute *attr, char *buf)  !!  1823                                  struct device_attribute *attr, char *buf)
2792 {                                                1824 {
2793         struct cpuhp_cpu_state *st = per_cpu_    1825         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2794                                                  1826 
2795         return sprintf(buf, "%d\n", st->targe    1827         return sprintf(buf, "%d\n", st->target);
2796 }                                                1828 }
2797 static DEVICE_ATTR_RW(target);                !! 1829 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
                                                   >> 1830 
2798                                                  1831 
2799 static ssize_t fail_store(struct device *dev, struct device_attribute *attr,  !!  1832 static ssize_t write_cpuhp_fail(struct device *dev,
2800                           const char *buf, size_t count)  !!  1833                                 struct device_attribute *attr,
                                                   >> 1834                                 const char *buf, size_t count)
2801 {                                                1835 {
2802         struct cpuhp_cpu_state *st = per_cpu_    1836         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2803         struct cpuhp_step *sp;                   1837         struct cpuhp_step *sp;
2804         int fail, ret;                           1838         int fail, ret;
2805                                                  1839 
2806         ret = kstrtoint(buf, 10, &fail);         1840         ret = kstrtoint(buf, 10, &fail);
2807         if (ret)                                 1841         if (ret)
2808                 return ret;                      1842                 return ret;
2809                                                  1843 
2810         if (fail == CPUHP_INVALID) {          << 
2811                 st->fail = fail;              << 
2812                 return count;                 << 
2813         }                                     << 
2814                                               << 
2815         if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)  << 
2816                 return -EINVAL;               << 
2817                                               << 
2818         /*                                       1844         /*
2819          * Cannot fail STARTING/DYING callbac    1845          * Cannot fail STARTING/DYING callbacks.
2820          */                                      1846          */
2821         if (cpuhp_is_atomic_state(fail))         1847         if (cpuhp_is_atomic_state(fail))
2822                 return -EINVAL;                  1848                 return -EINVAL;
2823                                                  1849 
2824         /*                                       1850         /*
2825          * DEAD callbacks cannot fail...      << 
2826          * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter is  << 
2827          * triggering STARTING callbacks, a failure in this state would  << 
2828          * hinder rollback.                   << 
2829          */                                   << 
2830         if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)  << 
2831                 return -EINVAL;               << 
2832                                               << 
2833         /*                                    << 
2834          * Cannot fail anything that doesn't     1851          * Cannot fail anything that doesn't have callbacks.
2835          */                                      1852          */
2836         mutex_lock(&cpuhp_state_mutex);          1853         mutex_lock(&cpuhp_state_mutex);
2837         sp = cpuhp_get_step(fail);               1854         sp = cpuhp_get_step(fail);
2838         if (!sp->startup.single && !sp->teard    1855         if (!sp->startup.single && !sp->teardown.single)
2839                 ret = -EINVAL;                   1856                 ret = -EINVAL;
2840         mutex_unlock(&cpuhp_state_mutex);        1857         mutex_unlock(&cpuhp_state_mutex);
2841         if (ret)                                 1858         if (ret)
2842                 return ret;                      1859                 return ret;
2843                                                  1860 
2844         st->fail = fail;                         1861         st->fail = fail;
2845                                                  1862 
2846         return count;                            1863         return count;
2847 }                                                1864 }
2848                                                  1865 
2849 static ssize_t fail_show(struct device *dev,  !! 1866 static ssize_t show_cpuhp_fail(struct device *dev,
2850                          struct device_attribute *attr, char *buf)  !!  1867                                struct device_attribute *attr, char *buf)
2851 {                                                1868 {
2852         struct cpuhp_cpu_state *st = per_cpu_    1869         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2853                                                  1870 
2854         return sprintf(buf, "%d\n", st->fail)    1871         return sprintf(buf, "%d\n", st->fail);
2855 }                                                1872 }
2856                                                  1873 
2857 static DEVICE_ATTR_RW(fail);                  !! 1874 static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
2858                                                  1875 
2859 static struct attribute *cpuhp_cpu_attrs[] =     1876 static struct attribute *cpuhp_cpu_attrs[] = {
2860         &dev_attr_state.attr,                    1877         &dev_attr_state.attr,
2861         &dev_attr_target.attr,                   1878         &dev_attr_target.attr,
2862         &dev_attr_fail.attr,                     1879         &dev_attr_fail.attr,
2863         NULL                                     1880         NULL
2864 };                                               1881 };
2865                                                  1882 
2866 static const struct attribute_group cpuhp_cpu    1883 static const struct attribute_group cpuhp_cpu_attr_group = {
2867         .attrs = cpuhp_cpu_attrs,                1884         .attrs = cpuhp_cpu_attrs,
2868         .name = "hotplug",                       1885         .name = "hotplug",
2869         NULL                                     1886         NULL
2870 };                                               1887 };
2871                                                  1888 
2872 static ssize_t states_show(struct device *dev, !! 1889 static ssize_t show_cpuhp_states(struct device *dev,
2873                                  struct devic    1890                                  struct device_attribute *attr, char *buf)
2874 {                                                1891 {
2875         ssize_t cur, res = 0;                    1892         ssize_t cur, res = 0;
2876         int i;                                   1893         int i;
2877                                                  1894 
2878         mutex_lock(&cpuhp_state_mutex);          1895         mutex_lock(&cpuhp_state_mutex);
2879         for (i = CPUHP_OFFLINE; i <= CPUHP_ON    1896         for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2880                 struct cpuhp_step *sp = cpuhp    1897                 struct cpuhp_step *sp = cpuhp_get_step(i);
2881                                                  1898 
2882                 if (sp->name) {                  1899                 if (sp->name) {
2883                         cur = sprintf(buf, "%    1900                         cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2884                         buf += cur;              1901                         buf += cur;
2885                         res += cur;              1902                         res += cur;
2886                 }                                1903                 }
2887         }                                        1904         }
2888         mutex_unlock(&cpuhp_state_mutex);        1905         mutex_unlock(&cpuhp_state_mutex);
2889         return res;                              1906         return res;
2890 }                                                1907 }
2891 static DEVICE_ATTR_RO(states);                !! 1908 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
2892                                                  1909 
2893 static struct attribute *cpuhp_cpu_root_attrs    1910 static struct attribute *cpuhp_cpu_root_attrs[] = {
2894         &dev_attr_states.attr,                   1911         &dev_attr_states.attr,
2895         NULL                                     1912         NULL
2896 };                                               1913 };
2897                                                  1914 
2898 static const struct attribute_group cpuhp_cpu    1915 static const struct attribute_group cpuhp_cpu_root_attr_group = {
2899         .attrs = cpuhp_cpu_root_attrs,           1916         .attrs = cpuhp_cpu_root_attrs,
2900         .name = "hotplug",                       1917         .name = "hotplug",
2901         NULL                                     1918         NULL
2902 };                                               1919 };
2903                                                  1920 
2904 #ifdef CONFIG_HOTPLUG_SMT                     << 
2905                                               << 
2906 static bool cpu_smt_num_threads_valid(unsigned int threads)            <<
2907 {                                             <<
2908         if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))                <<
2909                 return threads >= 1 && threads <= cpu_smt_max_threads; <<
2910         return threads == 1 || threads == cpu_smt_max_threads;         <<
2911 }                                             << 
2912                                               << 
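
cpu_smt_num_threads_valid() accepts any count in [1, cpu_smt_max_threads] when CONFIG_SMT_NUM_THREADS_DYNAMIC is set, and only the two extremes (1 or cpu_smt_max_threads) otherwise. A stand-alone model of the same predicate, with an arbitrary stand-in maximum of 8:

    /* Stand-alone model of cpu_smt_num_threads_valid(); not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    #define SMT_NUM_THREADS_DYNAMIC 0     /* flip to 1 to model CONFIG_SMT_NUM_THREADS_DYNAMIC */
    static unsigned int cpu_smt_max_threads = 8;  /* arbitrary stand-in */

    static bool smt_threads_valid(unsigned int threads)
    {
            if (SMT_NUM_THREADS_DYNAMIC)
                    return threads >= 1 && threads <= cpu_smt_max_threads;
            return threads == 1 || threads == cpu_smt_max_threads;
    }

    int main(void)
    {
            unsigned int t;

            for (t = 0; t <= 9; t++)
                    printf("%u -> %s\n", t, smt_threads_valid(t) ? "valid" : "invalid");
            return 0;
    }
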
2913 static ssize_t                                <<
2914 __store_smt_control(struct device *dev, struct device_attribute *attr, <<
2915                     const char *buf, size_t count)                     <<
2916 {                                             <<
2917         int ctrlval, ret, num_threads, orig_threads;                   <<
2918         bool force_off;                       <<
2919                                               <<
2920         if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)                 <<
2921                 return -EPERM;                <<
2922                                               <<
2923         if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)                  <<
2924                 return -ENODEV;               <<
2925                                               <<
2926         if (sysfs_streq(buf, "on")) {         <<
2927                 ctrlval = CPU_SMT_ENABLED;    <<
2928                 num_threads = cpu_smt_max_threads;                     <<
2929         } else if (sysfs_streq(buf, "off")) { <<
2930                 ctrlval = CPU_SMT_DISABLED;   <<
2931                 num_threads = 1;              <<
2932         } else if (sysfs_streq(buf, "forceoff")) {                     <<
2933                 ctrlval = CPU_SMT_FORCE_DISABLED;                      <<
2934                 num_threads = 1;              <<
2935         } else if (kstrtoint(buf, 10, &num_threads) == 0) {            <<
2936                 if (num_threads == 1)         <<
2937                         ctrlval = CPU_SMT_DISABLED;                    <<
2938                 else if (cpu_smt_num_threads_valid(num_threads))       <<
2939                         ctrlval = CPU_SMT_ENABLED;                     <<
2940                 else                          <<
2941                         return -EINVAL;       <<
2942         } else {                              <<
2943                 return -EINVAL;               <<
2944         }                                     <<
2945                                               <<
2946         ret = lock_device_hotplug_sysfs();    <<
2947         if (ret)                              <<
2948                 return ret;                   <<
2949                                               <<
2950         orig_threads = cpu_smt_num_threads;   <<
2951         cpu_smt_num_threads = num_threads;    <<
2952                                               <<
2953         force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED; <<
2954                                               <<
2955         if (num_threads > orig_threads)       <<
2956                 ret = cpuhp_smt_enable();     <<
2957         else if (num_threads < orig_threads || force_off)              <<
2958                 ret = cpuhp_smt_disable(ctrlval);                      <<
2959                                               <<
2960         unlock_device_hotplug();              <<
2961         return ret ? ret : count;             <<
2962 }                                             << 
2963                                               << 
2964 #else /* !CONFIG_HOTPLUG_SMT */               << 
2965 static ssize_t                                << 
2966 __store_smt_control(struct device *dev, struct device_attribute *attr, <<
2967                     const char *buf, size_t count)                     <<
2968 {                                             << 
2969         return -ENODEV;                       << 
2970 }                                             << 
2971 #endif /* CONFIG_HOTPLUG_SMT */               << 
2972                                               << 
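__store_smt_control() thus accepts the literal strings "on", "off" and "forceoff", or a bare decimal thread count. A hedged user-space sketch that requests two threads per core, assuming the standard /sys/devices/system/cpu/smt/control location (root-only; which counts are valid depends on the running kernel's cpu_smt_num_threads_valid()):

    /* User-space sketch: partially disable SMT by writing a thread count.
     * "control" exists on kernels built with CONFIG_HOTPLUG_SMT; note
     * that "forceoff" is one-way until reboot (the -EPERM check above).
     */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/devices/system/cpu/smt/control", "w");

            if (!f) {
                    perror("smt/control");
                    return 1;
            }
            fputs("2", f);  /* or "on", "off", "forceoff" */
            fclose(f);
            return 0;
    }
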
2973 static const char *smt_states[] = {           << 
2974         [CPU_SMT_ENABLED]               = "on",             <<
2975         [CPU_SMT_DISABLED]              = "off",            <<
2976         [CPU_SMT_FORCE_DISABLED]        = "forceoff",       <<
2977         [CPU_SMT_NOT_SUPPORTED]         = "notsupported",   <<
2978         [CPU_SMT_NOT_IMPLEMENTED]       = "notimplemented", <<
2979 };                                            << 
2980                                               << 
2981 static ssize_t control_show(struct device *dev,                        <<
2982                             struct device_attribute *attr, char *buf)  <<
2983 {                                             <<
2984         const char *state = smt_states[cpu_smt_control];               <<
2985                                               <<
2986 #ifdef CONFIG_HOTPLUG_SMT                     <<
2987         /*                                    <<
2988          * If SMT is enabled but not all threads are enabled then show the    <<
2989          * number of threads. If all threads are enabled show "on". Otherwise <<
2990          * show the state name.               <<
2991          */                                   <<
2992         if (cpu_smt_control == CPU_SMT_ENABLED &&                      <<
2993             cpu_smt_num_threads != cpu_smt_max_threads)                <<
2994                 return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);   <<
2995 #endif                                        <<
2996                                               <<
2997         return sysfs_emit(buf, "%s\n", state);                         <<
2998 }                                             << 
2999                                               << 
3000 static ssize_t control_store(struct device *dev, struct device_attribute *attr, <<
3001                              const char *buf, size_t count)            <<
3002 {                                             <<
3003         return __store_smt_control(dev, attr, buf, count);             <<
3004 }                                             << 
3005 static DEVICE_ATTR_RW(control);               << 
3006                                               << 
3007 static ssize_t active_show(struct device *dev,                         <<
3008                            struct device_attribute *attr, char *buf)   <<
3009 {                                             <<
3010         return sysfs_emit(buf, "%d\n", sched_smt_active());            <<
3011 }                                             << 
3012 static DEVICE_ATTR_RO(active);                << 
3013                                               << 
3014 static struct attribute *cpuhp_smt_attrs[] =  << 
3015         &dev_attr_control.attr,               << 
3016         &dev_attr_active.attr,                << 
3017         NULL                                  << 
3018 };                                            << 
3019                                               << 
3020 static const struct attribute_group cpuhp_smt_attr_group = {           <<
3021         .attrs = cpuhp_smt_attrs,             << 
3022         .name = "smt",                        << 
3023         NULL                                  << 
3024 };                                            << 
3025                                               << 
3026 static int __init cpu_smt_sysfs_init(void)    << 
3027 {                                             << 
3028         struct device *dev_root;              << 
3029         int ret = -ENODEV;                    << 
3030                                               << 
3031         dev_root = bus_get_dev_root(&cpu_subsys);                      <<
3032         if (dev_root) {                       <<
3033                 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group); <<
3034                 put_device(dev_root);         << 
3035         }                                     << 
3036         return ret;                           << 
3037 }                                             << 
3038                                               << 
3039 static int __init cpuhp_sysfs_init(void)         1921 static int __init cpuhp_sysfs_init(void)
3040 {                                                1922 {
3041         struct device *dev_root;              << 
3042         int cpu, ret;                            1923         int cpu, ret;
3043                                                  1924 
3044         ret = cpu_smt_sysfs_init();           !! 1925         ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                                   >> 1926                                  &cpuhp_cpu_root_attr_group);
3045         if (ret)                                 1927         if (ret)
3046                 return ret;                      1928                 return ret;
3047                                                  1929 
3048         dev_root = bus_get_dev_root(&cpu_subsys);                      <<
3049         if (dev_root) {                       <<
3050                 ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group); <<
3051                 put_device(dev_root);         << 
3052                 if (ret)                      << 
3053                         return ret;           << 
3054         }                                     << 
3055                                               << 
3056         for_each_possible_cpu(cpu) {             1930         for_each_possible_cpu(cpu) {
3057                 struct device *dev = get_cpu_    1931                 struct device *dev = get_cpu_device(cpu);
3058                                                  1932 
3059                 if (!dev)                        1933                 if (!dev)
3060                         continue;                1934                         continue;
3061                 ret = sysfs_create_group(&dev    1935                 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
3062                 if (ret)                         1936                 if (ret)
3063                         return ret;              1937                         return ret;
3064         }                                        1938         }
3065         return 0;                                1939         return 0;
3066 }                                                1940 }
3067 device_initcall(cpuhp_sysfs_init);               1941 device_initcall(cpuhp_sysfs_init);
3068 #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */ !! 1942 #endif
3069                                                  1943 
3070 /*                                               1944 /*
3071  * cpu_bit_bitmap[] is a special, "compressed    1945  * cpu_bit_bitmap[] is a special, "compressed" data structure that
3072  * represents all NR_CPUS bits binary values     1946  * represents all NR_CPUS bits binary values of 1<<nr.
3073  *                                               1947  *
3074  * It is used by cpumask_of() to get a consta    1948  * It is used by cpumask_of() to get a constant address to a CPU
3075  * mask value that has a single bit set only.    1949  * mask value that has a single bit set only.
3076  */                                              1950  */
3077                                                  1951 
3078 /* cpu_bit_bitmap[0] is empty - so we can bac    1952 /* cpu_bit_bitmap[0] is empty - so we can back into it */
3079 #define MASK_DECLARE_1(x)       [x+1][0] = (1    1953 #define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
3080 #define MASK_DECLARE_2(x)       MASK_DECLARE_    1954 #define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
3081 #define MASK_DECLARE_4(x)       MASK_DECLARE_    1955 #define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
3082 #define MASK_DECLARE_8(x)       MASK_DECLARE_    1956 #define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
3083                                                  1957 
3084 const unsigned long cpu_bit_bitmap[BITS_PER_L    1958 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
3085                                                  1959 
3086         MASK_DECLARE_8(0),      MASK_DECLARE_    1960         MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
3087         MASK_DECLARE_8(16),     MASK_DECLARE_    1961         MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
3088 #if BITS_PER_LONG > 32                           1962 #if BITS_PER_LONG > 32
3089         MASK_DECLARE_8(32),     MASK_DECLARE_    1963         MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
3090         MASK_DECLARE_8(48),     MASK_DECLARE_    1964         MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
3091 #endif                                           1965 #endif
3092 };                                               1966 };
3093 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);               1967 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
3094                                                  1968 
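The consumer of this table lives in include/linux/cpumask.h; roughly, as a sketch of that header's get_cpu_mask() (not part of this file):

    /* Sketch of how get_cpu_mask() indexes cpu_bit_bitmap (see
     * include/linux/cpumask.h). Row 1 + cpu % BITS_PER_LONG holds a
     * word with bit (cpu % BITS_PER_LONG) set; stepping the pointer
     * back by cpu / BITS_PER_LONG words moves that word into the
     * right slot of the returned mask, relying on the all-zero row 0
     * (and lower rows) as backing storage for the preceding words.
     */
    static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
    {
            const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
            p -= cpu / BITS_PER_LONG;
            return to_cpumask(p);
    }
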
3095 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) =    1969 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
3096 EXPORT_SYMBOL(cpu_all_bits);                     1970 EXPORT_SYMBOL(cpu_all_bits);
3097                                                  1971 
3098 #ifdef CONFIG_INIT_ALL_POSSIBLE                  1972 #ifdef CONFIG_INIT_ALL_POSSIBLE
3099 struct cpumask __cpu_possible_mask __ro_after_init !! 1973 struct cpumask __cpu_possible_mask __read_mostly
3100         = {CPU_BITS_ALL};                        1974         = {CPU_BITS_ALL};
3101 #else                                            1975 #else
3102 struct cpumask __cpu_possible_mask __ro_after_init; !! 1976 struct cpumask __cpu_possible_mask __read_mostly;
3103 #endif                                           1977 #endif
3104 EXPORT_SYMBOL(__cpu_possible_mask);              1978 EXPORT_SYMBOL(__cpu_possible_mask);
3105                                                  1979 
3106 struct cpumask __cpu_online_mask __read_mostl    1980 struct cpumask __cpu_online_mask __read_mostly;
3107 EXPORT_SYMBOL(__cpu_online_mask);                1981 EXPORT_SYMBOL(__cpu_online_mask);
3108                                                  1982 
3109 struct cpumask __cpu_enabled_mask __read_mostly; <<
3110 EXPORT_SYMBOL(__cpu_enabled_mask);            << 
3111                                               << 
3112 struct cpumask __cpu_present_mask __read_most    1983 struct cpumask __cpu_present_mask __read_mostly;
3113 EXPORT_SYMBOL(__cpu_present_mask);               1984 EXPORT_SYMBOL(__cpu_present_mask);
3114                                                  1985 
3115 struct cpumask __cpu_active_mask __read_mostl    1986 struct cpumask __cpu_active_mask __read_mostly;
3116 EXPORT_SYMBOL(__cpu_active_mask);                1987 EXPORT_SYMBOL(__cpu_active_mask);
3117                                                  1988 
3118 struct cpumask __cpu_dying_mask __read_mostly; <<
3119 EXPORT_SYMBOL(__cpu_dying_mask);              << 
3120                                               << 
3121 atomic_t __num_online_cpus __read_mostly;     << 
3122 EXPORT_SYMBOL(__num_online_cpus);             << 
3123                                               << 
3124 void init_cpu_present(const struct cpumask *s    1989 void init_cpu_present(const struct cpumask *src)
3125 {                                                1990 {
3126         cpumask_copy(&__cpu_present_mask, src    1991         cpumask_copy(&__cpu_present_mask, src);
3127 }                                                1992 }
3128                                                  1993 
3129 void init_cpu_possible(const struct cpumask *    1994 void init_cpu_possible(const struct cpumask *src)
3130 {                                                1995 {
3131         cpumask_copy(&__cpu_possible_mask, sr    1996         cpumask_copy(&__cpu_possible_mask, src);
3132 }                                                1997 }
3133                                                  1998 
3134 void init_cpu_online(const struct cpumask *sr    1999 void init_cpu_online(const struct cpumask *src)
3135 {                                                2000 {
3136         cpumask_copy(&__cpu_online_mask, src)    2001         cpumask_copy(&__cpu_online_mask, src);
3137 }                                                2002 }
3138                                                  2003 
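These init_cpu_*() helpers are called once by architecture setup code to seed the masks before SMP bringup. An illustrative sketch of that pattern; firmware_cpu_count() is an invented placeholder for whatever enumeration the platform provides, not a kernel API:

    /* Hypothetical arch setup sketch: seed the possible/present masks
     * from firmware enumeration before SMP bringup.
     */
    static void __init example_arch_smp_setup(void)
    {
            struct cpumask mask;
            unsigned int i;

            cpumask_clear(&mask);
            for (i = 0; i < firmware_cpu_count(); i++)  /* invented placeholder */
                    cpumask_set_cpu(i, &mask);

            init_cpu_possible(&mask);
            init_cpu_present(&mask);
    }
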
3139 void set_cpu_online(unsigned int cpu, bool online)                     <<
3140 {                                             <<
3141         /*                                    <<
3142          * atomic_inc/dec() is required to handle the horrid abuse of this  <<
3143          * function by the reboot and kexec code which invoke it from an    <<
3144          * IPI/NMI broadcasts when shutting down CPUs. Invocation from      <<
3145          * regular CPU hotplug is properly serialized.                      <<
3146          *                                    <<
3147          * Note, that the fact that __num_online_cpus is of type atomic_t   <<
3148          * does not protect readers which are not serialized against        <<
3149          * concurrent hotplug operations.     <<
3150          */                                   <<
3151         if (online) {                         <<
3152                 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))     <<
3153                         atomic_inc(&__num_online_cpus);                     <<
3154         } else {                              <<
3155                 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))    <<
3156                         atomic_dec(&__num_online_cpus);                     <<
3157         }                                     <<
3158 }                                             << 
3159                                               << 
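Readers pair with the atomic_inc/dec above through num_online_cpus(); approximately, per include/linux/cpumask.h, it is an unlocked atomic read, which is why the comment warns that unserialized readers still race with hotplug:

    /* Approximate reader side (include/linux/cpumask.h): an unlocked
     * atomic read; the result can be stale unless the caller holds
     * cpus_read_lock() or is otherwise serialized against hotplug.
     */
    static inline unsigned int num_online_cpus(void)
    {
            return raw_atomic_read(&__num_online_cpus);
    }
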
3160 /*                                               2004 /*
3161  * Activate the first processor.                 2005  * Activate the first processor.
3162  */                                              2006  */
3163 void __init boot_cpu_init(void)                  2007 void __init boot_cpu_init(void)
3164 {                                                2008 {
3165         int cpu = smp_processor_id();            2009         int cpu = smp_processor_id();
3166                                                  2010 
3167         /* Mark the boot cpu "present", "onli    2011         /* Mark the boot cpu "present", "online" etc for SMP and UP case */
3168         set_cpu_online(cpu, true);               2012         set_cpu_online(cpu, true);
3169         set_cpu_active(cpu, true);               2013         set_cpu_active(cpu, true);
3170         set_cpu_present(cpu, true);              2014         set_cpu_present(cpu, true);
3171         set_cpu_possible(cpu, true);             2015         set_cpu_possible(cpu, true);
3172                                                  2016 
3173 #ifdef CONFIG_SMP                                2017 #ifdef CONFIG_SMP
3174         __boot_cpu_id = cpu;                     2018         __boot_cpu_id = cpu;
3175 #endif                                           2019 #endif
3176 }                                                2020 }
3177                                                  2021 
3178 /*                                               2022 /*
3179  * Must be called _AFTER_ setting up the per_    2023  * Must be called _AFTER_ setting up the per_cpu areas
3180  */                                              2024  */
3181 void __init boot_cpu_hotplug_init(void)       !! 2025 void __init boot_cpu_state_init(void)
3182 {                                                2026 {
3183 #ifdef CONFIG_SMP                             !! 2027         per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
3184         cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);   <<
3185         atomic_set(this_cpu_ptr(&cpuhp_state.ap_sync_state), SYNC_STATE_ONLINE); <<
3186 #endif                                        <<
3187         this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);               <<
3188         this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);              <<
3189 }                                                2028 }
3190                                               << 
3191 #ifdef CONFIG_CPU_MITIGATIONS                 << 
3192 /*                                            << 
3193  * These are used for a global "mitigations=" cmdline option for toggling <<
3194  * optional CPU mitigations.                  << 
3195  */                                           << 
3196 enum cpu_mitigations {                        << 
3197         CPU_MITIGATIONS_OFF,                  << 
3198         CPU_MITIGATIONS_AUTO,                 << 
3199         CPU_MITIGATIONS_AUTO_NOSMT,           << 
3200 };                                            << 
3201                                               << 
3202 static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO; <<
3203                                               << 
3204 static int __init mitigations_parse_cmdline(char *arg)                 <<
3205 {                                             <<
3206         if (!strcmp(arg, "off"))              <<
3207                 cpu_mitigations = CPU_MITIGATIONS_OFF;                 <<
3208         else if (!strcmp(arg, "auto"))        <<
3209                 cpu_mitigations = CPU_MITIGATIONS_AUTO;                <<
3210         else if (!strcmp(arg, "auto,nosmt"))  <<
3211                 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;          <<
3212         else                                  <<
3213                 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n", <<
3214                         arg);                 <<
3215                                               << 
3216         return 0;                             << 
3217 }                                             << 
3218                                               << 
3219 /* mitigations=off */                         << 
3220 bool cpu_mitigations_off(void)                << 
3221 {                                             << 
3222         return cpu_mitigations == CPU_MITIGATIONS_OFF;                 <<
3223 }                                             << 
3224 EXPORT_SYMBOL_GPL(cpu_mitigations_off);       << 
3225                                               << 
3226 /* mitigations=auto,nosmt */                  << 
3227 bool cpu_mitigations_auto_nosmt(void)         << 
3228 {                                             << 
3229         return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;          <<
3230 }                                             << 
3231 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt); <<
3232 #else                                         << 
3233 static int __init mitigations_parse_cmdline(char *arg)                 <<
3234 {                                             <<
3235         pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n"); <<
3236         return 0;                             << 
3237 }                                             << 
3238 #endif                                        << 
3239 early_param("mitigations", mitigations_parse_cmdline); <<
3240                                                  2029 
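
Architecture mitigation-selection code consults these predicates at boot. An illustrative sketch of the usual pattern; enable_mitigation() is an invented placeholder, while cpu_smt_disable() is the real helper declared in <linux/cpu.h>:

    /* Illustrative arch-side pattern (not from this file): honour
     * "mitigations=off" and "mitigations=auto,nosmt" when selecting
     * a vulnerability mitigation.
     */
    static void __init example_bug_select_mitigation(void)
    {
            if (cpu_mitigations_off())
                    return;                 /* leave the CPU unmitigated */

            enable_mitigation();            /* invented placeholder */

            if (cpu_mitigations_auto_nosmt())
                    cpu_smt_disable(false); /* disable SMT, not forced */
    }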
