
TOMOYO Linux Cross Reference
Linux/kernel/stop_machine.c


Diff markup

Differences between /kernel/stop_machine.c (Version linux-6.11.5) and /kernel/stop_machine.c (Version ccs-tools-1.8.9): the file does not exist in ccs-tools-1.8.9, so the right-hand diff column is empty and the listing below shows the linux-6.11.5 source in full.


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005     IBM Corporation.
 * Copyright (C) 2008, 2005     Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010           SUSE Linux Products GmbH
 * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
        atomic_t                nr_todo;        /* nr left to execute */
        int                     ret;            /* collected return value */
        struct completion       completion;     /* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
        struct task_struct      *thread;

        raw_spinlock_t          lock;
        bool                    enabled;        /* is this stopper enabled? */
        struct list_head        works;          /* list of pending works */

        struct cpu_stop_work    stop_work;      /* for stop_cpus */
        unsigned long           caller;
        cpu_stop_fn_t           fn;
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

void print_stop_info(const char *log_lvl, struct task_struct *task)
{
        /*
         * If @task is a stopper task, it cannot migrate and task_cpu() is
         * stable.
         */
        struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));

        if (task != stopper->thread)
                return;

        printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
}

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
        memset(done, 0, sizeof(*done));
        atomic_set(&done->nr_todo, nr_todo);
        init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
        if (atomic_dec_and_test(&done->nr_todo))
                complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
                                        struct cpu_stop_work *work,
                                        struct wake_q_head *wakeq)
{
        list_add_tail(&work->list, &stopper->works);
        wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        DEFINE_WAKE_Q(wakeq);
        unsigned long flags;
        bool enabled;

        preempt_disable();
        raw_spin_lock_irqsave(&stopper->lock, flags);
        enabled = stopper->enabled;
        if (enabled)
                __cpu_stop_queue_work(stopper, work, &wakeq);
        else if (work->done)
                cpu_stop_signal_done(work->done);
        raw_spin_unlock_irqrestore(&stopper->lock, flags);

        wake_up_q(&wakeq);
        preempt_enable();

        return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
        struct cpu_stop_done done;
        struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };

        cpu_stop_init_done(&done, 1);
        if (!cpu_stop_queue_work(cpu, &work))
                return -ENOENT;
        /*
         * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
         * cycle by doing a preemption:
         */
        cond_resched();
        wait_for_completion(&done.completion);
        return done.ret;
}

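/*
 * Editorial example, not part of the upstream file: a minimal sketch of
 * calling stop_one_cpu().  The two helpers below are hypothetical; only
 * stop_one_cpu() itself is the API documented above.
 */
static int __maybe_unused example_report_cpu(void *arg)
{
        /* Runs on the target cpu, which the stopper thread monopolizes. */
        pr_info("cpu_stop callback on cpu%d\n", smp_processor_id());
        return 0;
}

static int __maybe_unused example_stop_one(unsigned int cpu)
{
        /* -ENOENT if @cpu was offline, otherwise the callback's return. */
        return stop_one_cpu(cpu, example_report_cpu, NULL);
}
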
/* This controls the threads on each CPU. */
enum multi_stop_state {
        /* Dummy starting state for thread. */
        MULTI_STOP_NONE,
        /* Awaiting everyone to be scheduled. */
        MULTI_STOP_PREPARE,
        /* Disable interrupts. */
        MULTI_STOP_DISABLE_IRQ,
        /* Run the function */
        MULTI_STOP_RUN,
        /* Exit */
        MULTI_STOP_EXIT,
};

struct multi_stop_data {
        cpu_stop_fn_t           fn;
        void                    *data;
        /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
        unsigned int            num_threads;
        const struct cpumask    *active_cpus;

        enum multi_stop_state   state;
        atomic_t                thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
                      enum multi_stop_state newstate)
{
        /* Reset ack counter. */
        atomic_set(&msdata->thread_ack, msdata->num_threads);
        smp_wmb();
        WRITE_ONCE(msdata->state, newstate);
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
        if (atomic_dec_and_test(&msdata->thread_ack))
                set_state(msdata, msdata->state + 1);
}

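/*
 * Editorial illustration, not part of the upstream file: with
 * num_threads == 3, set_state() arms thread_ack = 3 and every stopper
 * thread calls ack_state() exactly once per state it observes.  The
 * third ack advances msdata->state, so the machine walks
 *
 *      MULTI_STOP_PREPARE -> MULTI_STOP_DISABLE_IRQ ->
 *      MULTI_STOP_RUN -> MULTI_STOP_EXIT
 *
 * in lockstep, each transition gated on the slowest CPU.
 */
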
notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
{
        cpu_relax();
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
        struct multi_stop_data *msdata = data;
        enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
        int cpu = smp_processor_id(), err = 0;
        const struct cpumask *cpumask;
        unsigned long flags;
        bool is_active;

        /*
         * When called from stop_machine_from_inactive_cpu(), irq might
         * already be disabled.  Save the state and restore it on exit.
         */
        local_save_flags(flags);

        if (!msdata->active_cpus) {
                cpumask = cpu_online_mask;
                is_active = cpu == cpumask_first(cpumask);
        } else {
                cpumask = msdata->active_cpus;
                is_active = cpumask_test_cpu(cpu, cpumask);
        }

        /* Simple state machine */
        do {
                /* Chill out and ensure we re-read multi_stop_state. */
                stop_machine_yield(cpumask);
                newstate = READ_ONCE(msdata->state);
                if (newstate != curstate) {
                        curstate = newstate;
                        switch (curstate) {
                        case MULTI_STOP_DISABLE_IRQ:
                                local_irq_disable();
                                hard_irq_disable();
                                break;
                        case MULTI_STOP_RUN:
                                if (is_active)
                                        err = msdata->fn(msdata->data);
                                break;
                        default:
                                break;
                        }
                        ack_state(msdata);
                } else if (curstate > MULTI_STOP_PREPARE) {
                        /*
                         * At this stage all other CPUs we depend on must spin
                         * in the same loop. Any reason for hard-lockup should
                         * be detected and reported on their side.
                         */
                        touch_nmi_watchdog();
                }
                rcu_momentary_dyntick_idle();
        } while (curstate != MULTI_STOP_EXIT);

        local_irq_restore(flags);
        return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
                                    int cpu2, struct cpu_stop_work *work2)
{
        struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
        struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
        DEFINE_WAKE_Q(wakeq);
        int err;

retry:
        /*
         * The waking up of stopper threads has to happen in the same
         * scheduling context as the queueing.  Otherwise, there is a
         * possibility of one of the above stoppers being woken up by another
         * CPU, and preempting us. This will cause us to not wake up the other
         * stopper forever.
         */
        preempt_disable();
        raw_spin_lock_irq(&stopper1->lock);
        raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

        if (!stopper1->enabled || !stopper2->enabled) {
                err = -ENOENT;
                goto unlock;
        }

        /*
         * Ensure that if we race with __stop_cpus() the stoppers won't get
         * queued up in reverse order leading to system deadlock.
         *
         * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
         * queued a work on cpu1 but not on cpu2, we hold both locks.
         *
         * It can be falsely true but it is safe to spin until it is cleared,
         * queue_stop_cpus_work() does everything under preempt_disable().
         */
        if (unlikely(stop_cpus_in_progress)) {
                err = -EDEADLK;
                goto unlock;
        }

        err = 0;
        __cpu_stop_queue_work(stopper1, work1, &wakeq);
        __cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
        raw_spin_unlock(&stopper2->lock);
        raw_spin_unlock_irq(&stopper1->lock);

        if (unlikely(err == -EDEADLK)) {
                preempt_enable();

                while (stop_cpus_in_progress)
                        cpu_relax();

                goto retry;
        }

        wake_up_q(&wakeq);
        preempt_enable();

        return err;
}

/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
        struct cpu_stop_done done;
        struct cpu_stop_work work1, work2;
        struct multi_stop_data msdata;

        msdata = (struct multi_stop_data){
                .fn = fn,
                .data = arg,
                .num_threads = 2,
                .active_cpus = cpumask_of(cpu1),
        };

        work1 = work2 = (struct cpu_stop_work){
                .fn = multi_cpu_stop,
                .arg = &msdata,
                .done = &done,
                .caller = _RET_IP_,
        };

        cpu_stop_init_done(&done, 2);
        set_state(&msdata, MULTI_STOP_PREPARE);

        if (cpu1 > cpu2)
                swap(cpu1, cpu2);
        if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
                return -ENOENT;

        wait_for_completion(&done.completion);
        return done.ret;
}

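/*
 * Editorial example, not part of the upstream file: stop two cpus
 * together.  Note that @fn runs on @cpu1 only, because active_cpus is
 * set to cpumask_of(cpu1) before the ordering swap.  The helpers below
 * are hypothetical.
 */
static int __maybe_unused example_pair_fn(void *arg)
{
        pr_info("both cpus stopped; fn on cpu%d\n", smp_processor_id());
        return 0;
}

static int __maybe_unused example_stop_pair(void)
{
        return stop_two_cpus(0, 1, example_pair_fn, NULL);
}
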
/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
                        struct cpu_stop_work *work_buf)
{
        *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
        return cpu_stop_queue_work(cpu, work_buf);
}

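/*
 * Editorial example, not part of the upstream file: the fire-and-forget
 * variant.  @work_buf must stay valid and untouched until the stopper
 * consumes it, hence the static buffer.  Reuses the hypothetical
 * example_report_cpu() from the stop_one_cpu() sketch above.
 */
static struct cpu_stop_work example_nowait_work;

static void __maybe_unused example_stop_nowait(unsigned int cpu)
{
        if (!stop_one_cpu_nowait(cpu, example_report_cpu, NULL,
                                 &example_nowait_work))
                pr_warn("stopper on cpu%u not enabled\n", cpu);
}
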
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
                                 cpu_stop_fn_t fn, void *arg,
                                 struct cpu_stop_done *done)
{
        struct cpu_stop_work *work;
        unsigned int cpu;
        bool queued = false;

        /*
         * Disable preemption while queueing to avoid getting
         * preempted by a stopper which might wait for other stoppers
         * to enter @fn which can lead to deadlock.
         */
        preempt_disable();
        stop_cpus_in_progress = true;
        barrier();
        for_each_cpu(cpu, cpumask) {
                work = &per_cpu(cpu_stopper.stop_work, cpu);
                work->fn = fn;
                work->arg = arg;
                work->done = done;
                work->caller = _RET_IP_;
                if (cpu_stop_queue_work(cpu, work))
                        queued = true;
        }
        barrier();
        stop_cpus_in_progress = false;
        preempt_enable();

        return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
                       cpu_stop_fn_t fn, void *arg)
{
        struct cpu_stop_done done;

        cpu_stop_init_done(&done, cpumask_weight(cpumask));
        if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
                return -ENOENT;
        wait_for_completion(&done.completion);
        return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
        int ret;

        /* static works are used, process one request at a time */
        mutex_lock(&stop_cpus_mutex);
        ret = __stop_cpus(cpumask, fn, arg);
        mutex_unlock(&stop_cpus_mutex);
        return ret;
}

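/*
 * Editorial note, not part of the upstream file: stop_cpus() is
 * file-static and is reached via stop_machine_cpuslocked() and
 * stop_core_cpuslocked() below.  Because every caller serializes on
 * stop_cpus_mutex, two concurrent stop_machine() invocations run one
 * after the other instead of interleaving their per-cpu works.
 */
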
static int cpu_stop_should_run(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        unsigned long flags;
        int run;

        raw_spin_lock_irqsave(&stopper->lock, flags);
        run = !list_empty(&stopper->works);
        raw_spin_unlock_irqrestore(&stopper->lock, flags);
        return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        struct cpu_stop_work *work;

repeat:
        work = NULL;
        raw_spin_lock_irq(&stopper->lock);
        if (!list_empty(&stopper->works)) {
                work = list_first_entry(&stopper->works,
                                        struct cpu_stop_work, list);
                list_del_init(&work->list);
        }
        raw_spin_unlock_irq(&stopper->lock);

        if (work) {
                cpu_stop_fn_t fn = work->fn;
                void *arg = work->arg;
                struct cpu_stop_done *done = work->done;
                int ret;

                /* cpu stop callbacks must not sleep, make in_atomic() == T */
                stopper->caller = work->caller;
                stopper->fn = fn;
                preempt_count_inc();
                ret = fn(arg);
                if (done) {
                        if (ret)
                                done->ret = ret;
                        cpu_stop_signal_done(done);
                }
                preempt_count_dec();
                stopper->fn = NULL;
                stopper->caller = 0;
                WARN_ONCE(preempt_count(),
                          "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
                goto repeat;
        }
}

void stop_machine_park(int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        /*
         * Lockless. cpu_stopper_thread() will take stopper->lock and flush
         * the pending works before it parks, until then it is fine to queue
         * the new works.
         */
        stopper->enabled = false;
        kthread_park(stopper->thread);
}

static void cpu_stop_create(unsigned int cpu)
{
        sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

        WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

        stopper->enabled = true;
        kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
        .store                  = &cpu_stopper.thread,
        .thread_should_run      = cpu_stop_should_run,
        .thread_fn              = cpu_stopper_thread,
        .thread_comm            = "migration/%u",
        .create                 = cpu_stop_create,
        .park                   = cpu_stop_park,
        .selfparking            = true,
};

static int __init cpu_stop_init(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

                raw_spin_lock_init(&stopper->lock);
                INIT_LIST_HEAD(&stopper->works);
        }

        BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
        stop_machine_unpark(raw_smp_processor_id());
        stop_machine_initialized = true;
        return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
                            const struct cpumask *cpus)
{
        struct multi_stop_data msdata = {
                .fn = fn,
                .data = data,
                .num_threads = num_online_cpus(),
                .active_cpus = cpus,
        };

        lockdep_assert_cpus_held();

        if (!stop_machine_initialized) {
                /*
                 * Handle the case where stop_machine() is called
                 * early in boot before stop_machine() has been
                 * initialized.
                 */
                unsigned long flags;
                int ret;

                WARN_ON_ONCE(msdata.num_threads != 1);

                local_irq_save(flags);
                hard_irq_disable();
                ret = (*fn)(data);
                local_irq_restore(flags);

                return ret;
        }

        /* Set the initial state and stop all online cpus. */
        set_state(&msdata, MULTI_STOP_PREPARE);
        return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
        int ret;

        /* No CPUs can come up or down during this. */
        cpus_read_lock();
        ret = stop_machine_cpuslocked(fn, data, cpus);
        cpus_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);

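/*
 * Editorial example, not part of the upstream file: the classic
 * stop_machine() pattern, mutating shared state while every online cpu
 * spins with interrupts disabled.  The struct and helpers are
 * hypothetical; only stop_machine() itself is the exported API.
 */
struct example_patch {
        int *addr;
        int val;
};

static int __maybe_unused example_apply(void *data)
{
        struct example_patch *p = data;

        /* Every other online cpu is spinning in multi_cpu_stop() here. */
        WRITE_ONCE(*p->addr, p->val);
        return 0;
}

static int __maybe_unused example_patch_value(int *addr, int val)
{
        struct example_patch p = { .addr = addr, .val = val };

        /* NULL cpumask: @fn runs on the first online cpu. */
        return stop_machine(example_apply, &p, NULL);
}
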
#ifdef CONFIG_SCHED_SMT
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data)
{
        const struct cpumask *smt_mask = cpu_smt_mask(cpu);

        struct multi_stop_data msdata = {
                .fn = fn,
                .data = data,
                .num_threads = cpumask_weight(smt_mask),
                .active_cpus = smt_mask,
        };

        lockdep_assert_cpus_held();

        /* Set the initial state and stop all online cpus. */
        set_state(&msdata, MULTI_STOP_PREPARE);
        return stop_cpus(smt_mask, multi_cpu_stop, &msdata);
}
EXPORT_SYMBOL_GPL(stop_core_cpuslocked);
#endif

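#ifdef CONFIG_SCHED_SMT
/*
 * Editorial example, not part of the upstream file: stop one SMT core.
 * The caller must hold cpus_read_lock(); with active_cpus == smt_mask,
 * @fn runs on every sibling of @cpu's core.  Reuses the hypothetical
 * example_report_cpu() from the stop_one_cpu() sketch above.
 */
static int __maybe_unused example_stop_core(unsigned int cpu)
{
        int ret;

        cpus_read_lock();
        ret = stop_core_cpuslocked(cpu, example_report_cpu, NULL);
        cpus_read_unlock();
        return ret;
}
#endif
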
/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
                                  const struct cpumask *cpus)
{
        struct multi_stop_data msdata = { .fn = fn, .data = data,
                                            .active_cpus = cpus };
        struct cpu_stop_done done;
        int ret;

        /* Local CPU must be inactive and CPU hotplug in progress. */
        BUG_ON(cpu_active(raw_smp_processor_id()));
        msdata.num_threads = num_active_cpus() + 1;     /* +1 for local */

        /* No proper task established and can't sleep - busy wait for lock. */
        while (!mutex_trylock(&stop_cpus_mutex))
                cpu_relax();

        /* Schedule work on other CPUs and execute directly for local CPU */
        set_state(&msdata, MULTI_STOP_PREPARE);
        cpu_stop_init_done(&done, num_active_cpus());
        queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
                             &done);
        ret = multi_cpu_stop(&msdata);

        /* Busy wait for completion. */
        while (!completion_done(&done.completion))
                cpu_relax();

        mutex_unlock(&stop_cpus_mutex);
        return ret ?: done.ret;
}

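/*
 * Editorial sketch, not part of the upstream file: a hotplug-path caller
 * on a cpu that is online but not yet active would use this exactly like
 * stop_machine(), e.g. (hypothetical, reusing example_apply() above):
 *
 *      ret = stop_machine_from_inactive_cpu(example_apply, &p, NULL);
 *
 * The local cpu runs multi_cpu_stop() inline and busy-waits on the
 * completion, because no schedulable task context exists yet.
 */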
