TOMOYO Linux Cross Reference
Linux/arch/sh/kernel/smp.c

// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <linux/profile.h>

#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

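/*
 * Illustrative sketch, not part of this file: a platform supplies the
 * mp_ops used throughout this file by filling in a struct plat_smp_ops
 * (declared in <asm/smp-ops.h>) and handing it to register_smp_ops()
 * from its early setup code. All example_* names below are hypothetical
 * stubs; the hotplug hooks reuse the native_* helpers defined later in
 * this file, as the in-tree SH-X3 ops do.
 */
static void example_prepare_cpus(unsigned int max_cpus)
{
        /* Probe the secondaries, mark them present/possible, etc. */
}

static void example_start_cpu(unsigned int cpu, unsigned long entry_point)
{
        /* Point CPU 'cpu' at entry_point and release it from reset. */
}

static void example_send_ipi(unsigned int cpu, unsigned int message)
{
        /* Raise this platform's inter-processor interrupt for 'message'. */
}

static struct plat_smp_ops example_smp_ops = {
        .prepare_cpus   = example_prepare_cpus,
        .start_cpu      = example_start_cpu,
        .send_ipi       = example_send_ipi,
        .cpu_disable    = native_cpu_disable,
        .cpu_die        = native_cpu_die,
        .play_dead      = native_play_dead,
};

/* From the platform setup path: register_smp_ops(&example_smp_ops); */
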
static inline void smp_store_cpu_info(unsigned int cpu)
{
        struct sh_cpuinfo *c = cpu_data + cpu;

        memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

        c->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu = smp_processor_id();

        init_new_context(current, &init_mm);
        current_thread_info()->cpu = cpu;
        mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        __cpu_number_map[0] = cpu;
        __cpu_logical_map[0] = cpu;

        set_cpu_online(cpu, true);
        set_cpu_possible(cpu, true);

        per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
        unsigned int i;

        for (i = 0; i < 10; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        if (system_state == SYSTEM_RUNNING)
                                pr_info("CPU %u is now offline\n", cpu);

                        return;
                }

                msleep(100);
        }

        pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
        return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
        idle_task_exit();
        irq_ctx_exit(raw_smp_processor_id());
        mb();

        __this_cpu_write(cpu_state, CPU_DEAD);
        local_irq_disable();
}

void native_play_dead(void)
{
        play_dead_common();
}

int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = mp_ops->cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
#ifdef CONFIG_MMU
        local_flush_tlb_all();
#endif

        clear_tasks_mm_cpumask(cpu);

        return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
        return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}

void native_play_dead(void)
{
        BUG();
}
#endif

static asmlinkage void start_secondary(void)
{
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = &init_mm;

        enable_mmu();
        mmgrab(mm);
        mmget(mm);
        current->active_mm = mm;
#ifdef CONFIG_MMU
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
#endif

        per_cpu_trap_init();

        notify_cpu_starting(cpu);

        local_irq_enable();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        set_cpu_online(cpu, true);
        per_cpu(cpu_state, cpu) = CPU_ONLINE;

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

extern struct {
        unsigned long sp;
        unsigned long bss_start;
        unsigned long bss_end;
        void *start_kernel_fn;
        void *cpu_init_fn;
        void *thread_info;
} stack_start;

int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
        unsigned long timeout;

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_icache_range((unsigned long)&stack_start,
                           (unsigned long)&stack_start + sizeof(stack_start));
        wmb();

        mp_ops->start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
                barrier();
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

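/*
 * Worked example of the BogoMIPS arithmetic above, with hypothetical
 * numbers: HZ = 100 and two CPUs each reporting loops_per_jiffy =
 * 2497152 give bogosum = 4994304. The integer part is
 * bogosum / (500000/HZ) = 4994304 / 5000 = 998, and the two fractional
 * digits are (bogosum / (5000/HZ)) % 100 = 99886 % 100 = 86, so the
 * printk above reports "(998.86 BogoMIPS)".
 */
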
void arch_smp_send_reschedule(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

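/*
 * Illustrative sketch, not part of this file: the two hooks above are
 * not called directly; the generic cross-call core (kernel/smp.c)
 * invokes them on behalf of callers of the smp_call_function*() APIs.
 * The example_* names are hypothetical.
 */
static void example_count_local(void *info)
{
        /* Runs on each targeted CPU in IPI context with IRQs disabled. */
}

static void example_cross_call(void)
{
        /* All other online CPUs; delivered through
         * arch_send_call_function_ipi_mask(), waiting for completion. */
        smp_call_function(example_count_local, NULL, 1);

        /* One specific CPU; delivered through
         * arch_send_call_function_single_ipi(). */
        smp_call_function_single(1, example_count_local, NULL, 1);
}
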
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
        irq_enter();
        tick_receive_broadcast();
        irq_exit();
}
#endif

void smp_message_recv(unsigned int msg)
{
        switch (msg) {
        case SMP_MSG_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case SMP_MSG_RESCHEDULE:
                scheduler_ipi();
                break;
        case SMP_MSG_FUNCTION_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case SMP_MSG_TIMER:
                ipi_timer();
                break;
#endif
        default:
                printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
                       smp_processor_id(), __func__, msg);
                break;
        }
}

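/*
 * Illustrative sketch, not part of this file: smp_message_recv() is the
 * receive-side counterpart of mp_ops->send_ipi(). A platform's IPI
 * interrupt handler decodes which message arrived and feeds it to the
 * demultiplexer above; the in-tree SH-X3 code does this with one IRQ
 * per message type, roughly as below (details hypothetical).
 */
static irqreturn_t example_ipi_handler(int irq, void *arg)
{
        unsigned int message = (unsigned int)(unsigned long)arg;

        /* Ack the platform's IPI source here, then dispatch. */
        smp_message_recv(message);

        return IRQ_HANDLED;
}
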
#ifdef CONFIG_PROFILING
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
#endif

#ifdef CONFIG_MMU

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
        } else {
                int i;
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
        local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
        struct flush_tlb_data fd;

        fd.addr1 = asid;
        fd.addr2 = vaddr;

        smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
        local_flush_tlb_one(asid, vaddr);
}

#endif
