
TOMOYO Linux Cross Reference
Linux/arch/sh/kernel/smp.c


Diff markup

Differences between /arch/sh/kernel/smp.c (Architecture sh) and /arch/mips/kernel/smp.c (Architecture mips). Lines present only in the sh file are marked "<<", lines present only in the mips file are marked ">>", and lines that differ between the two are marked "!!".


  1 // SPDX-License-Identifier: GPL-2.0            !!   1 // SPDX-License-Identifier: GPL-2.0-or-later
  2 /*                                                  2 /*
  3  * arch/sh/kernel/smp.c                        << 
  4  *                                                  3  *
  5  * SMP support for the SuperH processors.      !!   4  * Copyright (C) 2000, 2001 Kanoj Sarcar
  6  *                                             !!   5  * Copyright (C) 2000, 2001 Ralf Baechle
  7  * Copyright (C) 2002 - 2010 Paul Mundt        !!   6  * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
  8  * Copyright (C) 2006 - 2007 Akio Idehara      !!   7  * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
  9  */                                                 8  */
 10 #include <linux/err.h>                         << 
 11 #include <linux/cache.h>                            9 #include <linux/cache.h>
 12 #include <linux/cpumask.h>                     << 
 13 #include <linux/delay.h>                           10 #include <linux/delay.h>
 14 #include <linux/init.h>                            11 #include <linux/init.h>
 15 #include <linux/spinlock.h>                    << 
 16 #include <linux/mm.h>                          << 
 17 #include <linux/module.h>                      << 
 18 #include <linux/cpu.h>                         << 
 19 #include <linux/interrupt.h>                       12 #include <linux/interrupt.h>
 20 #include <linux/sched/mm.h>                    << 
 21 #include <linux/sched/hotplug.h>               << 
 22 #include <linux/atomic.h>                      << 
 23 #include <linux/clockchips.h>                  << 
 24 #include <linux/profile.h>                         13 #include <linux/profile.h>
                                                   >>  14 #include <linux/smp.h>
                                                   >>  15 #include <linux/spinlock.h>
                                                   >>  16 #include <linux/threads.h>
                                                   >>  17 #include <linux/export.h>
                                                   >>  18 #include <linux/time.h>
                                                   >>  19 #include <linux/timex.h>
                                                   >>  20 #include <linux/sched/mm.h>
                                                   >>  21 #include <linux/cpumask.h>
                                                   >>  22 #include <linux/cpu.h>
                                                   >>  23 #include <linux/err.h>
                                                   >>  24 #include <linux/ftrace.h>
                                                   >>  25 #include <linux/irqdomain.h>
                                                   >>  26 #include <linux/of.h>
                                                   >>  27 #include <linux/of_irq.h>
 25                                                    28 
                                                   >>  29 #include <linux/atomic.h>
                                                   >>  30 #include <asm/cpu.h>
                                                   >>  31 #include <asm/ginvt.h>
 26 #include <asm/processor.h>                         32 #include <asm/processor.h>
                                                   >>  33 #include <asm/idle.h>
                                                   >>  34 #include <asm/r4k-timer.h>
                                                   >>  35 #include <asm/mips-cps.h>
 27 #include <asm/mmu_context.h>                       36 #include <asm/mmu_context.h>
 28 #include <asm/smp.h>                           !!  37 #include <asm/time.h>
 29 #include <asm/cacheflush.h>                    << 
 30 #include <asm/sections.h>                      << 
 31 #include <asm/setup.h>                             38 #include <asm/setup.h>
                                                   >>  39 #include <asm/maar.h>
                                                   >>  40 
                                                   >>  41 int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
                                                   >>  42 EXPORT_SYMBOL(__cpu_number_map);
 32                                                    43 
  33 int __cpu_number_map[NR_CPUS];          /* Map physical to logical */  <<
  34 int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */     44 int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */
                                                   >>  45 EXPORT_SYMBOL(__cpu_logical_map);
                                                   >>  46 
                                                   >>  47 /* Number of TCs (or siblings in Intel speak) per CPU core */
                                                   >>  48 int smp_num_siblings = 1;
                                                   >>  49 EXPORT_SYMBOL(smp_num_siblings);
                                                   >>  50 
                                                   >>  51 /* representing the TCs (or siblings in Intel speak) of each logical CPU */
                                                   >>  52 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
                                                   >>  53 EXPORT_SYMBOL(cpu_sibling_map);
                                                   >>  54 
                                                   >>  55 /* representing the core map of multi-core chips of each logical CPU */
                                                   >>  56 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
                                                   >>  57 EXPORT_SYMBOL(cpu_core_map);
                                                   >>  58 
                                                   >>  59 static DECLARE_COMPLETION(cpu_starting);
                                                   >>  60 static DECLARE_COMPLETION(cpu_running);
                                                   >>  61 
                                                   >>  62 /*
                                                   >>  63  * A logical cpu mask containing only one VPE per core to
                                                   >>  64  * reduce the number of IPIs on large MT systems.
                                                   >>  65  */
                                                   >>  66 cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
                                                   >>  67 EXPORT_SYMBOL(cpu_foreign_map);
                                                   >>  68 
                                                   >>  69 /* representing cpus for which sibling maps can be computed */
                                                   >>  70 static cpumask_t cpu_sibling_setup_map;
 35                                                    71 
 36 struct plat_smp_ops *mp_ops = NULL;            !!  72 /* representing cpus for which core maps can be computed */
                                                   >>  73 static cpumask_t cpu_core_setup_map;
 37                                                    74 
 38 /* State of each CPU */                        !!  75 cpumask_t cpu_coherent_mask;
 39 DEFINE_PER_CPU(int, cpu_state) = { 0 };        << 
 40                                                    76 
  41 void register_smp_ops(struct plat_smp_ops *ops)  !!  77 unsigned int smp_max_threads __initdata = UINT_MAX;
                                                   >>  78 
                                                   >>  79 static int __init early_nosmt(char *s)
 42 {                                                  80 {
 43         if (mp_ops)                            !!  81         smp_max_threads = 1;
  44                 printk(KERN_WARNING "Overriding previously set SMP ops\n");  !!  82         return 0;
                                                   >>  83 }
                                                   >>  84 early_param("nosmt", early_nosmt);
 45                                                    85 
 46         mp_ops = ops;                          !!  86 static int __init early_smt(char *s)
                                                   >>  87 {
                                                   >>  88         get_option(&s, &smp_max_threads);
                                                   >>  89         /* Ensure at least one thread is available */
                                                   >>  90         smp_max_threads = clamp_val(smp_max_threads, 1U, UINT_MAX);
                                                   >>  91         return 0;
 47 }                                                  92 }
                                                   >>  93 early_param("smt", early_smt);
                                                   >>  94 
                                                   >>  95 #ifdef CONFIG_GENERIC_IRQ_IPI
                                                   >>  96 static struct irq_desc *call_desc;
                                                   >>  97 static struct irq_desc *sched_desc;
                                                   >>  98 #endif
 48                                                    99 
  49 static inline void smp_store_cpu_info(unsigned int cpu)  !! 100 static inline void set_cpu_sibling_map(int cpu)
 50 {                                                 101 {
 51         struct sh_cpuinfo *c = cpu_data + cpu; !! 102         int i;
 52                                                   103 
  53         memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));  !! 104         cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
 54                                                   105 
 55         c->loops_per_jiffy = loops_per_jiffy;  !! 106         if (smp_num_siblings > 1) {
                                                   >> 107                 for_each_cpu(i, &cpu_sibling_setup_map) {
                                                   >> 108                         if (cpus_are_siblings(cpu, i)) {
                                                   >> 109                                 cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
                                                   >> 110                                 cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                                                   >> 111                         }
                                                   >> 112                 }
                                                   >> 113         } else
                                                   >> 114                 cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
 56 }                                                 115 }
 57                                                   116 
  58 void __init smp_prepare_cpus(unsigned int max_cpus)  !! 117 static inline void set_cpu_core_map(int cpu)
 59 {                                                 118 {
 60         unsigned int cpu = smp_processor_id(); !! 119         int i;
 61                                                   120 
 62         init_new_context(current, &init_mm);   !! 121         cpumask_set_cpu(cpu, &cpu_core_setup_map);
 63         current_thread_info()->cpu = cpu;      << 
 64         mp_ops->prepare_cpus(max_cpus);        << 
 65                                                   122 
 66 #ifndef CONFIG_HOTPLUG_CPU                     !! 123         for_each_cpu(i, &cpu_core_setup_map) {
 67         init_cpu_present(cpu_possible_mask);   !! 124                 if (cpu_data[cpu].package == cpu_data[i].package) {
 68 #endif                                         !! 125                         cpumask_set_cpu(i, &cpu_core_map[cpu]);
                                                   >> 126                         cpumask_set_cpu(cpu, &cpu_core_map[i]);
                                                   >> 127                 }
                                                   >> 128         }
 69 }                                                 129 }
 70                                                   130 
 71 void __init smp_prepare_boot_cpu(void)         !! 131 /*
                                                   >> 132  * Calculate a new cpu_foreign_map mask whenever a
                                                   >> 133  * new cpu appears or disappears.
                                                   >> 134  */
                                                   >> 135 void calculate_cpu_foreign_map(void)
 72 {                                                 136 {
 73         unsigned int cpu = smp_processor_id(); !! 137         int i, k, core_present;
                                                   >> 138         cpumask_t temp_foreign_map;
 74                                                   139 
 75         __cpu_number_map[0] = cpu;             !! 140         /* Re-calculate the mask */
 76         __cpu_logical_map[0] = cpu;            !! 141         cpumask_clear(&temp_foreign_map);
                                                   >> 142         for_each_online_cpu(i) {
                                                   >> 143                 core_present = 0;
                                                   >> 144                 for_each_cpu(k, &temp_foreign_map)
                                                   >> 145                         if (cpus_are_siblings(i, k))
                                                   >> 146                                 core_present = 1;
                                                   >> 147                 if (!core_present)
                                                   >> 148                         cpumask_set_cpu(i, &temp_foreign_map);
                                                   >> 149         }
 77                                                   150 
 78         set_cpu_online(cpu, true);             !! 151         for_each_online_cpu(i)
 79         set_cpu_possible(cpu, true);           !! 152                 cpumask_andnot(&cpu_foreign_map[i],
                                                   >> 153                                &temp_foreign_map, &cpu_sibling_map[i]);
                                                   >> 154 }
 80                                                   155 
 81         per_cpu(cpu_state, cpu) = CPU_ONLINE;  !! 156 const struct plat_smp_ops *mp_ops;
                                                   >> 157 EXPORT_SYMBOL(mp_ops);
                                                   >> 158 
                                                   >> 159 void register_smp_ops(const struct plat_smp_ops *ops)
                                                   >> 160 {
                                                   >> 161         if (mp_ops)
                                                   >> 162                 printk(KERN_WARNING "Overriding previously set SMP ops\n");
                                                   >> 163 
                                                   >> 164         mp_ops = ops;
 82 }                                                 165 }
 83                                                   166 
 84 #ifdef CONFIG_HOTPLUG_CPU                      !! 167 #ifdef CONFIG_GENERIC_IRQ_IPI
 85 void native_cpu_die(unsigned int cpu)          !! 168 void mips_smp_send_ipi_single(int cpu, unsigned int action)
 86 {                                                 169 {
 87         unsigned int i;                        !! 170         mips_smp_send_ipi_mask(cpumask_of(cpu), action);
                                                   >> 171 }
 88                                                   172 
 89         for (i = 0; i < 10; i++) {             !! 173 void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 90                 smp_rmb();                     !! 174 {
  91                 if (per_cpu(cpu_state, cpu) == CPU_DEAD) {  !! 175         unsigned long flags;
  92                         if (system_state == SYSTEM_RUNNING)  !! 176         unsigned int core;
  93                                 pr_info("CPU %u is now offline\n", cpu);  !! 177         int cpu;
 94                                                   178 
 95                         return;                !! 179         local_irq_save(flags);
 96                 }                              << 
 97                                                   180 
 98                 msleep(100);                   !! 181         switch (action) {
                                                   >> 182         case SMP_CALL_FUNCTION:
                                                   >> 183                 __ipi_send_mask(call_desc, mask);
                                                   >> 184                 break;
                                                   >> 185 
                                                   >> 186         case SMP_RESCHEDULE_YOURSELF:
                                                   >> 187                 __ipi_send_mask(sched_desc, mask);
                                                   >> 188                 break;
                                                   >> 189 
                                                   >> 190         default:
                                                   >> 191                 BUG();
 99         }                                         192         }
100                                                   193 
101         pr_err("CPU %u didn't die...\n", cpu); !! 194         if (mips_cpc_present()) {
102 }                                              !! 195                 for_each_cpu(cpu, mask) {
                                                   >> 196                         if (cpus_are_siblings(cpu, smp_processor_id()))
                                                   >> 197                                 continue;
                                                   >> 198 
                                                   >> 199                         core = cpu_core(&cpu_data[cpu]);
                                                   >> 200 
                                                   >> 201                         while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
                                                   >> 202                                 mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                                                   >> 203                                 mips_cpc_lock_other(core);
                                                   >> 204                                 write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
                                                   >> 205                                 mips_cpc_unlock_other();
                                                   >> 206                                 mips_cm_unlock_other();
                                                   >> 207                         }
                                                   >> 208                 }
                                                   >> 209         }
103                                                   210 
104 int native_cpu_disable(unsigned int cpu)       !! 211         local_irq_restore(flags);
105 {                                              << 
106         return cpu == 0 ? -EPERM : 0;          << 
107 }                                                 212 }
108                                                   213 
109 void play_dead_common(void)                    !! 214 
                                                   >> 215 static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
110 {                                                 216 {
111         idle_task_exit();                      !! 217         scheduler_ipi();
112         irq_ctx_exit(raw_smp_processor_id());  << 
113         mb();                                  << 
114                                                   218 
115         __this_cpu_write(cpu_state, CPU_DEAD); !! 219         return IRQ_HANDLED;
116         local_irq_disable();                   << 
117 }                                                 220 }
118                                                   221 
119 void native_play_dead(void)                    !! 222 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
120 {                                                 223 {
121         play_dead_common();                    !! 224         generic_smp_call_function_interrupt();
                                                   >> 225 
                                                   >> 226         return IRQ_HANDLED;
122 }                                                 227 }
123                                                   228 
124 int __cpu_disable(void)                        !! 229 static void smp_ipi_init_one(unsigned int virq, const char *name,
                                                   >> 230                              irq_handler_t handler)
125 {                                                 231 {
126         unsigned int cpu = smp_processor_id(); << 
127         int ret;                                  232         int ret;
128                                                   233 
129         ret = mp_ops->cpu_disable(cpu);        !! 234         irq_set_handler(virq, handle_percpu_irq);
130         if (ret)                               !! 235         ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
131                 return ret;                    !! 236         BUG_ON(ret);
                                                   >> 237 }
132                                                   238 
133         /*                                     !! 239 static unsigned int call_virq, sched_virq;
 134          * Take this CPU offline.  Once we clear this, we can't return,  !! 240 
 135          * and we must not schedule until we're ready to give up the cpu.  !! 241 int mips_smp_ipi_allocate(const struct cpumask *mask)
136          */                                    !! 242 {
137         set_cpu_online(cpu, false);            !! 243         int virq;
                                                   >> 244         struct irq_domain *ipidomain;
                                                   >> 245         struct device_node *node;
                                                   >> 246 
                                                   >> 247         node = of_irq_find_parent(of_root);
                                                   >> 248         ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
138                                                   249 
139         /*                                        250         /*
 140          * OK - migrate IRQs away from this CPU  !! 251          * Some platforms have half DT setup. So if we found irq node but
                                                   >> 252          * didn't find an ipidomain, try to search for one that is not in the
                                                   >> 253          * DT.
141          */                                       254          */
142         migrate_irqs();                        !! 255         if (node && !ipidomain)
                                                   >> 256                 ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
143                                                   257 
144         /*                                        258         /*
 145          * Flush user cache and TLB mappings, and then remove this CPU  !! 259          * There are systems which use IPI IRQ domains, but only have one
 146          * from the vm mask set of all processes.  !! 260          * registered when some runtime condition is met. For example a Malta
                                                   >> 261          * kernel may include support for GIC & CPU interrupt controller IPI
                                                   >> 262          * IRQ domains, but if run on a system with no GIC & no MT ASE then
                                                   >> 263          * neither will be supported or registered.
                                                   >> 264          *
                                                   >> 265          * We only have a problem if we're actually using multiple CPUs so fail
                                                   >> 266          * loudly if that is the case. Otherwise simply return, skipping IPI
                                                   >> 267          * setup, if we're running with only a single CPU.
147          */                                       268          */
148         flush_cache_all();                     !! 269         if (!ipidomain) {
149 #ifdef CONFIG_MMU                              !! 270                 BUG_ON(num_present_cpus() > 1);
150         local_flush_tlb_all();                 !! 271                 return 0;
151 #endif                                         !! 272         }
152                                                   273 
153         clear_tasks_mm_cpumask(cpu);           !! 274         virq = irq_reserve_ipi(ipidomain, mask);
                                                   >> 275         BUG_ON(!virq);
                                                   >> 276         if (!call_virq)
                                                   >> 277                 call_virq = virq;
                                                   >> 278 
                                                   >> 279         virq = irq_reserve_ipi(ipidomain, mask);
                                                   >> 280         BUG_ON(!virq);
                                                   >> 281         if (!sched_virq)
                                                   >> 282                 sched_virq = virq;
                                                   >> 283 
                                                   >> 284         if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                                                   >> 285                 int cpu;
                                                   >> 286 
                                                   >> 287                 for_each_cpu(cpu, mask) {
                                                   >> 288                         smp_ipi_init_one(call_virq + cpu, "IPI call",
                                                   >> 289                                          ipi_call_interrupt);
                                                   >> 290                         smp_ipi_init_one(sched_virq + cpu, "IPI resched",
                                                   >> 291                                          ipi_resched_interrupt);
                                                   >> 292                 }
                                                   >> 293         } else {
                                                   >> 294                 smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
                                                   >> 295                 smp_ipi_init_one(sched_virq, "IPI resched",
                                                   >> 296                                  ipi_resched_interrupt);
                                                   >> 297         }
154                                                   298 
155         return 0;                                 299         return 0;
156 }                                                 300 }
157 #else /* ... !CONFIG_HOTPLUG_CPU */            << 
158 int native_cpu_disable(unsigned int cpu)       << 
159 {                                              << 
160         return -ENOSYS;                        << 
161 }                                              << 
162                                                   301 
163 void native_cpu_die(unsigned int cpu)          !! 302 int mips_smp_ipi_free(const struct cpumask *mask)
164 {                                                 303 {
165         /* We said "no" in __cpu_disable */    !! 304         struct irq_domain *ipidomain;
166         BUG();                                 !! 305         struct device_node *node;
167 }                                              << 
168                                                   306 
169 void native_play_dead(void)                    !! 307         node = of_irq_find_parent(of_root);
170 {                                              !! 308         ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
171         BUG();                                 << 
172 }                                              << 
173 #endif                                         << 
174                                                   309 
175 static asmlinkage void start_secondary(void)   !! 310         /*
176 {                                              !! 311          * Some platforms have half DT setup. So if we found irq node but
177         unsigned int cpu = smp_processor_id(); !! 312          * didn't find an ipidomain, try to search for one that is not in the
178         struct mm_struct *mm = &init_mm;       !! 313          * DT.
                                                   >> 314          */
                                                   >> 315         if (node && !ipidomain)
                                                   >> 316                 ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
179                                                   317 
180         enable_mmu();                          !! 318         BUG_ON(!ipidomain);
181         mmgrab(mm);                            << 
182         mmget(mm);                             << 
183         current->active_mm = mm;               << 
184 #ifdef CONFIG_MMU                              << 
185         enter_lazy_tlb(mm, current);           << 
186         local_flush_tlb_all();                 << 
187 #endif                                         << 
188                                                   319 
189         per_cpu_trap_init();                   !! 320         if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                                                   >> 321                 int cpu;
190                                                   322 
191         notify_cpu_starting(cpu);              !! 323                 for_each_cpu(cpu, mask) {
                                                   >> 324                         free_irq(call_virq + cpu, NULL);
                                                   >> 325                         free_irq(sched_virq + cpu, NULL);
                                                   >> 326                 }
                                                   >> 327         }
                                                   >> 328         irq_destroy_ipi(call_virq, mask);
                                                   >> 329         irq_destroy_ipi(sched_virq, mask);
                                                   >> 330         return 0;
                                                   >> 331 }
192                                                   332 
193         local_irq_enable();                    << 
194                                                   333 
195         calibrate_delay();                     !! 334 static int __init mips_smp_ipi_init(void)
                                                   >> 335 {
                                                   >> 336         if (num_possible_cpus() == 1)
                                                   >> 337                 return 0;
196                                                   338 
197         smp_store_cpu_info(cpu);               !! 339         mips_smp_ipi_allocate(cpu_possible_mask);
198                                                   340 
199         set_cpu_online(cpu, true);             !! 341         call_desc = irq_to_desc(call_virq);
200         per_cpu(cpu_state, cpu) = CPU_ONLINE;  !! 342         sched_desc = irq_to_desc(sched_virq);
201                                                   343 
 202         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);  !! 344         return 0;
203 }                                                 345 }
                                                   >> 346 early_initcall(mips_smp_ipi_init);
                                                   >> 347 #endif
204                                                   348 
205 extern struct {                                !! 349 /*
206         unsigned long sp;                      !! 350  * First C code run on the secondary CPUs after being started up by
207         unsigned long bss_start;               !! 351  * the master.
208         unsigned long bss_end;                 !! 352  */
209         void *start_kernel_fn;                 !! 353 asmlinkage void start_secondary(void)
210         void *cpu_init_fn;                     << 
211         void *thread_info;                     << 
212 } stack_start;                                 << 
213                                                << 
 214 int __cpu_up(unsigned int cpu, struct task_struct *tsk)  <<
215 {                                                 354 {
216         unsigned long timeout;                 !! 355         unsigned int cpu = raw_smp_processor_id();
217                                                   356 
 218         per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;  !! 357         cpu_probe();
                                                   >> 358         per_cpu_trap_init(false);
                                                   >> 359         rcutree_report_cpu_starting(cpu);
                                                   >> 360         mips_clockevent_init();
                                                   >> 361         mp_ops->init_secondary();
                                                   >> 362         cpu_report();
                                                   >> 363         maar_init();
219                                                   364 
 220         /* Fill in data in head.S for secondary cpus */  !! 365         /*
221         stack_start.sp = tsk->thread.sp;       !! 366          * XXX parity protection should be folded in here when it's converted
222         stack_start.thread_info = tsk->stack;  !! 367          * to an option instead of something based on .cputype
 223         stack_start.bss_start = 0; /* don't clear bss for secondary cpus */  !! 368          */
 224         stack_start.start_kernel_fn = start_secondary;  <<
225                                                   369 
 226         flush_icache_range((unsigned long)&stack_start,  !! 370         calibrate_delay();
 227                            (unsigned long)&stack_start + sizeof(stack_start));  !! 371         cpu_data[cpu].udelay_val = loops_per_jiffy;
228         wmb();                                 << 
229                                                   372 
 230         mp_ops->start_cpu(cpu, (unsigned long)_stext);  !! 373         set_cpu_sibling_map(cpu);
                                                   >> 374         set_cpu_core_map(cpu);
231                                                   375 
232         timeout = jiffies + HZ;                !! 376         cpumask_set_cpu(cpu, &cpu_coherent_mask);
 233         while (time_before(jiffies, timeout)) {  !! 377         notify_cpu_starting(cpu);
234                 if (cpu_online(cpu))           << 
235                         break;                 << 
236                                                   378 
237                 udelay(10);                    !! 379         /* Notify boot CPU that we're starting & ready to sync counters */
238                 barrier();                     !! 380         complete(&cpu_starting);
239         }                                      << 
240                                                   381 
241         if (cpu_online(cpu))                   !! 382         synchronise_count_slave(cpu);
242                 return 0;                      << 
243                                                   383 
244         return -ENOENT;                        !! 384         /* The CPU is running and counters synchronised, now mark it online */
245 }                                              !! 385         set_cpu_online(cpu, true);
246                                                   386 
 247 void __init smp_cpus_done(unsigned int max_cpus)  !! 387         calculate_cpu_foreign_map();
248 {                                              << 
249         unsigned long bogosum = 0;             << 
250         int cpu;                               << 
251                                                   388 
252         for_each_online_cpu(cpu)               !! 389         /*
 253                 bogosum += cpu_data[cpu].loops_per_jiffy;  !! 390          * Notify boot CPU that we're up & online and it can safely return
                                                   >> 391          * from __cpu_up
                                                   >> 392          */
                                                   >> 393         complete(&cpu_running);
254                                                   394 
 255         printk(KERN_INFO "SMP: Total of %d processors activated "  !! 395         /*
 256                "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),  !! 396          * irq will be enabled in ->smp_finish(), enabling it too early
257                bogosum / (500000/HZ),          !! 397          * is dangerous.
258                (bogosum / (5000/HZ)) % 100);   !! 398          */
                                                   >> 399         WARN_ON_ONCE(!irqs_disabled());
                                                   >> 400         mp_ops->smp_finish();
                                                   >> 401 
                                                   >> 402         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
259 }                                                 403 }
260                                                   404 
261 void arch_smp_send_reschedule(int cpu)         !! 405 static void stop_this_cpu(void *dummy)
262 {                                                 406 {
 263         mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);  !! 407         /*
                                                   >> 408          * Remove this CPU:
                                                   >> 409          */
                                                   >> 410 
                                                   >> 411         set_cpu_online(smp_processor_id(), false);
                                                   >> 412         calculate_cpu_foreign_map();
                                                   >> 413         local_irq_disable();
                                                   >> 414         while (1);
264 }                                                 415 }
265                                                   416 
266 void smp_send_stop(void)                          417 void smp_send_stop(void)
267 {                                                 418 {
 268         smp_call_function(stop_this_cpu, 0, 0);  !! 419         smp_call_function(stop_this_cpu, NULL, 0);
269 }                                                 420 }
270                                                   421 
 271 void arch_send_call_function_ipi_mask(const struct cpumask *mask)  !! 422 void __init smp_cpus_done(unsigned int max_cpus)
272 {                                                 423 {
273         int cpu;                               << 
274                                                << 
275         for_each_cpu(cpu, mask)                << 
 276                 mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);  <<
277 }                                                 424 }
278                                                   425 
 279 void arch_send_call_function_single_ipi(int cpu)  !! 426 /* called from main before smp_init() */
                                                   >> 427 void __init smp_prepare_cpus(unsigned int max_cpus)
280 {                                                 428 {
 281         mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);  !! 429         init_new_context(current, &init_mm);
                                                   >> 430         current_thread_info()->cpu = 0;
                                                   >> 431         mp_ops->prepare_cpus(max_cpus);
                                                   >> 432         set_cpu_sibling_map(0);
                                                   >> 433         set_cpu_core_map(0);
                                                   >> 434         calculate_cpu_foreign_map();
                                                   >> 435 #ifndef CONFIG_HOTPLUG_CPU
                                                   >> 436         init_cpu_present(cpu_possible_mask);
                                                   >> 437 #endif
                                                   >> 438         cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
282 }                                                 439 }
283                                                   440 
284 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST    !! 441 /* preload SMP state for boot cpu */
 285 void tick_broadcast(const struct cpumask *mask)  !! 442 void __init smp_prepare_boot_cpu(void)
286 {                                                 443 {
287         int cpu;                               !! 444         if (mp_ops->prepare_boot_cpu)
288                                                !! 445                 mp_ops->prepare_boot_cpu();
289         for_each_cpu(cpu, mask)                !! 446         set_cpu_possible(0, true);
 290                 mp_ops->send_ipi(cpu, SMP_MSG_TIMER);  !! 447         set_cpu_online(0, true);
291 }                                                 448 }
292                                                   449 
293 static void ipi_timer(void)                    !! 450 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
294 {                                                 451 {
295         irq_enter();                           !! 452         int err;
296         tick_receive_broadcast();              << 
297         irq_exit();                            << 
298 }                                              << 
299 #endif                                         << 
300                                                   453 
301 void smp_message_recv(unsigned int msg)        !! 454         err = mp_ops->boot_secondary(cpu, tidle);
302 {                                              !! 455         if (err)
303         switch (msg) {                         !! 456                 return err;
304         case SMP_MSG_FUNCTION:                 !! 457 
 305                 generic_smp_call_function_interrupt();  !! 458         /* Wait for CPU to start and be ready to sync counters */
306                 break;                         !! 459         if (!wait_for_completion_timeout(&cpu_starting,
307         case SMP_MSG_RESCHEDULE:               !! 460                                          msecs_to_jiffies(1000))) {
308                 scheduler_ipi();               !! 461                 pr_crit("CPU%u: failed to start\n", cpu);
309                 break;                         !! 462                 return -EIO;
310         case SMP_MSG_FUNCTION_SINGLE:          << 
 311                 generic_smp_call_function_single_interrupt();  <<
312                 break;                         << 
313 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST    << 
314         case SMP_MSG_TIMER:                    << 
315                 ipi_timer();                   << 
316                 break;                         << 
317 #endif                                         << 
318         default:                               << 
 319                 printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",  <<
 320                        smp_processor_id(), __func__, msg);  <<
321                 break;                         << 
322         }                                         463         }
                                                   >> 464 
                                                   >> 465         /* Wait for CPU to finish startup & mark itself online before return */
                                                   >> 466         wait_for_completion(&cpu_running);
                                                   >> 467         return 0;
323 }                                                 468 }
324                                                   469 
325 #ifdef CONFIG_PROFILING                           470 #ifdef CONFIG_PROFILING
326 /* Not really SMP stuff ... */                    471 /* Not really SMP stuff ... */
327 int setup_profiling_timer(unsigned int multipl    472 int setup_profiling_timer(unsigned int multiplier)
328 {                                                 473 {
329         return 0;                                 474         return 0;
330 }                                                 475 }
331 #endif                                            476 #endif
332                                                   477 
333 #ifdef CONFIG_MMU                              << 
334                                                << 
335 static void flush_tlb_all_ipi(void *info)         478 static void flush_tlb_all_ipi(void *info)
336 {                                                 479 {
337         local_flush_tlb_all();                    480         local_flush_tlb_all();
338 }                                                 481 }
339                                                   482 
340 void flush_tlb_all(void)                          483 void flush_tlb_all(void)
341 {                                                 484 {
342         on_each_cpu(flush_tlb_all_ipi, 0, 1);  !! 485         if (cpu_has_mmid) {
                                                   >> 486                 htw_stop();
                                                   >> 487                 ginvt_full();
                                                   >> 488                 sync_ginv();
                                                   >> 489                 instruction_hazard();
                                                   >> 490                 htw_start();
                                                   >> 491                 return;
                                                   >> 492         }
                                                   >> 493 
                                                   >> 494         on_each_cpu(flush_tlb_all_ipi, NULL, 1);
343 }                                                 495 }
344                                                   496 
345 static void flush_tlb_mm_ipi(void *mm)            497 static void flush_tlb_mm_ipi(void *mm)
346 {                                                 498 {
 347         local_flush_tlb_mm((struct mm_struct *)mm);  !! 499         drop_mmu_context((struct mm_struct *)mm);
                                                   >> 500 }
                                                   >> 501 
                                                   >> 502 /*
                                                   >> 503  * Special Variant of smp_call_function for use by TLB functions:
                                                   >> 504  *
                                                   >> 505  *  o No return value
                                                   >> 506  *  o collapses to normal function call on UP kernels
                                                   >> 507  *  o collapses to normal function call on systems with a single shared
                                                   >> 508  *    primary cache.
                                                   >> 509  */
                                                   >> 510 static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
                                                   >> 511 {
                                                   >> 512         smp_call_function(func, info, 1);
                                                   >> 513 }
                                                   >> 514 
                                                   >> 515 static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
                                                   >> 516 {
                                                   >> 517         preempt_disable();
                                                   >> 518 
                                                   >> 519         smp_on_other_tlbs(func, info);
                                                   >> 520         func(info);
                                                   >> 521 
                                                   >> 522         preempt_enable();
348 }                                                 523 }
349                                                   524 
350 /*                                                525 /*
351  * The following tlb flush calls are invoked w    526  * The following tlb flush calls are invoked when old translations are
352  * being torn down, or pte attributes are chan    527  * being torn down, or pte attributes are changing. For single threaded
353  * address spaces, a new context is obtained o    528  * address spaces, a new context is obtained on the current cpu, and tlb
354  * context on other cpus are invalidated to fo    529  * context on other cpus are invalidated to force a new context allocation
355  * at switch_mm time, should the mm ever be us    530  * at switch_mm time, should the mm ever be used on other cpus. For
356  * multithreaded address spaces, intercpu inte !! 531  * multithreaded address spaces, inter-CPU interrupts have to be sent.
357  * Another case where intercpu interrupts are  !! 532  * Another case where inter-CPU interrupts are required is when the target
358  * mm might be active on another cpu (eg debug    533  * mm might be active on another cpu (eg debuggers doing the flushes on
359  * behalf of debugees, kswapd stealing pages f    534  * behalf of debugees, kswapd stealing pages from another process etc).
360  * Kanoj 07/00.                                   535  * Kanoj 07/00.
361  */                                               536  */
                                                   >> 537 
362 void flush_tlb_mm(struct mm_struct *mm)           538 void flush_tlb_mm(struct mm_struct *mm)
363 {                                                 539 {
                                                   >> 540         if (!mm)
                                                   >> 541                 return;
                                                   >> 542 
                                                   >> 543         if (atomic_read(&mm->mm_users) == 0)
                                                   >> 544                 return;         /* happens as a result of exit_mmap() */
                                                   >> 545 
364         preempt_disable();                        546         preempt_disable();
365                                                   547 
 366         if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {  !! 548         if (cpu_has_mmid) {
 367                 smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);  !! 549                 /*
                                                   >> 550                  * No need to worry about other CPUs - the ginvt in
                                                   >> 551                  * drop_mmu_context() will be globalized.
                                                   >> 552                  */
                                                   >> 553         } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                                                   >> 554                 smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
368         } else {                                  555         } else {
369                 int i;                         !! 556                 unsigned int cpu;
370                 for_each_online_cpu(i)         !! 557 
 371                         if (smp_processor_id() != i)  !! 558                 for_each_online_cpu(cpu) {
 372                                 cpu_context(i, mm) = 0;  !! 559                         if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                                   >> 560                                 set_cpu_context(cpu, mm, 0);
                                                   >> 561                 }
373         }                                         562         }
374         local_flush_tlb_mm(mm);                !! 563         drop_mmu_context(mm);
375                                                   564 
376         preempt_enable();                         565         preempt_enable();
377 }                                                 566 }
378                                                   567 
379 struct flush_tlb_data {                           568 struct flush_tlb_data {
380         struct vm_area_struct *vma;               569         struct vm_area_struct *vma;
381         unsigned long addr1;                      570         unsigned long addr1;
382         unsigned long addr2;                      571         unsigned long addr2;
383 };                                                572 };
384                                                   573 
385 static void flush_tlb_range_ipi(void *info)       574 static void flush_tlb_range_ipi(void *info)
386 {                                                 575 {
 387         struct flush_tlb_data *fd = (struct flush_tlb_data *) info;  !! 576         struct flush_tlb_data *fd = info;
388                                                   577 
389         local_flush_tlb_range(fd->vma, fd->add    578         local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
390 }                                                 579 }
391                                                   580 
 392 void flush_tlb_range(struct vm_area_struct *vma,  !! 581 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 393                      unsigned long start, unsigned long end)  <<
394 {                                                 582 {
395         struct mm_struct *mm = vma->vm_mm;        583         struct mm_struct *mm = vma->vm_mm;
                                                   >> 584         unsigned long addr;
                                                   >> 585         u32 old_mmid;
396                                                   586 
397         preempt_disable();                        587         preempt_disable();
 398         if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {  !! 588         if (cpu_has_mmid) {
399                 struct flush_tlb_data fd;      !! 589                 htw_stop();
                                                   >> 590                 old_mmid = read_c0_memorymapid();
                                                   >> 591                 write_c0_memorymapid(cpu_asid(0, mm));
                                                   >> 592                 mtc0_tlbw_hazard();
                                                   >> 593                 addr = round_down(start, PAGE_SIZE * 2);
                                                   >> 594                 end = round_up(end, PAGE_SIZE * 2);
                                                   >> 595                 do {
                                                   >> 596                         ginvt_va_mmid(addr);
                                                   >> 597                         sync_ginv();
                                                   >> 598                         addr += PAGE_SIZE * 2;
                                                   >> 599                 } while (addr < end);
                                                   >> 600                 write_c0_memorymapid(old_mmid);
                                                   >> 601                 instruction_hazard();
                                                   >> 602                 htw_start();
                                                   >> 603         } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                                                   >> 604                 struct flush_tlb_data fd = {
                                                   >> 605                         .vma = vma,
                                                   >> 606                         .addr1 = start,
                                                   >> 607                         .addr2 = end,
                                                   >> 608                 };
400                                                   609 
401                 fd.vma = vma;                  !! 610                 smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
402                 fd.addr1 = start;              !! 611                 local_flush_tlb_range(vma, start, end);
403                 fd.addr2 = end;                << 
404                 smp_call_function(flush_tlb_ra << 
405         } else {                                  612         } else {
406                 int i;                         !! 613                 unsigned int cpu;
407                 for_each_online_cpu(i)         !! 614                 int exec = vma->vm_flags & VM_EXEC;
408                         if (smp_processor_id() !! 615 
409                                 cpu_context(i, !! 616                 for_each_online_cpu(cpu) {
                                                   >> 617                         /*
                                                   >> 618                          * flush_cache_range() will only fully flush the icache if
                                                   >> 619                          * the VMA is executable; otherwise we must invalidate the
                                                   >> 620                          * ASID without making it appear to has_valid_asid() as if
                                                   >> 621                          * the mm has been completely unused by that CPU.
                                                   >> 622                          */
                                                   >> 623                         if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                                   >> 624                                 set_cpu_context(cpu, mm, !exec);
                                                   >> 625                 }
                                                   >> 626                 local_flush_tlb_range(vma, start, end);
410         }                                         627         }
411         local_flush_tlb_range(vma, start, end) << 
412         preempt_enable();                         628         preempt_enable();
413 }                                                 629 }
414                                                   630 
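
The cpu_has_mmid branch above widens the range to PAGE_SIZE * 2 boundaries because a MIPS TLB entry maps an even/odd pair of pages, so each ginvt_va_mmid() invalidate must cover both halves of an entry. Roughly, assuming 4 KiB pages and power-of-two rounding helpers equivalent to the kernel's round_down()/round_up():

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Power-of-two rounding, as used for the PAGE_SIZE * 2 stride above. */
#define round_down(x, y) ((x) & ~((y) - 1))
#define round_up(x, y)   round_down((x) + (y) - 1, (y))

int main(void)
{
        unsigned long start = 0x12345, end = 0x15001;
        unsigned long addr = round_down(start, PAGE_SIZE * 2);
        unsigned long stop = round_up(end, PAGE_SIZE * 2);

        do {    /* one invalidate per even/odd page pair */
                printf("invalidate pair at %#lx\n", addr);
                addr += PAGE_SIZE * 2;
        } while (addr < stop);
        return 0;
}
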
415 static void flush_tlb_kernel_range_ipi(void *i    631 static void flush_tlb_kernel_range_ipi(void *info)
416 {                                                 632 {
417         struct flush_tlb_data *fd = (struct fl !! 633         struct flush_tlb_data *fd = info;
418                                                   634 
419         local_flush_tlb_kernel_range(fd->addr1    635         local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
420 }                                                 636 }
421                                                   637 
422 void flush_tlb_kernel_range(unsigned long star    638 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
423 {                                                 639 {
424         struct flush_tlb_data fd;              !! 640         struct flush_tlb_data fd = {
                                                   >> 641                 .addr1 = start,
                                                   >> 642                 .addr2 = end,
                                                   >> 643         };
425                                                   644 
426         fd.addr1 = start;                      !! 645         on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
427         fd.addr2 = end;                        << 
428         on_each_cpu(flush_tlb_kernel_range_ipi << 
429 }                                                 646 }
430                                                   647 
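
flush_tlb_kernel_range() uses on_each_cpu() on both ports: kernel mappings are visible to every CPU, so there is no single-user shortcut, and the final argument of 1 makes the call wait until every CPU, including the caller, has run the handler. A loose userspace analogue of those semantics using threads (the real primitive sends IPIs and runs with preemption disabled):

#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 4

struct call {
        void (*func)(void *);
        void *info;
};

static void *ipi_thread(void *arg)
{
        struct call *c = arg;

        c->func(c->info);       /* handler runs on a "remote CPU" */
        return NULL;
}

static void hello(void *info)
{
        printf("flush with info=%s\n", (const char *)info);
}

/* Userspace stand-in for on_each_cpu(func, info, 1): run the handler
 * everywhere, including on the caller, and wait for completion. */
static void on_each_cpu_sketch(void (*func)(void *), void *info)
{
        pthread_t tid[NR_CPUS - 1];
        struct call c = { .func = func, .info = info };
        int i;

        for (i = 0; i < NR_CPUS - 1; i++)
                pthread_create(&tid[i], NULL, ipi_thread, &c);
        func(info);             /* the caller participates too */
        for (i = 0; i < NR_CPUS - 1; i++)
                pthread_join(tid[i], NULL);     /* the wait == 1 part */
}

int main(void)
{
        on_each_cpu_sketch(hello, "kernel range");
        return 0;
}
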
431 static void flush_tlb_page_ipi(void *info)        648 static void flush_tlb_page_ipi(void *info)
432 {                                                 649 {
433         struct flush_tlb_data *fd = (struct fl !! 650         struct flush_tlb_data *fd = info;
434                                                   651 
435         local_flush_tlb_page(fd->vma, fd->addr    652         local_flush_tlb_page(fd->vma, fd->addr1);
436 }                                                 653 }
437                                                   654 
438 void flush_tlb_page(struct vm_area_struct *vma    655 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
439 {                                                 656 {
                                                   >> 657         u32 old_mmid;
                                                   >> 658 
440         preempt_disable();                        659         preempt_disable();
441         if ((atomic_read(&vma->vm_mm->mm_users !! 660         if (cpu_has_mmid) {
442             (current->mm != vma->vm_mm)) {     !! 661                 htw_stop();
443                 struct flush_tlb_data fd;      !! 662                 old_mmid = read_c0_memorymapid();
444                                                !! 663                 write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
445                 fd.vma = vma;                  !! 664                 mtc0_tlbw_hazard();
446                 fd.addr1 = page;               !! 665                 ginvt_va_mmid(page);
447                 smp_call_function(flush_tlb_pa !! 666                 sync_ginv();
                                                   >> 667                 write_c0_memorymapid(old_mmid);
                                                   >> 668                 instruction_hazard();
                                                   >> 669                 htw_start();
                                                   >> 670         } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
                                                   >> 671                    (current->mm != vma->vm_mm)) {
                                                   >> 672                 struct flush_tlb_data fd = {
                                                   >> 673                         .vma = vma,
                                                   >> 674                         .addr1 = page,
                                                   >> 675                 };
                                                   >> 676 
                                                   >> 677                 smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
                                                   >> 678                 local_flush_tlb_page(vma, page);
448         } else {                                  679         } else {
449                 int i;                         !! 680                 unsigned int cpu;
450                 for_each_online_cpu(i)         !! 681 
451                         if (smp_processor_id() !! 682                 for_each_online_cpu(cpu) {
452                                 cpu_context(i, !! 683                         /*
                                                   >> 684                          * flush_cache_page() only does partial flushes, so
                                                   >> 685                          * invalidate the ASID without making it appear to
                                                   >> 686                          * has_valid_asid() as if the mm has been completely
                                                   >> 687                          * unused by that CPU.
                                                   >> 688                          */
                                                   >> 689                         if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                                   >> 690                                 set_cpu_context(cpu, vma->vm_mm, 1);
                                                   >> 691                 }
                                                   >> 692                 local_flush_tlb_page(vma, page);
453         }                                         693         }
454         local_flush_tlb_page(vma, page);       << 
455         preempt_enable();                         694         preempt_enable();
456 }                                                 695 }
457                                                   696 
458 static void flush_tlb_one_ipi(void *info)         697 static void flush_tlb_one_ipi(void *info)
459 {                                                 698 {
460         struct flush_tlb_data *fd = (struct fl !! 699         unsigned long vaddr = (unsigned long) info;
461         local_flush_tlb_one(fd->addr1, fd->add !! 700 
                                                   >> 701         local_flush_tlb_one(vaddr);
462 }                                                 702 }
463                                                   703 
464 void flush_tlb_one(unsigned long asid, unsigne !! 704 void flush_tlb_one(unsigned long vaddr)
465 {                                                 705 {
466         struct flush_tlb_data fd;              !! 706         smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
                                                   >> 707 }
467                                                   708 
468         fd.addr1 = asid;                       !! 709 EXPORT_SYMBOL(flush_tlb_page);
469         fd.addr2 = vaddr;                      !! 710 EXPORT_SYMBOL(flush_tlb_one);
470                                                   711 
471         smp_call_function(flush_tlb_one_ipi, ( !! 712 #ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
472         local_flush_tlb_one(asid, vaddr);      !! 713 void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
                                                   >> 714 {
                                                   >> 715         if (mp_ops->cleanup_dead_cpu)
                                                   >> 716                 mp_ops->cleanup_dead_cpu(cpu);
473 }                                                 717 }
474                                                << 
475 #endif                                            718 #endif
                                                   >> 719 
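
For flush_tlb_one() above, the mips side drops the argument struct entirely and smuggles the virtual address through the void * itself, which is fine whenever the scalar fits in a pointer (the sh variant still needs the struct because it passes both an asid and an address). A round-trip sketch of the cast idiom:

#include <stdio.h>
#include <stdint.h>

static void handler(void *info)
{
        unsigned long vaddr = (unsigned long)(uintptr_t)info;

        printf("flush vaddr %#lx\n", vaddr);
}

int main(void)
{
        unsigned long vaddr = 0xdeadb000UL;

        /* as in smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr) */
        handler((void *)(uintptr_t)vaddr);
        return 0;
}
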
                                                   >> 720 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
                                                   >> 721 
                                                   >> 722 static void tick_broadcast_callee(void *info)
                                                   >> 723 {
                                                   >> 724         tick_receive_broadcast();
                                                   >> 725 }
                                                   >> 726 
                                                   >> 727 static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
                                                   >> 728         CSD_INIT(tick_broadcast_callee, NULL);
                                                   >> 729 
                                                   >> 730 void tick_broadcast(const struct cpumask *mask)
                                                   >> 731 {
                                                   >> 732         call_single_data_t *csd;
                                                   >> 733         int cpu;
                                                   >> 734 
                                                   >> 735         for_each_cpu(cpu, mask) {
                                                   >> 736                 csd = &per_cpu(tick_broadcast_csd, cpu);
                                                   >> 737                 smp_call_function_single_async(cpu, csd);
                                                   >> 738         }
                                                   >> 739 }
                                                   >> 740 
                                                   >> 741 #endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
476                                                   742 
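
In the tick-broadcast block above, every CPU gets its own call_single_data_t, bound to tick_broadcast_callee once at build time by CSD_INIT(); smp_call_function_single_async() must never be handed a descriptor that is still in flight, and per-CPU ownership guarantees that without any allocation on the signalling path. A toy model of such a pre-bound per-CPU descriptor table (illustrative types; the range designator is a GCC extension also used in kernel code):

#include <stdio.h>

#define NR_CPUS 8

struct csd {                    /* stand-in for call_single_data_t */
        void (*func)(void *);
        void *info;
};

static void tick_callee(void *info)
{
        (void)info;             /* unused, as in tick_broadcast_callee() */
        printf("tick broadcast delivered\n");
}

/* One descriptor per CPU, initialised once; sending never allocates. */
static struct csd tick_csd[NR_CPUS] = {
        [0 ... NR_CPUS - 1] = { .func = tick_callee, .info = NULL },
};

int main(void)
{
        unsigned long mask = 0x0a;      /* pretend CPUs 1 and 3 need a tick */
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (mask & (1UL << cpu))
                        tick_csd[cpu].func(tick_csd[cpu].info); /* IPI stand-in */
        return 0;
}
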
