TOMOYO Linux Cross Reference
Linux/arch/sh/kernel/smp.c

Diff markup

Differences between /arch/sh/kernel/smp.c (Version linux-6.12-rc7) and /arch/alpha/kernel/smp.c (Version linux-5.3.18)


  1 // SPDX-License-Identifier: GPL-2.0                 1 // SPDX-License-Identifier: GPL-2.0
  2 /*                                                  2 /*
  3  * arch/sh/kernel/smp.c                        !!   3  *      linux/arch/alpha/kernel/smp.c
  4  *                                                  4  *
  5  * SMP support for the SuperH processors.      !!   5  *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
                                                   >>   6  *            Renamed modified smp_call_function to smp_call_function_on_cpu()
                                                   >>   7  *            Created an function that conforms to the old calling convention
                                                   >>   8  *            of smp_call_function().
                                                   >>   9  *
                                                   >>  10  *            This is helpful for DCPI.
  6  *                                                 11  *
  7  * Copyright (C) 2002 - 2010 Paul Mundt        << 
  8  * Copyright (C) 2006 - 2007 Akio Idehara      << 
  9  */                                                12  */
                                                   >>  13 
                                                   >>  14 #include <linux/errno.h>
                                                   >>  15 #include <linux/kernel.h>
                                                   >>  16 #include <linux/kernel_stat.h>
                                                   >>  17 #include <linux/module.h>
                                                   >>  18 #include <linux/sched/mm.h>
                                                   >>  19 #include <linux/mm.h>
 10 #include <linux/err.h>                             20 #include <linux/err.h>
 11 #include <linux/cache.h>                       !!  21 #include <linux/threads.h>
 12 #include <linux/cpumask.h>                     !!  22 #include <linux/smp.h>
 13 #include <linux/delay.h>                       !!  23 #include <linux/interrupt.h>
 14 #include <linux/init.h>                            24 #include <linux/init.h>
                                                   >>  25 #include <linux/delay.h>
 15 #include <linux/spinlock.h>                        26 #include <linux/spinlock.h>
 16 #include <linux/mm.h>                          !!  27 #include <linux/irq.h>
 17 #include <linux/module.h>                      !!  28 #include <linux/cache.h>
                                                   >>  29 #include <linux/profile.h>
                                                   >>  30 #include <linux/bitops.h>
 18 #include <linux/cpu.h>                             31 #include <linux/cpu.h>
 19 #include <linux/interrupt.h>                   !!  32 
 20 #include <linux/sched/mm.h>                    !!  33 #include <asm/hwrpb.h>
 21 #include <linux/sched/hotplug.h>               !!  34 #include <asm/ptrace.h>
 22 #include <linux/atomic.h>                          35 #include <linux/atomic.h>
 23 #include <linux/clockchips.h>                  << 
 24 #include <linux/profile.h>                     << 
 25                                                    36 
 26 #include <asm/processor.h>                     !!  37 #include <asm/io.h>
                                                   >>  38 #include <asm/irq.h>
                                                   >>  39 #include <asm/pgtable.h>
                                                   >>  40 #include <asm/pgalloc.h>
 27 #include <asm/mmu_context.h>                       41 #include <asm/mmu_context.h>
 28 #include <asm/smp.h>                           !!  42 #include <asm/tlbflush.h>
 29 #include <asm/cacheflush.h>                    << 
 30 #include <asm/sections.h>                      << 
 31 #include <asm/setup.h>                         << 
 32                                                    43 
 33 int __cpu_number_map[NR_CPUS];          /* Map !!  44 #include "proto.h"
 34 int __cpu_logical_map[NR_CPUS];         /* Map !!  45 #include "irq_impl.h"
 35                                                    46 
 36 struct plat_smp_ops *mp_ops = NULL;            << 
 37                                                    47 
 38 /* State of each CPU */                        !!  48 #define DEBUG_SMP 0
 39 DEFINE_PER_CPU(int, cpu_state) = { 0 };        !!  49 #if DEBUG_SMP
                                                   >>  50 #define DBGS(args)      printk args
                                                   >>  51 #else
                                                   >>  52 #define DBGS(args)
                                                   >>  53 #endif
 40                                                    54 
 41 void register_smp_ops(struct plat_smp_ops *ops !!  55 /* A collection of per-processor data.  */
 42 {                                              !!  56 struct cpuinfo_alpha cpu_data[NR_CPUS];
 43         if (mp_ops)                            !!  57 EXPORT_SYMBOL(cpu_data);
 44                 printk(KERN_WARNING "Overridin !!  58 
                                                   >>  59 /* A collection of single bit ipi messages.  */
                                                   >>  60 static struct {
                                                   >>  61         unsigned long bits ____cacheline_aligned;
                                                   >>  62 } ipi_data[NR_CPUS] __cacheline_aligned;
                                                   >>  63 
                                                   >>  64 enum ipi_message_type {
                                                   >>  65         IPI_RESCHEDULE,
                                                   >>  66         IPI_CALL_FUNC,
                                                   >>  67         IPI_CPU_STOP,
                                                   >>  68 };
 45                                                    69 
 46         mp_ops = ops;                          !!  70 /* Set to a secondary's cpuid when it comes online.  */
 47 }                                              !!  71 static int smp_secondary_alive = 0;
 48                                                    72 
 49 static inline void smp_store_cpu_info(unsigned !!  73 int smp_num_probed;             /* Internal processor count */
 50 {                                              !!  74 int smp_num_cpus = 1;           /* Number that came online.  */
 51         struct sh_cpuinfo *c = cpu_data + cpu; !!  75 EXPORT_SYMBOL(smp_num_cpus);
 52                                                    76 
 53         memcpy(c, &boot_cpu_data, sizeof(struc !!  77 /*
                                                   >>  78  * Called by both boot and secondaries to move global data into
                                                   >>  79  *  per-processor storage.
                                                   >>  80  */
                                                   >>  81 static inline void __init
                                                   >>  82 smp_store_cpu_info(int cpuid)
                                                   >>  83 {
                                                   >>  84         cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
                                                   >>  85         cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
                                                   >>  86         cpu_data[cpuid].need_new_asn = 0;
                                                   >>  87         cpu_data[cpuid].asn_lock = 0;
                                                   >>  88 }
 54                                                    89 
 55         c->loops_per_jiffy = loops_per_jiffy;  !!  90 /*
                                                   >>  91  * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
                                                   >>  92  */
                                                   >>  93 static inline void __init
                                                   >>  94 smp_setup_percpu_timer(int cpuid)
                                                   >>  95 {
                                                   >>  96         cpu_data[cpuid].prof_counter = 1;
                                                   >>  97         cpu_data[cpuid].prof_multiplier = 1;
 56 }                                                  98 }
 57                                                    99 
 58 void __init smp_prepare_cpus(unsigned int max_ !! 100 static void __init
                                                   >> 101 wait_boot_cpu_to_stop(int cpuid)
 59 {                                                 102 {
 60         unsigned int cpu = smp_processor_id(); !! 103         unsigned long stop = jiffies + 10*HZ;
 61                                                   104 
 62         init_new_context(current, &init_mm);   !! 105         while (time_before(jiffies, stop)) {
 63         current_thread_info()->cpu = cpu;      !! 106                 if (!smp_secondary_alive)
 64         mp_ops->prepare_cpus(max_cpus);        !! 107                         return;
                                                   >> 108                 barrier();
                                                   >> 109         }
 65                                                   110 
 66 #ifndef CONFIG_HOTPLUG_CPU                     !! 111         printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
 67         init_cpu_present(cpu_possible_mask);   !! 112         for (;;)
 68 #endif                                         !! 113                 barrier();
 69 }                                                 114 }
 70                                                   115 
 71 void __init smp_prepare_boot_cpu(void)         !! 116 /*
                                                   >> 117  * Where secondaries begin a life of C.
                                                   >> 118  */
                                                   >> 119 void __init
                                                   >> 120 smp_callin(void)
 72 {                                                 121 {
 73         unsigned int cpu = smp_processor_id(); !! 122         int cpuid = hard_smp_processor_id();
 74                                                   123 
 75         __cpu_number_map[0] = cpu;             !! 124         if (cpu_online(cpuid)) {
 76         __cpu_logical_map[0] = cpu;            !! 125                 printk("??, cpu 0x%x already present??\n", cpuid);
                                                   >> 126                 BUG();
                                                   >> 127         }
                                                   >> 128         set_cpu_online(cpuid, true);
 77                                                   129 
 78         set_cpu_online(cpu, true);             !! 130         /* Turn on machine checks.  */
 79         set_cpu_possible(cpu, true);           !! 131         wrmces(7);
 80                                                   132 
 81         per_cpu(cpu_state, cpu) = CPU_ONLINE;  !! 133         /* Set trap vectors.  */
 82 }                                              !! 134         trap_init();
 83                                                   135 
 84 #ifdef CONFIG_HOTPLUG_CPU                      !! 136         /* Set interrupt vector.  */
 85 void native_cpu_die(unsigned int cpu)          !! 137         wrent(entInt, 0);
 86 {                                              << 
 87         unsigned int i;                        << 
 88                                                   138 
 89         for (i = 0; i < 10; i++) {             !! 139         /* Get our local ticker going. */
 90                 smp_rmb();                     !! 140         smp_setup_percpu_timer(cpuid);
 91                 if (per_cpu(cpu_state, cpu) == !! 141         init_clockevent();
 92                         if (system_state == SY << 
 93                                 pr_info("CPU % << 
 94                                                   142 
 95                         return;                !! 143         /* Call platform-specific callin, if specified */
 96                 }                              !! 144         if (alpha_mv.smp_callin)
                                                   >> 145                 alpha_mv.smp_callin();
 97                                                   146 
 98                 msleep(100);                   !! 147         /* All kernel threads share the same mm context.  */
 99         }                                      !! 148         mmgrab(&init_mm);
                                                   >> 149         current->active_mm = &init_mm;
100                                                   150 
101         pr_err("CPU %u didn't die...\n", cpu); !! 151         /* inform the notifiers about the new cpu */
102 }                                              !! 152         notify_cpu_starting(cpuid);
103                                                   153 
104 int native_cpu_disable(unsigned int cpu)       !! 154         /* Must have completely accurate bogos.  */
105 {                                              !! 155         local_irq_enable();
106         return cpu == 0 ? -EPERM : 0;          << 
107 }                                              << 
108                                                   156 
109 void play_dead_common(void)                    !! 157         /* Wait boot CPU to stop with irq enabled before running
110 {                                              !! 158            calibrate_delay. */
111         idle_task_exit();                      !! 159         wait_boot_cpu_to_stop(cpuid);
112         irq_ctx_exit(raw_smp_processor_id());  << 
113         mb();                                     160         mb();
                                                   >> 161         calibrate_delay();
114                                                   162 
115         __this_cpu_write(cpu_state, CPU_DEAD); !! 163         smp_store_cpu_info(cpuid);
116         local_irq_disable();                   !! 164         /* Allow master to continue only after we written loops_per_jiffy.  */
117 }                                              !! 165         wmb();
                                                   >> 166         smp_secondary_alive = 1;
118                                                   167 
119 void native_play_dead(void)                    !! 168         DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
120 {                                              !! 169               cpuid, current, current->active_mm));
121         play_dead_common();                    !! 170 
                                                   >> 171         preempt_disable();
                                                   >> 172         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
122 }                                                 173 }
123                                                   174 
124 int __cpu_disable(void)                        !! 175 /* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
                                                   >> 176 static int
                                                   >> 177 wait_for_txrdy (unsigned long cpumask)
125 {                                                 178 {
126         unsigned int cpu = smp_processor_id(); !! 179         unsigned long timeout;
127         int ret;                               << 
128                                                   180 
129         ret = mp_ops->cpu_disable(cpu);        !! 181         if (!(hwrpb->txrdy & cpumask))
130         if (ret)                               !! 182                 return 0;
131                 return ret;                    << 
132                                                   183 
133         /*                                     !! 184         timeout = jiffies + 10*HZ;
134          * Take this CPU offline.  Once we cle !! 185         while (time_before(jiffies, timeout)) {
135          * and we must not schedule until we'r !! 186                 if (!(hwrpb->txrdy & cpumask))
136          */                                    !! 187                         return 0;
137         set_cpu_online(cpu, false);            !! 188                 udelay(10);
                                                   >> 189                 barrier();
                                                   >> 190         }
138                                                   191 
139         /*                                     !! 192         return -1;
140          * OK - migrate IRQs away from this CP !! 193 }
141          */                                    << 
142         migrate_irqs();                        << 
143                                                   194 
144         /*                                     !! 195 /*
145          * Flush user cache and TLB mappings,  !! 196  * Send a message to a secondary's console.  "START" is one such
146          * from the vm mask set of all process !! 197  * interesting message.  ;-)
147          */                                    !! 198  */
148         flush_cache_all();                     !! 199 static void
149 #ifdef CONFIG_MMU                              !! 200 send_secondary_console_msg(char *str, int cpuid)
150         local_flush_tlb_all();                 !! 201 {
151 #endif                                         !! 202         struct percpu_struct *cpu;
                                                   >> 203         register char *cp1, *cp2;
                                                   >> 204         unsigned long cpumask;
                                                   >> 205         size_t len;
                                                   >> 206 
                                                   >> 207         cpu = (struct percpu_struct *)
                                                   >> 208                 ((char*)hwrpb
                                                   >> 209                  + hwrpb->processor_offset
                                                   >> 210                  + cpuid * hwrpb->processor_size);
                                                   >> 211 
                                                   >> 212         cpumask = (1UL << cpuid);
                                                   >> 213         if (wait_for_txrdy(cpumask))
                                                   >> 214                 goto timeout;
                                                   >> 215 
                                                   >> 216         cp2 = str;
                                                   >> 217         len = strlen(cp2);
                                                   >> 218         *(unsigned int *)&cpu->ipc_buffer[0] = len;
                                                   >> 219         cp1 = (char *) &cpu->ipc_buffer[1];
                                                   >> 220         memcpy(cp1, cp2, len);
152                                                   221 
153         clear_tasks_mm_cpumask(cpu);           !! 222         /* atomic test and set */
                                                   >> 223         wmb();
                                                   >> 224         set_bit(cpuid, &hwrpb->rxrdy);
154                                                   225 
155         return 0;                              !! 226         if (wait_for_txrdy(cpumask))
156 }                                              !! 227                 goto timeout;
157 #else /* ... !CONFIG_HOTPLUG_CPU */            !! 228         return;
158 int native_cpu_disable(unsigned int cpu)       << 
159 {                                              << 
160         return -ENOSYS;                        << 
161 }                                              << 
162                                                   229 
163 void native_cpu_die(unsigned int cpu)          !! 230  timeout:
164 {                                              !! 231         printk("Processor %x not ready\n", cpuid);
165         /* We said "no" in __cpu_disable */    << 
166         BUG();                                 << 
167 }                                                 232 }
168                                                   233 
169 void native_play_dead(void)                    !! 234 /*
                                                   >> 235  * A secondary console wants to send a message.  Receive it.
                                                   >> 236  */
                                                   >> 237 static void
                                                   >> 238 recv_secondary_console_msg(void)
170 {                                                 239 {
171         BUG();                                 !! 240         int mycpu, i, cnt;
                                                   >> 241         unsigned long txrdy = hwrpb->txrdy;
                                                   >> 242         char *cp1, *cp2, buf[80];
                                                   >> 243         struct percpu_struct *cpu;
                                                   >> 244 
                                                   >> 245         DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));
                                                   >> 246 
                                                   >> 247         mycpu = hard_smp_processor_id();
                                                   >> 248 
                                                   >> 249         for (i = 0; i < NR_CPUS; i++) {
                                                   >> 250                 if (!(txrdy & (1UL << i)))
                                                   >> 251                         continue;
                                                   >> 252 
                                                   >> 253                 DBGS(("recv_secondary_console_msg: "
                                                   >> 254                       "TXRDY contains CPU %d.\n", i));
                                                   >> 255 
                                                   >> 256                 cpu = (struct percpu_struct *)
                                                   >> 257                   ((char*)hwrpb
                                                   >> 258                    + hwrpb->processor_offset
                                                   >> 259                    + i * hwrpb->processor_size);
                                                   >> 260 
                                                   >> 261                 DBGS(("recv_secondary_console_msg: on %d from %d"
                                                   >> 262                       " HALT_REASON 0x%lx FLAGS 0x%lx\n",
                                                   >> 263                       mycpu, i, cpu->halt_reason, cpu->flags));
                                                   >> 264 
                                                   >> 265                 cnt = cpu->ipc_buffer[0] >> 32;
                                                   >> 266                 if (cnt <= 0 || cnt >= 80)
                                                   >> 267                         strcpy(buf, "<<< BOGUS MSG >>>");
                                                   >> 268                 else {
                                                   >> 269                         cp1 = (char *) &cpu->ipc_buffer[1];
                                                   >> 270                         cp2 = buf;
                                                   >> 271                         memcpy(cp2, cp1, cnt);
                                                   >> 272                         cp2[cnt] = '\0';
                                                   >> 273                         
                                                   >> 274                         while ((cp2 = strchr(cp2, '\r')) != 0) {
                                                   >> 275                                 *cp2 = ' ';
                                                   >> 276                                 if (cp2[1] == '\n')
                                                   >> 277                                         cp2[1] = ' ';
                                                   >> 278                         }
                                                   >> 279                 }
                                                   >> 280 
                                                   >> 281                 DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
                                                   >> 282                       "message is '%s'\n", mycpu, buf));
                                                   >> 283         }
                                                   >> 284 
                                                   >> 285         hwrpb->txrdy = 0;
172 }                                                 286 }
173 #endif                                         << 
174                                                   287 
175 static asmlinkage void start_secondary(void)   !! 288 /*
                                                   >> 289  * Convince the console to have a secondary cpu begin execution.
                                                   >> 290  */
                                                   >> 291 static int
                                                   >> 292 secondary_cpu_start(int cpuid, struct task_struct *idle)
176 {                                                 293 {
177         unsigned int cpu = smp_processor_id(); !! 294         struct percpu_struct *cpu;
178         struct mm_struct *mm = &init_mm;       !! 295         struct pcb_struct *hwpcb, *ipcb;
179                                                !! 296         unsigned long timeout;
180         enable_mmu();                          !! 297           
181         mmgrab(mm);                            !! 298         cpu = (struct percpu_struct *)
182         mmget(mm);                             !! 299                 ((char*)hwrpb
183         current->active_mm = mm;               !! 300                  + hwrpb->processor_offset
184 #ifdef CONFIG_MMU                              !! 301                  + cpuid * hwrpb->processor_size);
185         enter_lazy_tlb(mm, current);           !! 302         hwpcb = (struct pcb_struct *) cpu->hwpcb;
186         local_flush_tlb_all();                 !! 303         ipcb = &task_thread_info(idle)->pcb;
                                                   >> 304 
                                                   >> 305         /* Initialize the CPU's HWPCB to something just good enough for
                                                   >> 306            us to get started.  Immediately after starting, we'll swpctx
                                                   >> 307            to the target idle task's pcb.  Reuse the stack in the mean
                                                   >> 308            time.  Precalculate the target PCBB.  */
                                                   >> 309         hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
                                                   >> 310         hwpcb->usp = 0;
                                                   >> 311         hwpcb->ptbr = ipcb->ptbr;
                                                   >> 312         hwpcb->pcc = 0;
                                                   >> 313         hwpcb->asn = 0;
                                                   >> 314         hwpcb->unique = virt_to_phys(ipcb);
                                                   >> 315         hwpcb->flags = ipcb->flags;
                                                   >> 316         hwpcb->res1 = hwpcb->res2 = 0;
                                                   >> 317 
                                                   >> 318 #if 0
                                                   >> 319         DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
                                                   >> 320               hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
187 #endif                                            321 #endif
                                                   >> 322         DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
                                                   >> 323               cpuid, idle->state, ipcb->flags));
188                                                   324 
189         per_cpu_trap_init();                   !! 325         /* Setup HWRPB fields that SRM uses to activate secondary CPU */
                                                   >> 326         hwrpb->CPU_restart = __smp_callin;
                                                   >> 327         hwrpb->CPU_restart_data = (unsigned long) __smp_callin;
190                                                   328 
191         notify_cpu_starting(cpu);              !! 329         /* Recalculate and update the HWRPB checksum */
                                                   >> 330         hwrpb_update_checksum(hwrpb);
192                                                   331 
193         local_irq_enable();                    !! 332         /*
                                                   >> 333          * Send a "start" command to the specified processor.
                                                   >> 334          */
194                                                   335 
195         calibrate_delay();                     !! 336         /* SRM III 3.4.1.3 */
                                                   >> 337         cpu->flags |= 0x22;     /* turn on Context Valid and Restart Capable */
                                                   >> 338         cpu->flags &= ~1;       /* turn off Bootstrap In Progress */
                                                   >> 339         wmb();
196                                                   340 
197         smp_store_cpu_info(cpu);               !! 341         send_secondary_console_msg("START\r\n", cpuid);
198                                                   342 
199         set_cpu_online(cpu, true);             !! 343         /* Wait 10 seconds for an ACK from the console.  */
200         per_cpu(cpu_state, cpu) = CPU_ONLINE;  !! 344         timeout = jiffies + 10*HZ;
                                                   >> 345         while (time_before(jiffies, timeout)) {
                                                   >> 346                 if (cpu->flags & 1)
                                                   >> 347                         goto started;
                                                   >> 348                 udelay(10);
                                                   >> 349                 barrier();
                                                   >> 350         }
                                                   >> 351         printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
                                                   >> 352         return -1;
201                                                   353 
202         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE !! 354  started:
                                                   >> 355         DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
                                                   >> 356         return 0;
203 }                                                 357 }
204                                                   358 
205 extern struct {                                !! 359 /*
206         unsigned long sp;                      !! 360  * Bring one cpu online.
207         unsigned long bss_start;               !! 361  */
208         unsigned long bss_end;                 !! 362 static int
209         void *start_kernel_fn;                 !! 363 smp_boot_one_cpu(int cpuid, struct task_struct *idle)
210         void *cpu_init_fn;                     << 
211         void *thread_info;                     << 
212 } stack_start;                                 << 
213                                                << 
214 int __cpu_up(unsigned int cpu, struct task_str << 
215 {                                                 364 {
216         unsigned long timeout;                    365         unsigned long timeout;
217                                                   366 
218         per_cpu(cpu_state, cpu) = CPU_UP_PREPA !! 367         /* Signal the secondary to wait a moment.  */
                                                   >> 368         smp_secondary_alive = -1;
219                                                   369 
220         /* Fill in data in head.S for secondar !! 370         /* Whirrr, whirrr, whirrrrrrrrr... */
221         stack_start.sp = tsk->thread.sp;       !! 371         if (secondary_cpu_start(cpuid, idle))
222         stack_start.thread_info = tsk->stack;  !! 372                 return -1;
223         stack_start.bss_start = 0; /* don't cl << 
224         stack_start.start_kernel_fn = start_se << 
225                                                   373 
226         flush_icache_range((unsigned long)&sta !! 374         /* Notify the secondary CPU it can run calibrate_delay.  */
227                            (unsigned long)&sta !! 375         mb();
228         wmb();                                 !! 376         smp_secondary_alive = 0;
229                                                << 
230         mp_ops->start_cpu(cpu, (unsigned long) << 
231                                                   377 
232         timeout = jiffies + HZ;                !! 378         /* We've been acked by the console; wait one second for
                                                   >> 379            the task to start up for real.  */
                                                   >> 380         timeout = jiffies + 1*HZ;
233         while (time_before(jiffies, timeout))     381         while (time_before(jiffies, timeout)) {
234                 if (cpu_online(cpu))           !! 382                 if (smp_secondary_alive == 1)
235                         break;                 !! 383                         goto alive;
236                                                << 
237                 udelay(10);                       384                 udelay(10);
238                 barrier();                        385                 barrier();
239         }                                         386         }
240                                                   387 
241         if (cpu_online(cpu))                   !! 388         /* We failed to boot the CPU.  */
242                 return 0;                      !! 389 
                                                   >> 390         printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
                                                   >> 391         return -1;
243                                                   392 
244         return -ENOENT;                        !! 393  alive:
                                                   >> 394         /* Another "Red Snapper". */
                                                   >> 395         return 0;
245 }                                                 396 }
246                                                   397 
247 void __init smp_cpus_done(unsigned int max_cpu !! 398 /*
                                                   >> 399  * Called from setup_arch.  Detect an SMP system and which processors
                                                   >> 400  * are present.
                                                   >> 401  */
                                                   >> 402 void __init
                                                   >> 403 setup_smp(void)
248 {                                                 404 {
249         unsigned long bogosum = 0;             !! 405         struct percpu_struct *cpubase, *cpu;
250         int cpu;                               !! 406         unsigned long i;
                                                   >> 407 
                                                   >> 408         if (boot_cpuid != 0) {
                                                   >> 409                 printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
                                                   >> 410                        boot_cpuid);
                                                   >> 411         }
251                                                   412 
252         for_each_online_cpu(cpu)               !! 413         if (hwrpb->nr_processors > 1) {
253                 bogosum += cpu_data[cpu].loops !! 414                 int boot_cpu_palrev;
254                                                   415 
255         printk(KERN_INFO "SMP: Total of %d pro !! 416                 DBGS(("setup_smp: nr_processors %ld\n",
256                "(%lu.%02lu BogoMIPS).\n", num_ !! 417                       hwrpb->nr_processors));
257                bogosum / (500000/HZ),          !! 418 
258                (bogosum / (5000/HZ)) % 100);   !! 419                 cpubase = (struct percpu_struct *)
                                                   >> 420                         ((char*)hwrpb + hwrpb->processor_offset);
                                                   >> 421                 boot_cpu_palrev = cpubase->pal_revision;
                                                   >> 422 
                                                   >> 423                 for (i = 0; i < hwrpb->nr_processors; i++) {
                                                   >> 424                         cpu = (struct percpu_struct *)
                                                   >> 425                                 ((char *)cpubase + i*hwrpb->processor_size);
                                                   >> 426                         if ((cpu->flags & 0x1cc) == 0x1cc) {
                                                   >> 427                                 smp_num_probed++;
                                                   >> 428                                 set_cpu_possible(i, true);
                                                   >> 429                                 set_cpu_present(i, true);
                                                   >> 430                                 cpu->pal_revision = boot_cpu_palrev;
                                                   >> 431                         }
                                                   >> 432 
                                                   >> 433                         DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
                                                   >> 434                               i, cpu->flags, cpu->type));
                                                   >> 435                         DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
                                                   >> 436                               i, cpu->pal_revision));
                                                   >> 437                 }
                                                   >> 438         } else {
                                                   >> 439                 smp_num_probed = 1;
                                                   >> 440         }
                                                   >> 441 
                                                   >> 442         printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
                                                   >> 443                smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
259 }                                                 444 }
260                                                   445 
261 void arch_smp_send_reschedule(int cpu)         !! 446 /*
                                                   >> 447  * Called by smp_init prepare the secondaries
                                                   >> 448  */
                                                   >> 449 void __init
                                                   >> 450 smp_prepare_cpus(unsigned int max_cpus)
262 {                                                 451 {
263         mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDU !! 452         /* Take care of some initial bookkeeping.  */
                                                   >> 453         memset(ipi_data, 0, sizeof(ipi_data));
                                                   >> 454 
                                                   >> 455         current_thread_info()->cpu = boot_cpuid;
                                                   >> 456 
                                                   >> 457         smp_store_cpu_info(boot_cpuid);
                                                   >> 458         smp_setup_percpu_timer(boot_cpuid);
                                                   >> 459 
                                                   >> 460         /* Nothing to do on a UP box, or when told not to.  */
                                                   >> 461         if (smp_num_probed == 1 || max_cpus == 0) {
                                                   >> 462                 init_cpu_possible(cpumask_of(boot_cpuid));
                                                   >> 463                 init_cpu_present(cpumask_of(boot_cpuid));
                                                   >> 464                 printk(KERN_INFO "SMP mode deactivated.\n");
                                                   >> 465                 return;
                                                   >> 466         }
                                                   >> 467 
                                                   >> 468         printk(KERN_INFO "SMP starting up secondaries.\n");
                                                   >> 469 
                                                   >> 470         smp_num_cpus = smp_num_probed;
264 }                                                 471 }
265                                                   472 
266 void smp_send_stop(void)                       !! 473 void
                                                   >> 474 smp_prepare_boot_cpu(void)
267 {                                                 475 {
268         smp_call_function(stop_this_cpu, 0, 0) << 
269 }                                                 476 }
270                                                   477 
271 void arch_send_call_function_ipi_mask(const st !! 478 int
                                                   >> 479 __cpu_up(unsigned int cpu, struct task_struct *tidle)
                                                   >> 480 {
                                                   >> 481         smp_boot_one_cpu(cpu, tidle);
                                                   >> 482 
                                                   >> 483         return cpu_online(cpu) ? 0 : -ENOSYS;
                                                   >> 484 }
                                                   >> 485 
                                                   >> 486 void __init
                                                   >> 487 smp_cpus_done(unsigned int max_cpus)
272 {                                                 488 {
273         int cpu;                                  489         int cpu;
                                                   >> 490         unsigned long bogosum = 0;
274                                                   491 
275         for_each_cpu(cpu, mask)                !! 492         for(cpu = 0; cpu < NR_CPUS; cpu++) 
276                 mp_ops->send_ipi(cpu, SMP_MSG_ !! 493                 if (cpu_online(cpu))
                                                   >> 494                         bogosum += cpu_data[cpu].loops_per_jiffy;
                                                   >> 495         
                                                   >> 496         printk(KERN_INFO "SMP: Total of %d processors activated "
                                                   >> 497                "(%lu.%02lu BogoMIPS).\n",
                                                   >> 498                num_online_cpus(), 
                                                   >> 499                (bogosum + 2500) / (500000/HZ),
                                                   >> 500                ((bogosum + 2500) / (5000/HZ)) % 100);
277 }                                                 501 }
278                                                   502 
279 void arch_send_call_function_single_ipi(int cp !! 503 int
                                                   >> 504 setup_profiling_timer(unsigned int multiplier)
280 {                                                 505 {
281         mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION !! 506         return -EINVAL;
282 }                                                 507 }
283                                                   508 
284 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST    !! 509 static void
285 void tick_broadcast(const struct cpumask *mask !! 510 send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
286 {                                                 511 {
287         int cpu;                               !! 512         int i;
                                                   >> 513 
                                                   >> 514         mb();
                                                   >> 515         for_each_cpu(i, to_whom)
                                                   >> 516                 set_bit(operation, &ipi_data[i].bits);
288                                                   517 
289         for_each_cpu(cpu, mask)                !! 518         mb();
290                 mp_ops->send_ipi(cpu, SMP_MSG_ !! 519         for_each_cpu(i, to_whom)
                                                   >> 520                 wripir(i);
291 }                                                 521 }
292                                                   522 
293 static void ipi_timer(void)                    !! 523 void
                                                   >> 524 handle_ipi(struct pt_regs *regs)
294 {                                                 525 {
295         irq_enter();                           !! 526         int this_cpu = smp_processor_id();
296         tick_receive_broadcast();              !! 527         unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
297         irq_exit();                            !! 528         unsigned long ops;
                                                   >> 529 
                                                   >> 530 #if 0
                                                   >> 531         DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
                                                   >> 532               this_cpu, *pending_ipis, regs->pc));
                                                   >> 533 #endif
                                                   >> 534 
                                                   >> 535         mb();   /* Order interrupt and bit testing. */
                                                   >> 536         while ((ops = xchg(pending_ipis, 0)) != 0) {
                                                   >> 537           mb(); /* Order bit clearing and data access. */
                                                   >> 538           do {
                                                   >> 539                 unsigned long which;
                                                   >> 540 
                                                   >> 541                 which = ops & -ops;
                                                   >> 542                 ops &= ~which;
                                                   >> 543                 which = __ffs(which);
                                                   >> 544 
                                                   >> 545                 switch (which) {
                                                   >> 546                 case IPI_RESCHEDULE:
                                                   >> 547                         scheduler_ipi();
                                                   >> 548                         break;
                                                   >> 549 
                                                   >> 550                 case IPI_CALL_FUNC:
                                                   >> 551                         generic_smp_call_function_interrupt();
                                                   >> 552                         break;
                                                   >> 553 
                                                   >> 554                 case IPI_CPU_STOP:
                                                   >> 555                         halt();
                                                   >> 556 
                                                   >> 557                 default:
                                                   >> 558                         printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
                                                   >> 559                                this_cpu, which);
                                                   >> 560                         break;
                                                   >> 561                 }
                                                   >> 562           } while (ops);
                                                   >> 563 
                                                   >> 564           mb(); /* Order data access and bit testing. */
                                                   >> 565         }
                                                   >> 566 
                                                   >> 567         cpu_data[this_cpu].ipi_count++;
                                                   >> 568 
                                                   >> 569         if (hwrpb->txrdy)
                                                   >> 570                 recv_secondary_console_msg();
298 }                                                 571 }
                                                   >> 572 
                                                   >> 573 void
                                                   >> 574 smp_send_reschedule(int cpu)
                                                   >> 575 {
                                                   >> 576 #ifdef DEBUG_IPI_MSG
                                                   >> 577         if (cpu == hard_smp_processor_id())
                                                   >> 578                 printk(KERN_WARNING
                                                   >> 579                        "smp_send_reschedule: Sending IPI to self.\n");
299 #endif                                            580 #endif
                                                   >> 581         send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
                                                   >> 582 }
300                                                   583 
301 void smp_message_recv(unsigned int msg)        !! 584 void
                                                   >> 585 smp_send_stop(void)
302 {                                                 586 {
303         switch (msg) {                         !! 587         cpumask_t to_whom;
304         case SMP_MSG_FUNCTION:                 !! 588         cpumask_copy(&to_whom, cpu_possible_mask);
305                 generic_smp_call_function_inte !! 589         cpumask_clear_cpu(smp_processor_id(), &to_whom);
306                 break;                         !! 590 #ifdef DEBUG_IPI_MSG
307         case SMP_MSG_RESCHEDULE:               !! 591         if (hard_smp_processor_id() != boot_cpu_id)
308                 scheduler_ipi();               !! 592                 printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
309                 break;                         << 
310         case SMP_MSG_FUNCTION_SINGLE:          << 
311                 generic_smp_call_function_sing << 
312                 break;                         << 
313 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST    << 
314         case SMP_MSG_TIMER:                    << 
315                 ipi_timer();                   << 
316                 break;                         << 
317 #endif                                            593 #endif
318         default:                               !! 594         send_ipi_message(&to_whom, IPI_CPU_STOP);
319                 printk(KERN_WARNING "SMP %d: % << 
320                        smp_processor_id(), __f << 
321                 break;                         << 
322         }                                      << 
323 }                                                 595 }
324                                                   596 
325 #ifdef CONFIG_PROFILING                        !! 597 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
326 /* Not really SMP stuff ... */                 << 
327 int setup_profiling_timer(unsigned int multipl << 
328 {                                                 598 {
329         return 0;                              !! 599         send_ipi_message(mask, IPI_CALL_FUNC);
                                                   >> 600 }
                                                   >> 601 
                                                   >> 602 void arch_send_call_function_single_ipi(int cpu)
                                                   >> 603 {
                                                   >> 604         send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
330 }                                                 605 }
331 #endif                                         << 
332                                                   606 
333 #ifdef CONFIG_MMU                              !! 607 static void
                                                   >> 608 ipi_imb(void *ignored)
                                                   >> 609 {
                                                   >> 610         imb();
                                                   >> 611 }
334                                                   612 
335 static void flush_tlb_all_ipi(void *info)      !! 613 void
                                                   >> 614 smp_imb(void)
336 {                                                 615 {
337         local_flush_tlb_all();                 !! 616         /* Must wait other processors to flush their icache before continue. */
                                                   >> 617         on_each_cpu(ipi_imb, NULL, 1);
338 }                                                 618 }
                                                   >> 619 EXPORT_SYMBOL(smp_imb);
339                                                   620 
340 void flush_tlb_all(void)                       !! 621 static void
                                                   >> 622 ipi_flush_tlb_all(void *ignored)
341 {                                                 623 {
342         on_each_cpu(flush_tlb_all_ipi, 0, 1);  !! 624         tbia();
343 }                                                 625 }
344                                                   626 
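/*
 * Editor's note, not part of either file: flush_tlb_all() above uses
 * on_each_cpu(), which runs the handler on every online cpu including the
 * caller, whereas the flush_tlb_mm/range/page paths below use
 * smp_call_function(), which targets only the *other* cpus and is therefore
 * always paired with an explicit local_flush_tlb_*() call.  A self-contained
 * sketch of that pairing (names hypothetical):
 */
static void count_one_cpu(void *counter)
{
	atomic_inc((atomic_t *)counter);		/* runs once per targeted cpu */
}

static void ipi_pairing_sketch(void)
{
	atomic_t hits = ATOMIC_INIT(0);

	smp_call_function(count_one_cpu, &hits, 1);	/* every cpu but this one; wait for completion */
	count_one_cpu(&hits);				/* do the local share ourselves */
	/* hits now equals num_online_cpus(), the same coverage that a single
	 * on_each_cpu(count_one_cpu, &hits, 1) call would have provided */
}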
345 static void flush_tlb_mm_ipi(void *mm)         !! 627 void
                                                   >> 628 flush_tlb_all(void)
346 {                                                 629 {
347         local_flush_tlb_mm((struct mm_struct *)mm); !! 630         /* Although we don't have any data to pass, we do want to
                                                   >> 631            synchronize with the other processors.  */
                                                   >> 632         on_each_cpu(ipi_flush_tlb_all, NULL, 1);
348 }                                                 633 }
349                                                   634 
350 /*                                             !! 635 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
351  * The following tlb flush calls are invoked when old translations are !! 636 
352  * being torn down, or pte attributes are changing. For single threaded !! 637 static void
353  * address spaces, a new context is obtained on the current cpu, and tlb !! 638 ipi_flush_tlb_mm(void *x)
354  * context on other cpus are invalidated to force a new context allocation !! 639 {
355  * at switch_mm time, should the mm ever be used on other cpus. For !! 640         struct mm_struct *mm = (struct mm_struct *) x;
356  * multithreaded address spaces, intercpu interrupts have to be sent. !! 641         if (mm == current->active_mm && !asn_locked())
357  * Another case where intercpu interrupts are required is when the target !! 642                 flush_tlb_current(mm);
358  * mm might be active on another cpu (eg debuggers doing the flushes on !! 643         else
359  * behalf of debugees, kswapd stealing pages from another process etc). !! 644                 flush_tlb_other(mm);
360  * Kanoj 07/00.                                !! 645 }
361  */                                            !! 646 
362 void flush_tlb_mm(struct mm_struct *mm)        !! 647 void
                                                   >> 648 flush_tlb_mm(struct mm_struct *mm)
363 {                                                 649 {
364         preempt_disable();                        650         preempt_disable();
365                                                   651 
366         if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { !! 652         if (mm == current->active_mm) {
367                 smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); !! 653                 flush_tlb_current(mm);
368         } else {                               !! 654                 if (atomic_read(&mm->mm_users) <= 1) {
369                 int i;                         !! 655                         int cpu, this_cpu = smp_processor_id();
370                 for_each_online_cpu(i)         !! 656                         for (cpu = 0; cpu < NR_CPUS; cpu++) {
371                         if (smp_processor_id() != i) !! 657                                 if (!cpu_online(cpu) || cpu == this_cpu)
372                                 cpu_context(i, mm) = 0; !! 658                                         continue;
                                                   >> 659                                 if (mm->context[cpu])
                                                   >> 660                                         mm->context[cpu] = 0;
                                                   >> 661                         }
                                                   >> 662                         preempt_enable();
                                                   >> 663                         return;
                                                   >> 664                 }
373         }                                         665         }
374         local_flush_tlb_mm(mm);                !! 666 
                                                   >> 667         smp_call_function(ipi_flush_tlb_mm, mm, 1);
375                                                   668 
376         preempt_enable();                         669         preempt_enable();
377 }                                                 670 }
                                                   >> 671 EXPORT_SYMBOL(flush_tlb_mm);
378                                                   672 
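/*
 * Editor's note, not part of either file: the "single threaded" branch of
 * flush_tlb_mm() above is the lazy scheme described in the Kanoj comment.
 * Instead of interrupting cpus that cannot currently be running this mm, it
 * zeroes their per-cpu context (ASID) slot; the next time such a cpu switches
 * to this mm, the zero forces a fresh ASID to be allocated, making any stale
 * TLB entries from the old ASID unreachable.  A sketch of the check a
 * switch_mm()-style path would perform (allocator name hypothetical):
 */
static unsigned long next_asid_sketch(unsigned int cpu);	/* hypothetical ASID allocator */

static void activate_context_sketch(struct mm_struct *mm, unsigned int cpu)
{
	if (cpu_context(cpu, mm) == 0)
		cpu_context(cpu, mm) = next_asid_sketch(cpu);
}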
379 struct flush_tlb_data {                        !! 673 struct flush_tlb_page_struct {
380         struct vm_area_struct *vma;               674         struct vm_area_struct *vma;
381         unsigned long addr1;                   !! 675         struct mm_struct *mm;
382         unsigned long addr2;                   !! 676         unsigned long addr;
383 };                                                677 };
384                                                   678 
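/*
 * Editor's note, not part of either file: struct flush_tlb_data above exists
 * only to squeeze a (vma, addr1, addr2) tuple through the single void *
 * argument that smp_call_function() offers.  Passing a pointer to an on-stack
 * instance is safe because the callers below use wait == 1, so the stack
 * frame outlives every remote handler.  The same pattern in isolation (names
 * hypothetical):
 */
struct remote_range_args {
	unsigned long addr1;
	unsigned long addr2;
};

static void remote_range_handler(void *info)
{
	struct remote_range_args *args = info;

	/* unpack both values on the remote cpu */
	pr_debug("remote flush of %lx-%lx\n", args->addr1, args->addr2);
}

static void run_remote_range(unsigned long start, unsigned long end)
{
	struct remote_range_args args = { .addr1 = start, .addr2 = end };

	/* wait == 1: do not return until every other cpu has run the handler,
	 * so args may safely live in this stack frame */
	smp_call_function(remote_range_handler, &args, 1);
}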
385 static void flush_tlb_range_ipi(void *info)    !! 679 static void
                                                   >> 680 ipi_flush_tlb_page(void *x)
386 {                                                 681 {
387         struct flush_tlb_data *fd = (struct flush_tlb_data *)info; !! 682         struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
                                                   >> 683         struct mm_struct * mm = data->mm;
388                                                   684 
389         local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); !! 685         if (mm == current->active_mm && !asn_locked())
                                                   >> 686                 flush_tlb_current_page(mm, data->vma, data->addr);
                                                   >> 687         else
                                                   >> 688                 flush_tlb_other(mm);
390 }                                                 689 }
391                                                   690 
392 void flush_tlb_range(struct vm_area_struct *vma, !! 691 void
393                      unsigned long start, unsigned long end) !! 692 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
394 {                                                 693 {
                                                   >> 694         struct flush_tlb_page_struct data;
395         struct mm_struct *mm = vma->vm_mm;        695         struct mm_struct *mm = vma->vm_mm;
396                                                   696 
397         preempt_disable();                        697         preempt_disable();
398         if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { << 
399                 struct flush_tlb_data fd;      << 
400                                                   698 
401                 fd.vma = vma;                  !! 699         if (mm == current->active_mm) {
402                 fd.addr1 = start;              !! 700                 flush_tlb_current_page(mm, vma, addr);
403                 fd.addr2 = end;                !! 701                 if (atomic_read(&mm->mm_users) <= 1) {
404                 smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1); !! 702                         int cpu, this_cpu = smp_processor_id();
405         } else {                               !! 703                         for (cpu = 0; cpu < NR_CPUS; cpu++) {
406                 int i;                         !! 704                                 if (!cpu_online(cpu) || cpu == this_cpu)
407                 for_each_online_cpu(i)         !! 705                                         continue;
408                         if (smp_processor_id() != i) !! 706                                 if (mm->context[cpu])
409                                 cpu_context(i, mm) = 0; !! 707                                         mm->context[cpu] = 0;
                                                   >> 708                         }
                                                   >> 709                         preempt_enable();
                                                   >> 710                         return;
                                                   >> 711                 }
410         }                                         712         }
411         local_flush_tlb_range(vma, start, end); !! 713 
                                                   >> 714         data.vma = vma;
                                                   >> 715         data.mm = mm;
                                                   >> 716         data.addr = addr;
                                                   >> 717 
                                                   >> 718         smp_call_function(ipi_flush_tlb_page, &data, 1);
                                                   >> 719 
412         preempt_enable();                         720         preempt_enable();
413 }                                                 721 }
                                                   >> 722 EXPORT_SYMBOL(flush_tlb_page);
414                                                   723 
415 static void flush_tlb_kernel_range_ipi(void *info) !! 724 void
                                                   >> 725 flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
416 {                                                 726 {
417         struct flush_tlb_data *fd = (struct flush_tlb_data *)info; !! 727         /* On the Alpha we always flush the whole user tlb.  */
418                                                !! 728         flush_tlb_mm(vma->vm_mm);
419         local_flush_tlb_kernel_range(fd->addr1, fd->addr2); << 
420 }                                                 729 }
                                                   >> 730 EXPORT_SYMBOL(flush_tlb_range);
421                                                   731 
422 void flush_tlb_kernel_range(unsigned long start, unsigned long end) !! 732 static void
                                                   >> 733 ipi_flush_icache_page(void *x)
423 {                                                 734 {
424         struct flush_tlb_data fd;              !! 735         struct mm_struct *mm = (struct mm_struct *) x;
425                                                !! 736         if (mm == current->active_mm && !asn_locked())
426         fd.addr1 = start;                      !! 737                 __load_new_mm_context(mm);
427         fd.addr2 = end;                        !! 738         else
428         on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1); !! 739                 flush_tlb_other(mm);
429 }                                                 740 }
430                                                   741 
431 static void flush_tlb_page_ipi(void *info)     !! 742 void
                                                   >> 743 flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                                                   >> 744                         unsigned long addr, int len)
432 {                                                 745 {
433         struct flush_tlb_data *fd = (struct flush_tlb_data *)info; !! 746         struct mm_struct *mm = vma->vm_mm;
434                                                   747 
435         local_flush_tlb_page(fd->vma, fd->addr1); !! 748         if ((vma->vm_flags & VM_EXEC) == 0)
436 }                                              !! 749                 return;
437                                                   750 
438 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) << 
439 {                                              << 
440         preempt_disable();                        751         preempt_disable();
441         if ((atomic_read(&vma->vm_mm->mm_users) != 1) || << 
442             (current->mm != vma->vm_mm)) {     << 
443                 struct flush_tlb_data fd;      << 
444                                                << 
445                 fd.vma = vma;                  << 
446                 fd.addr1 = page;               << 
447                 smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1); << 
448         } else {                               << 
449                 int i;                         << 
450                 for_each_online_cpu(i)         << 
451                         if (smp_processor_id() != i) << 
452                                 cpu_context(i, vma->vm_mm) = 0; << 
453         }                                      << 
454         local_flush_tlb_page(vma, page);       << 
455         preempt_enable();                      << 
456 }                                              << 
457                                                << 
458 static void flush_tlb_one_ipi(void *info)      << 
459 {                                              << 
460         struct flush_tlb_data *fd = (struct flush_tlb_data *)info; << 
461         local_flush_tlb_one(fd->addr1, fd->addr2); << 
462 }                                              << 
463                                                   752 
464 void flush_tlb_one(unsigned long asid, unsigned long vaddr) !! 753         if (mm == current->active_mm) {
465 {                                              !! 754                 __load_new_mm_context(mm);
466         struct flush_tlb_data fd;              !! 755                 if (atomic_read(&mm->mm_users) <= 1) {
                                                   >> 756                         int cpu, this_cpu = smp_processor_id();
                                                   >> 757                         for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                                   >> 758                                 if (!cpu_online(cpu) || cpu == this_cpu)
                                                   >> 759                                         continue;
                                                   >> 760                                 if (mm->context[cpu])
                                                   >> 761                                         mm->context[cpu] = 0;
                                                   >> 762                         }
                                                   >> 763                         preempt_enable();
                                                   >> 764                         return;
                                                   >> 765                 }
                                                   >> 766         }
467                                                   767 
468         fd.addr1 = asid;                       !! 768         smp_call_function(ipi_flush_icache_page, mm, 1);
469         fd.addr2 = vaddr;                      << 
470                                                   769 
471         smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1); !! 770         preempt_enable();
472         local_flush_tlb_one(asid, vaddr);      << 
473 }                                                 771 }
474                                                << 
475 #endif                                         << 
476                                                   772 
