1 // SPDX-License-Identifier: GPL-2.0 << 2 /* 1 /* 3 * arch/sh/kernel/smp.c !! 2 * linux/arch/alpha/kernel/smp.c 4 * 3 * 5 * SMP support for the SuperH processors. !! 4 * 2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com) >> 5 * Renamed modified smp_call_function to smp_call_function_on_cpu() >> 6 * Created an function that conforms to the old calling convention >> 7 * of smp_call_function(). >> 8 * >> 9 * This is helpful for DCPI. 6 * 10 * 7 * Copyright (C) 2002 - 2010 Paul Mundt << 8 * Copyright (C) 2006 - 2007 Akio Idehara << 9 */ 11 */ 10 #include <linux/err.h> !! 12 11 #include <linux/cache.h> !! 13 #include <linux/errno.h> 12 #include <linux/cpumask.h> !! 14 #include <linux/kernel.h> 13 #include <linux/delay.h> !! 15 #include <linux/kernel_stat.h> 14 #include <linux/init.h> !! 16 #include <linux/sched.h> 15 #include <linux/spinlock.h> << 16 #include <linux/mm.h> 17 #include <linux/mm.h> 17 #include <linux/module.h> !! 18 #include <linux/threads.h> 18 #include <linux/cpu.h> !! 19 #include <linux/smp.h> >> 20 #include <linux/smp_lock.h> 19 #include <linux/interrupt.h> 21 #include <linux/interrupt.h> 20 #include <linux/sched/mm.h> !! 22 #include <linux/init.h> 21 #include <linux/sched/hotplug.h> !! 23 #include <linux/delay.h> 22 #include <linux/atomic.h> !! 24 #include <linux/spinlock.h> 23 #include <linux/clockchips.h> !! 25 #include <linux/irq.h> 24 #include <linux/profile.h> !! 26 #include <linux/cache.h> 25 27 26 #include <asm/processor.h> !! 28 #include <asm/hwrpb.h> >> 29 #include <asm/ptrace.h> >> 30 #include <asm/atomic.h> >> 31 >> 32 #include <asm/io.h> >> 33 #include <asm/irq.h> >> 34 #include <asm/bitops.h> >> 35 #include <asm/pgtable.h> >> 36 #include <asm/pgalloc.h> >> 37 #include <asm/hardirq.h> >> 38 #include <asm/softirq.h> 27 #include <asm/mmu_context.h> 39 #include <asm/mmu_context.h> 28 #include <asm/smp.h> << 29 #include <asm/cacheflush.h> << 30 #include <asm/sections.h> << 31 #include <asm/setup.h> << 32 40 33 int __cpu_number_map[NR_CPUS]; /* Map !! 41 #define __KERNEL_SYSCALLS__ 34 int __cpu_logical_map[NR_CPUS]; /* Map !! 42 #include <asm/unistd.h> 35 43 36 struct plat_smp_ops *mp_ops = NULL; !! 44 #include "proto.h" >> 45 #include "irq_impl.h" 37 46 38 /* State of each CPU */ << 39 DEFINE_PER_CPU(int, cpu_state) = { 0 }; << 40 47 41 void register_smp_ops(struct plat_smp_ops *ops !! 48 #define DEBUG_SMP 0 42 { !! 49 #if DEBUG_SMP 43 if (mp_ops) !! 50 #define DBGS(args) printk args 44 printk(KERN_WARNING "Overridin !! 51 #else >> 52 #define DBGS(args) >> 53 #endif 45 54 46 mp_ops = ops; !! 55 /* A collection of per-processor data. */ 47 } !! 56 struct cpuinfo_alpha cpu_data[NR_CPUS]; 48 57 49 static inline void smp_store_cpu_info(unsigned !! 58 /* A collection of single bit ipi messages. */ 50 { !! 59 static struct { 51 struct sh_cpuinfo *c = cpu_data + cpu; !! 60 unsigned long bits ____cacheline_aligned; >> 61 } ipi_data[NR_CPUS] __cacheline_aligned; >> 62 >> 63 enum ipi_message_type { >> 64 IPI_RESCHEDULE, >> 65 IPI_CALL_FUNC, >> 66 IPI_CPU_STOP, >> 67 }; 52 68 53 memcpy(c, &boot_cpu_data, sizeof(struc !! 69 spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED; 54 70 55 c->loops_per_jiffy = loops_per_jiffy; !! 71 /* Set to a secondary's cpuid when it comes online. */ 56 } !! 72 static unsigned long smp_secondary_alive; 57 73 58 void __init smp_prepare_cpus(unsigned int max_ !! 74 /* Which cpus ids came online. */ 59 { !! 
75 unsigned long cpu_present_mask; 60 unsigned int cpu = smp_processor_id(); << 61 76 62 init_new_context(current, &init_mm); !! 77 /* cpus reported in the hwrpb */ 63 current_thread_info()->cpu = cpu; !! 78 static unsigned long hwrpb_cpu_present_mask __initdata = 0; 64 mp_ops->prepare_cpus(max_cpus); << 65 79 66 #ifndef CONFIG_HOTPLUG_CPU !! 80 static int max_cpus = NR_CPUS; /* Command-line limitation. */ 67 init_cpu_present(cpu_possible_mask); !! 81 int smp_num_probed; /* Internal processor count */ 68 #endif !! 82 int smp_num_cpus = 1; /* Number that came online. */ >> 83 int smp_threads_ready; /* True once the per process idle is forked. */ >> 84 >> 85 int __cpu_number_map[NR_CPUS]; >> 86 int __cpu_logical_map[NR_CPUS]; >> 87 >> 88 extern void calibrate_delay(void); >> 89 extern asmlinkage void entInt(void); >> 90 >> 91 >> 92 static int __init nosmp(char *str) >> 93 { >> 94 max_cpus = 0; >> 95 return 1; 69 } 96 } 70 97 71 void __init smp_prepare_boot_cpu(void) !! 98 __setup("nosmp", nosmp); >> 99 >> 100 static int __init maxcpus(char *str) 72 { 101 { 73 unsigned int cpu = smp_processor_id(); !! 102 get_option(&str, &max_cpus); >> 103 return 1; >> 104 } 74 105 75 __cpu_number_map[0] = cpu; !! 106 __setup("maxcpus=", maxcpus); 76 __cpu_logical_map[0] = cpu; << 77 107 78 set_cpu_online(cpu, true); << 79 set_cpu_possible(cpu, true); << 80 108 81 per_cpu(cpu_state, cpu) = CPU_ONLINE; !! 109 /* >> 110 * Called by both boot and secondaries to move global data into >> 111 * per-processor storage. >> 112 */ >> 113 static inline void __init >> 114 smp_store_cpu_info(int cpuid) >> 115 { >> 116 cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy; >> 117 cpu_data[cpuid].last_asn = ASN_FIRST_VERSION; >> 118 cpu_data[cpuid].need_new_asn = 0; >> 119 cpu_data[cpuid].asn_lock = 0; >> 120 local_irq_count(cpuid) = 0; >> 121 local_bh_count(cpuid) = 0; 82 } 122 } 83 123 84 #ifdef CONFIG_HOTPLUG_CPU !! 124 /* 85 void native_cpu_die(unsigned int cpu) !! 125 * Ideally sets up per-cpu profiling hooks. Doesn't do much now... >> 126 */ >> 127 static inline void __init >> 128 smp_setup_percpu_timer(int cpuid) 86 { 129 { 87 unsigned int i; !! 130 cpu_data[cpuid].prof_counter = 1; >> 131 cpu_data[cpuid].prof_multiplier = 1; >> 132 } 88 133 89 for (i = 0; i < 10; i++) { !! 134 static void __init 90 smp_rmb(); !! 135 wait_boot_cpu_to_stop(int cpuid) 91 if (per_cpu(cpu_state, cpu) == !! 136 { 92 if (system_state == SY !! 137 long stop = jiffies + 10*HZ; 93 pr_info("CPU % << 94 138 >> 139 while (time_before(jiffies, stop)) { >> 140 if (!smp_secondary_alive) 95 return; 141 return; 96 } !! 142 barrier(); 97 << 98 msleep(100); << 99 } 143 } 100 144 101 pr_err("CPU %u didn't die...\n", cpu); !! 145 printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid); >> 146 for (;;) >> 147 barrier(); 102 } 148 } 103 149 104 int native_cpu_disable(unsigned int cpu) !! 150 /* >> 151 * Where secondaries begin a life of C. >> 152 */ >> 153 void __init >> 154 smp_callin(void) 105 { 155 { 106 return cpu == 0 ? -EPERM : 0; !! 156 int cpuid = hard_smp_processor_id(); 107 } << 108 157 109 void play_dead_common(void) !! 158 if (current != init_tasks[cpu_number_map(cpuid)]) { 110 { !! 159 printk("BUG: smp_calling: cpu %d current %p init_tasks[cpu_number_map(cpuid)] %p\n", 111 idle_task_exit(); !! 160 cpuid, current, init_tasks[cpu_number_map(cpuid)]); 112 irq_ctx_exit(raw_smp_processor_id()); !! 161 } 113 mb(); << 114 162 115 __this_cpu_write(cpu_state, CPU_DEAD); !! 
163 DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state)); 116 local_irq_disable(); << 117 } << 118 164 119 void native_play_dead(void) !! 165 /* Turn on machine checks. */ 120 { !! 166 wrmces(7); 121 play_dead_common(); << 122 } << 123 167 124 int __cpu_disable(void) !! 168 /* Set trap vectors. */ 125 { !! 169 trap_init(); 126 unsigned int cpu = smp_processor_id(); << 127 int ret; << 128 170 129 ret = mp_ops->cpu_disable(cpu); !! 171 /* Set interrupt vector. */ 130 if (ret) !! 172 wrent(entInt, 0); 131 return ret; << 132 173 133 /* !! 174 /* Get our local ticker going. */ 134 * Take this CPU offline. Once we cle !! 175 smp_setup_percpu_timer(cpuid); 135 * and we must not schedule until we'r !! 176 136 */ !! 177 /* Call platform-specific callin, if specified */ 137 set_cpu_online(cpu, false); !! 178 if (alpha_mv.smp_callin) alpha_mv.smp_callin(); >> 179 >> 180 /* Must have completely accurate bogos. */ >> 181 __sti(); 138 182 139 /* 183 /* 140 * OK - migrate IRQs away from this CP !! 184 * Wait boot CPU to stop with irq enabled before >> 185 * running calibrate_delay(). 141 */ 186 */ 142 migrate_irqs(); !! 187 wait_boot_cpu_to_stop(cpuid); >> 188 mb(); >> 189 >> 190 calibrate_delay(); >> 191 >> 192 smp_store_cpu_info(cpuid); >> 193 >> 194 { >> 195 #define LPJ(c) ((long)cpu_data[c].loops_per_jiffy) >> 196 long diff = LPJ(boot_cpuid) - LPJ(cpuid); >> 197 if (diff < 0) diff = -diff; >> 198 >> 199 if (diff > LPJ(boot_cpuid)/10) { >> 200 printk("Bogus BogoMIPS for cpu %d - trusting boot CPU\n", >> 201 cpuid); >> 202 loops_per_jiffy = LPJ(cpuid) = LPJ(boot_cpuid); >> 203 } >> 204 } 143 205 144 /* 206 /* 145 * Flush user cache and TLB mappings, !! 207 * Allow master to continue only after we written 146 * from the vm mask set of all process !! 208 * the loops_per_jiffy. 147 */ 209 */ 148 flush_cache_all(); !! 210 wmb(); 149 #ifdef CONFIG_MMU !! 211 smp_secondary_alive = 1; 150 local_flush_tlb_all(); << 151 #endif << 152 212 153 clear_tasks_mm_cpumask(cpu); !! 213 /* Wait for the go code. */ >> 214 while (!smp_threads_ready) >> 215 barrier(); 154 216 155 return 0; !! 217 DBGS(("smp_callin: commencing CPU %d current %p\n", 156 } !! 218 cpuid, current)); 157 #else /* ... !CONFIG_HOTPLUG_CPU */ !! 219 158 int native_cpu_disable(unsigned int cpu) !! 220 /* Setup the scheduler for this processor. */ 159 { !! 221 init_idle(); 160 return -ENOSYS; !! 222 >> 223 /* ??? This should be in init_idle. */ >> 224 atomic_inc(&init_mm.mm_count); >> 225 current->active_mm = &init_mm; >> 226 /* Do nothing. */ >> 227 cpu_idle(); 161 } 228 } 162 229 163 void native_cpu_die(unsigned int cpu) !! 230 /* >> 231 * Send a message to a secondary's console. "START" is one such >> 232 * interesting message. ;-) >> 233 */ >> 234 static void >> 235 send_secondary_console_msg(char *str, int cpuid) 164 { 236 { 165 /* We said "no" in __cpu_disable */ !! 237 struct percpu_struct *cpu; 166 BUG(); !! 
238 register char *cp1, *cp2; >> 239 unsigned long cpumask; >> 240 size_t len; >> 241 long timeout; >> 242 >> 243 cpu = (struct percpu_struct *) >> 244 ((char*)hwrpb >> 245 + hwrpb->processor_offset >> 246 + cpuid * hwrpb->processor_size); >> 247 >> 248 cpumask = (1UL << cpuid); >> 249 if (hwrpb->txrdy & cpumask) >> 250 goto delay1; >> 251 ready1: >> 252 >> 253 cp2 = str; >> 254 len = strlen(cp2); >> 255 *(unsigned int *)&cpu->ipc_buffer[0] = len; >> 256 cp1 = (char *) &cpu->ipc_buffer[1]; >> 257 memcpy(cp1, cp2, len); >> 258 >> 259 /* atomic test and set */ >> 260 wmb(); >> 261 set_bit(cpuid, &hwrpb->rxrdy); >> 262 >> 263 if (hwrpb->txrdy & cpumask) >> 264 goto delay2; >> 265 ready2: >> 266 return; >> 267 >> 268 delay1: >> 269 /* Wait 10 seconds. Note that jiffies aren't ticking yet. */ >> 270 for (timeout = 1000000; timeout > 0; --timeout) { >> 271 if (!(hwrpb->txrdy & cpumask)) >> 272 goto ready1; >> 273 udelay(10); >> 274 barrier(); >> 275 } >> 276 goto timeout; >> 277 >> 278 delay2: >> 279 /* Wait 10 seconds. */ >> 280 for (timeout = 1000000; timeout > 0; --timeout) { >> 281 if (!(hwrpb->txrdy & cpumask)) >> 282 goto ready2; >> 283 udelay(10); >> 284 barrier(); >> 285 } >> 286 goto timeout; >> 287 >> 288 timeout: >> 289 printk("Processor %x not ready\n", cpuid); >> 290 return; 167 } 291 } 168 292 169 void native_play_dead(void) !! 293 /* >> 294 * A secondary console wants to send a message. Receive it. >> 295 */ >> 296 static void >> 297 recv_secondary_console_msg(void) 170 { 298 { 171 BUG(); !! 299 int mycpu, i, cnt; >> 300 unsigned long txrdy = hwrpb->txrdy; >> 301 char *cp1, *cp2, buf[80]; >> 302 struct percpu_struct *cpu; >> 303 >> 304 DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy)); >> 305 >> 306 mycpu = hard_smp_processor_id(); >> 307 >> 308 for (i = 0; i < NR_CPUS; i++) { >> 309 if (!(txrdy & (1UL << i))) >> 310 continue; >> 311 >> 312 DBGS(("recv_secondary_console_msg: " >> 313 "TXRDY contains CPU %d.\n", i)); >> 314 >> 315 cpu = (struct percpu_struct *) >> 316 ((char*)hwrpb >> 317 + hwrpb->processor_offset >> 318 + i * hwrpb->processor_size); >> 319 >> 320 DBGS(("recv_secondary_console_msg: on %d from %d" >> 321 " HALT_REASON 0x%lx FLAGS 0x%lx\n", >> 322 mycpu, i, cpu->halt_reason, cpu->flags)); >> 323 >> 324 cnt = cpu->ipc_buffer[0] >> 32; >> 325 if (cnt <= 0 || cnt >= 80) >> 326 strcpy(buf, "<<< BOGUS MSG >>>"); >> 327 else { >> 328 cp1 = (char *) &cpu->ipc_buffer[11]; >> 329 cp2 = buf; >> 330 strcpy(cp2, cp1); >> 331 >> 332 while ((cp2 = strchr(cp2, '\r')) != 0) { >> 333 *cp2 = ' '; >> 334 if (cp2[1] == '\n') >> 335 cp2[1] = ' '; >> 336 } >> 337 } >> 338 >> 339 DBGS((KERN_INFO "recv_secondary_console_msg: on %d " >> 340 "message is '%s'\n", mycpu, buf)); >> 341 } >> 342 >> 343 hwrpb->txrdy = 0; 172 } 344 } 173 #endif << 174 345 175 static asmlinkage void start_secondary(void) !! 346 /* >> 347 * Convince the console to have a secondary cpu begin execution. >> 348 */ >> 349 static int __init >> 350 secondary_cpu_start(int cpuid, struct task_struct *idle) 176 { 351 { 177 unsigned int cpu = smp_processor_id(); !! 352 struct percpu_struct *cpu; 178 struct mm_struct *mm = &init_mm; !! 353 struct pcb_struct *hwpcb; 179 !! 354 long timeout; 180 enable_mmu(); !! 355 181 mmgrab(mm); !! 356 cpu = (struct percpu_struct *) 182 mmget(mm); !! 357 ((char*)hwrpb 183 current->active_mm = mm; !! 358 + hwrpb->processor_offset 184 #ifdef CONFIG_MMU !! 359 + cpuid * hwrpb->processor_size); 185 enter_lazy_tlb(mm, current); !! 
360 hwpcb = (struct pcb_struct *) cpu->hwpcb; 186 local_flush_tlb_all(); !! 361 >> 362 /* Initialize the CPU's HWPCB to something just good enough for >> 363 us to get started. Immediately after starting, we'll swpctx >> 364 to the target idle task's ptb. Reuse the stack in the mean >> 365 time. Precalculate the target PCBB. */ >> 366 hwpcb->ksp = (unsigned long) idle + sizeof(union task_union) - 16; >> 367 hwpcb->usp = 0; >> 368 hwpcb->ptbr = idle->thread.ptbr; >> 369 hwpcb->pcc = 0; >> 370 hwpcb->asn = 0; >> 371 hwpcb->unique = virt_to_phys(&idle->thread); >> 372 hwpcb->flags = idle->thread.pal_flags; >> 373 hwpcb->res1 = hwpcb->res2 = 0; >> 374 >> 375 #if 0 >> 376 DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n", >> 377 hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique)); 187 #endif 378 #endif >> 379 DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n", >> 380 cpuid, idle->state, idle->thread.pal_flags)); 188 381 189 per_cpu_trap_init(); !! 382 /* Setup HWRPB fields that SRM uses to activate secondary CPU */ >> 383 hwrpb->CPU_restart = __smp_callin; >> 384 hwrpb->CPU_restart_data = (unsigned long) __smp_callin; 190 385 191 notify_cpu_starting(cpu); !! 386 /* Recalculate and update the HWRPB checksum */ >> 387 hwrpb_update_checksum(hwrpb); 192 388 193 local_irq_enable(); !! 389 /* >> 390 * Send a "start" command to the specified processor. >> 391 */ 194 392 195 calibrate_delay(); !! 393 /* SRM III 3.4.1.3 */ >> 394 cpu->flags |= 0x22; /* turn on Context Valid and Restart Capable */ >> 395 cpu->flags &= ~1; /* turn off Bootstrap In Progress */ >> 396 wmb(); 196 397 197 smp_store_cpu_info(cpu); !! 398 send_secondary_console_msg("START\r\n", cpuid); 198 399 199 set_cpu_online(cpu, true); !! 400 /* Wait 10 seconds for an ACK from the console. Note that jiffies 200 per_cpu(cpu_state, cpu) = CPU_ONLINE; !! 401 aren't ticking yet. */ >> 402 for (timeout = 1000000; timeout > 0; timeout--) { >> 403 if (cpu->flags & 1) >> 404 goto started; >> 405 udelay(10); >> 406 barrier(); >> 407 } >> 408 printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid); >> 409 return -1; 201 410 202 cpu_startup_entry(CPUHP_AP_ONLINE_IDLE !! 411 started: >> 412 DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid)); >> 413 return 0; 203 } 414 } 204 415 205 extern struct { !! 416 static int __init fork_by_hand(void) 206 unsigned long sp; << 207 unsigned long bss_start; << 208 unsigned long bss_end; << 209 void *start_kernel_fn; << 210 void *cpu_init_fn; << 211 void *thread_info; << 212 } stack_start; << 213 << 214 int __cpu_up(unsigned int cpu, struct task_str << 215 { 417 { 216 unsigned long timeout; !! 418 struct pt_regs regs; 217 !! 419 /* 218 per_cpu(cpu_state, cpu) = CPU_UP_PREPA !! 420 * don't care about the regs settings since >> 421 * we'll never reschedule the forked task. >> 422 */ >> 423 return do_fork(CLONE_VM|CLONE_PID, 0, ®s, 0); >> 424 } 219 425 220 /* Fill in data in head.S for secondar !! 426 /* 221 stack_start.sp = tsk->thread.sp; !! 427 * Bring one cpu online. 222 stack_start.thread_info = tsk->stack; !! 428 */ 223 stack_start.bss_start = 0; /* don't cl !! 429 static int __init 224 stack_start.start_kernel_fn = start_se !! 430 smp_boot_one_cpu(int cpuid, int cpunum) >> 431 { >> 432 struct task_struct *idle; >> 433 long timeout; 225 434 226 flush_icache_range((unsigned long)&sta !! 435 /* Cook up an idler for this guy. Note that the address we give 227 (unsigned long)&sta !! 436 to kernel_thread is irrelevant -- it's going to start where 228 wmb(); !! 
437 HWRPB.CPU_restart says to start. But this gets all the other >> 438 task-y sort of data structures set up like we wish. */ >> 439 /* >> 440 * We can't use kernel_thread since we must avoid to >> 441 * reschedule the child. >> 442 */ >> 443 if (fork_by_hand() < 0) >> 444 panic("failed fork for CPU %d", cpuid); 229 445 230 mp_ops->start_cpu(cpu, (unsigned long) !! 446 idle = init_task.prev_task; >> 447 if (!idle) >> 448 panic("No idle process for CPU %d", cpuid); >> 449 if (idle == &init_task) >> 450 panic("idle process is init_task for CPU %d", cpuid); >> 451 >> 452 idle->processor = cpuid; >> 453 idle->cpus_runnable = 1 << cpuid; /* we schedule the first task manually */ >> 454 __cpu_logical_map[cpunum] = cpuid; >> 455 __cpu_number_map[cpuid] = cpunum; >> 456 >> 457 del_from_runqueue(idle); >> 458 unhash_process(idle); >> 459 init_tasks[cpunum] = idle; >> 460 >> 461 DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n", >> 462 cpuid, idle->state, idle->flags)); >> 463 >> 464 /* The secondary will change this once it is happy. Note that >> 465 secondary_cpu_start contains the necessary memory barrier. */ >> 466 smp_secondary_alive = -1; >> 467 >> 468 /* Whirrr, whirrr, whirrrrrrrrr... */ >> 469 if (secondary_cpu_start(cpuid, idle)) >> 470 return -1; 231 471 232 timeout = jiffies + HZ; !! 472 mb(); 233 while (time_before(jiffies, timeout)) !! 473 /* Notify the secondary CPU it can run calibrate_delay() */ 234 if (cpu_online(cpu)) !! 474 smp_secondary_alive = 0; 235 break; << 236 475 >> 476 /* We've been acked by the console; wait one second for the task >> 477 to start up for real. Note that jiffies aren't ticking yet. */ >> 478 for (timeout = 0; timeout < 1000000; timeout++) { >> 479 if (smp_secondary_alive == 1) >> 480 goto alive; 237 udelay(10); 481 udelay(10); 238 barrier(); 482 barrier(); 239 } 483 } 240 484 241 if (cpu_online(cpu)) !! 485 /* we must invalidate our stuff as we failed to boot the CPU */ 242 return 0; !! 486 __cpu_logical_map[cpunum] = -1; >> 487 __cpu_number_map[cpuid] = -1; >> 488 >> 489 /* the idle task is local to us so free it as we don't use it */ >> 490 free_task_struct(idle); 243 491 244 return -ENOENT; !! 492 printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid); >> 493 return -1; >> 494 >> 495 alive: >> 496 /* Another "Red Snapper". */ >> 497 return 0; 245 } 498 } 246 499 247 void __init smp_cpus_done(unsigned int max_cpu !! 500 /* >> 501 * Called from setup_arch. Detect an SMP system and which processors >> 502 * are present. >> 503 */ >> 504 void __init >> 505 setup_smp(void) 248 { 506 { 249 unsigned long bogosum = 0; !! 507 struct percpu_struct *cpubase, *cpu; 250 int cpu; !! 508 int i; >> 509 >> 510 if (boot_cpuid != 0) { >> 511 printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n", >> 512 boot_cpuid); >> 513 } 251 514 252 for_each_online_cpu(cpu) !! 515 if (hwrpb->nr_processors > 1) { 253 bogosum += cpu_data[cpu].loops !! 
516 int boot_cpu_palrev; >> 517 >> 518 DBGS(("setup_smp: nr_processors %ld\n", >> 519 hwrpb->nr_processors)); >> 520 >> 521 cpubase = (struct percpu_struct *) >> 522 ((char*)hwrpb + hwrpb->processor_offset); >> 523 boot_cpu_palrev = cpubase->pal_revision; >> 524 >> 525 for (i = 0; i < hwrpb->nr_processors; i++ ) { >> 526 cpu = (struct percpu_struct *) >> 527 ((char *)cpubase + i*hwrpb->processor_size); >> 528 if ((cpu->flags & 0x1cc) == 0x1cc) { >> 529 smp_num_probed++; >> 530 /* Assume here that "whami" == index */ >> 531 hwrpb_cpu_present_mask |= (1UL << i); >> 532 cpu->pal_revision = boot_cpu_palrev; >> 533 } >> 534 >> 535 DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n", >> 536 i, cpu->flags, cpu->type)); >> 537 DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n", >> 538 i, cpu->pal_revision)); >> 539 } >> 540 } else { >> 541 smp_num_probed = 1; >> 542 hwrpb_cpu_present_mask = (1UL << boot_cpuid); >> 543 } >> 544 cpu_present_mask = 1UL << boot_cpuid; >> 545 >> 546 printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n", >> 547 smp_num_probed, hwrpb_cpu_present_mask); >> 548 } 254 549 >> 550 /* >> 551 * Called by smp_init bring all the secondaries online and hold them. >> 552 */ >> 553 void __init >> 554 smp_boot_cpus(void) >> 555 { >> 556 int cpu_count, i; >> 557 unsigned long bogosum; >> 558 >> 559 /* Take care of some initial bookkeeping. */ >> 560 memset(__cpu_number_map, -1, sizeof(__cpu_number_map)); >> 561 memset(__cpu_logical_map, -1, sizeof(__cpu_logical_map)); >> 562 memset(ipi_data, 0, sizeof(ipi_data)); >> 563 >> 564 __cpu_number_map[boot_cpuid] = 0; >> 565 __cpu_logical_map[0] = boot_cpuid; >> 566 current->processor = boot_cpuid; >> 567 >> 568 smp_store_cpu_info(boot_cpuid); >> 569 smp_setup_percpu_timer(boot_cpuid); >> 570 >> 571 init_idle(); >> 572 >> 573 /* ??? This should be in init_idle. */ >> 574 atomic_inc(&init_mm.mm_count); >> 575 current->active_mm = &init_mm; >> 576 >> 577 /* Nothing to do on a UP box, or when told not to. */ >> 578 if (smp_num_probed == 1 || max_cpus == 0) { >> 579 printk(KERN_INFO "SMP mode deactivated.\n"); >> 580 return; >> 581 } >> 582 >> 583 printk(KERN_INFO "SMP starting up secondaries.\n"); >> 584 >> 585 cpu_count = 1; >> 586 for (i = 0; i < NR_CPUS; i++) { >> 587 if (cpu_count >= max_cpus) >> 588 break; >> 589 >> 590 if (i == boot_cpuid) >> 591 continue; >> 592 >> 593 if (((hwrpb_cpu_present_mask >> i) & 1) == 0) >> 594 continue; >> 595 >> 596 if (smp_boot_one_cpu(i, cpu_count)) >> 597 continue; >> 598 >> 599 cpu_present_mask |= 1UL << i; >> 600 cpu_count++; >> 601 } >> 602 >> 603 if (cpu_count == 1) { >> 604 printk(KERN_ERR "SMP: Only one lonely processor alive.\n"); >> 605 return; >> 606 } >> 607 >> 608 bogosum = 0; >> 609 for (i = 0; i < NR_CPUS; i++) { >> 610 if (cpu_present_mask & (1UL << i)) >> 611 bogosum += cpu_data[i].loops_per_jiffy; >> 612 } 255 printk(KERN_INFO "SMP: Total of %d pro 613 printk(KERN_INFO "SMP: Total of %d processors activated " 256 "(%lu.%02lu BogoMIPS).\n", num_ !! 614 "(%lu.%02lu BogoMIPS).\n", 257 bogosum / (500000/HZ), !! 615 cpu_count, bogosum / (500000/HZ), 258 (bogosum / (5000/HZ)) % 100); 616 (bogosum / (5000/HZ)) % 100); >> 617 >> 618 smp_num_cpus = cpu_count; 259 } 619 } 260 620 261 void arch_smp_send_reschedule(int cpu) !! 621 /* >> 622 * Called by smp_init to release the blocking online cpus once they >> 623 * are all started. >> 624 */ >> 625 void __init >> 626 smp_commence(void) 262 { 627 { 263 mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDU !! 628 /* smp_init sets smp_threads_ready -- that's enough. 
*/ >> 629 mb(); 264 } 630 } 265 631 266 void smp_send_stop(void) !! 632 >> 633 void >> 634 smp_percpu_timer_interrupt(struct pt_regs *regs) >> 635 { >> 636 int cpu = smp_processor_id(); >> 637 unsigned long user = user_mode(regs); >> 638 struct cpuinfo_alpha *data = &cpu_data[cpu]; >> 639 >> 640 /* Record kernel PC. */ >> 641 if (!user) >> 642 alpha_do_profile(regs->pc); >> 643 >> 644 if (!--data->prof_counter) { >> 645 /* We need to make like a normal interrupt -- otherwise >> 646 timer interrupts ignore the global interrupt lock, >> 647 which would be a Bad Thing. */ >> 648 irq_enter(cpu, RTC_IRQ); >> 649 >> 650 update_process_times(user); >> 651 >> 652 data->prof_counter = data->prof_multiplier; >> 653 irq_exit(cpu, RTC_IRQ); >> 654 >> 655 if (softirq_pending(cpu)) >> 656 do_softirq(); >> 657 } >> 658 } >> 659 >> 660 int __init >> 661 setup_profiling_timer(unsigned int multiplier) 267 { 662 { 268 smp_call_function(stop_this_cpu, 0, 0) !! 663 return -EINVAL; 269 } 664 } 270 665 271 void arch_send_call_function_ipi_mask(const st !! 666 >> 667 static void >> 668 send_ipi_message(unsigned long to_whom, enum ipi_message_type operation) 272 { 669 { 273 int cpu; !! 670 long i, j; >> 671 >> 672 /* Reduce the number of memory barriers by doing two loops, >> 673 one to set the bits, one to invoke the interrupts. */ >> 674 >> 675 mb(); /* Order out-of-band data and bit setting. */ >> 676 >> 677 for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) { >> 678 if (to_whom & j) >> 679 set_bit(operation, &ipi_data[i].bits); >> 680 } 274 681 275 for_each_cpu(cpu, mask) !! 682 mb(); /* Order bit setting and interrupt. */ 276 mp_ops->send_ipi(cpu, SMP_MSG_ !! 683 >> 684 for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) { >> 685 if (to_whom & j) >> 686 wripir(i); >> 687 } 277 } 688 } 278 689 279 void arch_send_call_function_single_ipi(int cp !! 690 /* Structure and data for smp_call_function. This is designed to >> 691 minimize static memory requirements. Plus it looks cleaner. */ >> 692 >> 693 struct smp_call_struct { >> 694 void (*func) (void *info); >> 695 void *info; >> 696 long wait; >> 697 atomic_t unstarted_count; >> 698 atomic_t unfinished_count; >> 699 }; >> 700 >> 701 static struct smp_call_struct *smp_call_function_data; >> 702 >> 703 /* Atomicly drop data into a shared pointer. The pointer is free if >> 704 it is initially locked. If retry, spin until free. */ >> 705 >> 706 static inline int >> 707 pointer_lock (void *lock, void *data, int retry) 280 { 708 { 281 mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION !! 709 void *old, *tmp; >> 710 >> 711 mb(); >> 712 again: >> 713 /* Compare and swap with zero. */ >> 714 asm volatile ( >> 715 "1: ldq_l %0,%1\n" >> 716 " mov %3,%2\n" >> 717 " bne %0,2f\n" >> 718 " stq_c %2,%1\n" >> 719 " beq %2,1b\n" >> 720 "2:" >> 721 : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp) >> 722 : "r"(data) >> 723 : "memory"); >> 724 >> 725 if (old == 0) >> 726 return 0; >> 727 if (! retry) >> 728 return -EBUSY; >> 729 >> 730 while (*(void **)lock) >> 731 barrier(); >> 732 goto again; 282 } 733 } 283 734 284 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST !! 735 void 285 void tick_broadcast(const struct cpumask *mask !! 736 handle_ipi(struct pt_regs *regs) 286 { 737 { 287 int cpu; !! 738 int this_cpu = smp_processor_id(); >> 739 unsigned long *pending_ipis = &ipi_data[this_cpu].bits; >> 740 unsigned long ops; >> 741 >> 742 #if 0 >> 743 DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n", >> 744 this_cpu, *pending_ipis, regs->pc)); >> 745 #endif 288 746 289 for_each_cpu(cpu, mask) !! 
747 mb(); /* Order interrupt and bit testing. */ 290 mp_ops->send_ipi(cpu, SMP_MSG_ !! 748 while ((ops = xchg(pending_ipis, 0)) != 0) { >> 749 mb(); /* Order bit clearing and data access. */ >> 750 do { >> 751 unsigned long which; >> 752 >> 753 which = ops & -ops; >> 754 ops &= ~which; >> 755 which = ffz(~which); >> 756 >> 757 if (which == IPI_RESCHEDULE) { >> 758 /* Reschedule callback. Everything to be done >> 759 is done by the interrupt return path. */ >> 760 } >> 761 else if (which == IPI_CALL_FUNC) { >> 762 struct smp_call_struct *data; >> 763 void (*func)(void *info); >> 764 void *info; >> 765 int wait; >> 766 >> 767 data = smp_call_function_data; >> 768 func = data->func; >> 769 info = data->info; >> 770 wait = data->wait; >> 771 >> 772 /* Notify the sending CPU that the data has been >> 773 received, and execution is about to begin. */ >> 774 mb(); >> 775 atomic_dec (&data->unstarted_count); >> 776 >> 777 /* At this point the structure may be gone unless >> 778 wait is true. */ >> 779 (*func)(info); >> 780 >> 781 /* Notify the sending CPU that the task is done. */ >> 782 mb(); >> 783 if (wait) atomic_dec (&data->unfinished_count); >> 784 } >> 785 else if (which == IPI_CPU_STOP) { >> 786 halt(); >> 787 } >> 788 else { >> 789 printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", >> 790 this_cpu, which); >> 791 } >> 792 } while (ops); >> 793 >> 794 mb(); /* Order data access and bit testing. */ >> 795 } >> 796 >> 797 cpu_data[this_cpu].ipi_count++; >> 798 >> 799 if (hwrpb->txrdy) >> 800 recv_secondary_console_msg(); 291 } 801 } 292 802 293 static void ipi_timer(void) !! 803 void >> 804 smp_send_reschedule(int cpu) 294 { 805 { 295 irq_enter(); !! 806 #if DEBUG_IPI_MSG 296 tick_receive_broadcast(); !! 807 if (cpu == hard_smp_processor_id()) 297 irq_exit(); !! 808 printk(KERN_WARNING 298 } !! 809 "smp_send_reschedule: Sending IPI to self.\n"); 299 #endif 810 #endif >> 811 send_ipi_message(1UL << cpu, IPI_RESCHEDULE); >> 812 } 300 813 301 void smp_message_recv(unsigned int msg) !! 814 void >> 815 smp_send_stop(void) 302 { 816 { 303 switch (msg) { !! 817 unsigned long to_whom = cpu_present_mask ^ (1UL << smp_processor_id()); 304 case SMP_MSG_FUNCTION: !! 818 #if DEBUG_IPI_MSG 305 generic_smp_call_function_inte !! 819 if (hard_smp_processor_id() != boot_cpu_id) 306 break; !! 820 printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n"); 307 case SMP_MSG_RESCHEDULE: << 308 scheduler_ipi(); << 309 break; << 310 case SMP_MSG_FUNCTION_SINGLE: << 311 generic_smp_call_function_sing << 312 break; << 313 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST << 314 case SMP_MSG_TIMER: << 315 ipi_timer(); << 316 break; << 317 #endif 821 #endif 318 default: !! 822 send_ipi_message(to_whom, IPI_CPU_STOP); 319 printk(KERN_WARNING "SMP %d: % << 320 smp_processor_id(), __f << 321 break; << 322 } << 323 } 823 } 324 824 325 #ifdef CONFIG_PROFILING !! 825 /* 326 /* Not really SMP stuff ... */ !! 826 * Run a function on all other CPUs. 327 int setup_profiling_timer(unsigned int multipl !! 827 * <func> The function to run. This must be fast and non-blocking. 328 { !! 828 * <info> An arbitrary pointer to pass to the function. >> 829 * <retry> If true, keep retrying until ready. >> 830 * <wait> If true, wait until function has completed on other CPUs. >> 831 * [RETURNS] 0 on success, else a negative status code. >> 832 * >> 833 * Does not return until remote CPUs are nearly ready to execute <func> >> 834 * or are or have executed. 
>> 835 */ >> 836 >> 837 int >> 838 smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry, >> 839 int wait, unsigned long to_whom) >> 840 { >> 841 struct smp_call_struct data; >> 842 long timeout; >> 843 int num_cpus_to_call; >> 844 long i,j; >> 845 >> 846 data.func = func; >> 847 data.info = info; >> 848 data.wait = wait; >> 849 >> 850 to_whom &= ~(1L << smp_processor_id()); >> 851 for (i = 0, j = 1, num_cpus_to_call = 0; i < NR_CPUS; ++i, j <<= 1) >> 852 if (to_whom & j) >> 853 num_cpus_to_call++; >> 854 >> 855 atomic_set(&data.unstarted_count, num_cpus_to_call); >> 856 atomic_set(&data.unfinished_count, num_cpus_to_call); >> 857 >> 858 /* Acquire the smp_call_function_data mutex. */ >> 859 if (pointer_lock(&smp_call_function_data, &data, retry)) >> 860 return -EBUSY; >> 861 >> 862 /* Send a message to the requested CPUs. */ >> 863 send_ipi_message(to_whom, IPI_CALL_FUNC); >> 864 >> 865 /* Wait for a minimal response. */ >> 866 timeout = jiffies + HZ; >> 867 while (atomic_read (&data.unstarted_count) > 0 >> 868 && time_before (jiffies, timeout)) >> 869 barrier(); >> 870 >> 871 /* If there's no response yet, log a message but allow a longer >> 872 * timeout period -- if we get a response this time, log >> 873 * a message saying when we got it.. >> 874 */ >> 875 if (atomic_read(&data.unstarted_count) > 0) { >> 876 long start_time = jiffies; >> 877 printk(KERN_ERR "%s: initial timeout -- trying long wait\n", >> 878 __FUNCTION__); >> 879 timeout = jiffies + 30 * HZ; >> 880 while (atomic_read(&data.unstarted_count) > 0 >> 881 && time_before(jiffies, timeout)) >> 882 barrier(); >> 883 if (atomic_read(&data.unstarted_count) <= 0) { >> 884 long delta = jiffies - start_time; >> 885 printk(KERN_ERR >> 886 "%s: response %ld.%ld seconds into long wait\n", >> 887 __FUNCTION__, delta / HZ, >> 888 (100 * (delta - ((delta / HZ) * HZ))) / HZ); >> 889 } >> 890 } >> 891 >> 892 /* We either got one or timed out -- clear the lock. */ >> 893 mb(); >> 894 smp_call_function_data = 0; >> 895 >> 896 /* >> 897 * If after both the initial and long timeout periods we still don't >> 898 * have a response, something is very wrong... >> 899 */ >> 900 BUG_ON(atomic_read (&data.unstarted_count) > 0); >> 901 >> 902 /* Wait for a complete response, if needed. */ >> 903 if (wait) { >> 904 while (atomic_read (&data.unfinished_count) > 0) >> 905 barrier(); >> 906 } >> 907 329 return 0; 908 return 0; 330 } 909 } 331 #endif << 332 910 333 #ifdef CONFIG_MMU !! 911 int >> 912 smp_call_function (void (*func) (void *info), void *info, int retry, int wait) >> 913 { >> 914 return smp_call_function_on_cpu (func, info, retry, wait, >> 915 cpu_present_mask); >> 916 } 334 917 335 static void flush_tlb_all_ipi(void *info) !! 918 static void >> 919 ipi_imb(void *ignored) 336 { 920 { 337 local_flush_tlb_all(); !! 921 imb(); 338 } 922 } 339 923 340 void flush_tlb_all(void) !! 924 void >> 925 smp_imb(void) 341 { 926 { 342 on_each_cpu(flush_tlb_all_ipi, 0, 1); !! 927 /* Must wait other processors to flush their icache before continue. */ >> 928 if (smp_call_function(ipi_imb, NULL, 1, 1)) >> 929 printk(KERN_CRIT "smp_imb: timed out\n"); >> 930 >> 931 imb(); 343 } 932 } 344 933 345 static void flush_tlb_mm_ipi(void *mm) !! 934 static void >> 935 ipi_flush_tlb_all(void *ignored) 346 { 936 { 347 local_flush_tlb_mm((struct mm_struct * !! 937 tbia(); 348 } 938 } 349 939 350 /* !! 940 void 351 * The following tlb flush calls are invoked w !! 
941 flush_tlb_all(void) 352 * being torn down, or pte attributes are chan << 353 * address spaces, a new context is obtained o << 354 * context on other cpus are invalidated to fo << 355 * at switch_mm time, should the mm ever be us << 356 * multithreaded address spaces, intercpu inte << 357 * Another case where intercpu interrupts are << 358 * mm might be active on another cpu (eg debug << 359 * behalf of debugees, kswapd stealing pages f << 360 * Kanoj 07/00. << 361 */ << 362 void flush_tlb_mm(struct mm_struct *mm) << 363 { 942 { 364 preempt_disable(); !! 943 /* Although we don't have any data to pass, we do want to >> 944 synchronize with the other processors. */ >> 945 if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) { >> 946 printk(KERN_CRIT "flush_tlb_all: timed out\n"); >> 947 } 365 948 366 if ((atomic_read(&mm->mm_users) != 1) !! 949 tbia(); 367 smp_call_function(flush_tlb_mm !! 950 } 368 } else { !! 951 369 int i; !! 952 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock) 370 for_each_online_cpu(i) !! 953 371 if (smp_processor_id() !! 954 static void 372 cpu_context(i, !! 955 ipi_flush_tlb_mm(void *x) >> 956 { >> 957 struct mm_struct *mm = (struct mm_struct *) x; >> 958 if (mm == current->active_mm && !asn_locked()) >> 959 flush_tlb_current(mm); >> 960 else >> 961 flush_tlb_other(mm); >> 962 } >> 963 >> 964 void >> 965 flush_tlb_mm(struct mm_struct *mm) >> 966 { >> 967 if (mm == current->active_mm) { >> 968 flush_tlb_current(mm); >> 969 if (atomic_read(&mm->mm_users) <= 1) { >> 970 int i, cpu, this_cpu = smp_processor_id(); >> 971 for (i = 0; i < smp_num_cpus; i++) { >> 972 cpu = cpu_logical_map(i); >> 973 if (cpu == this_cpu) >> 974 continue; >> 975 if (mm->context[cpu]) >> 976 mm->context[cpu] = 0; >> 977 } >> 978 return; >> 979 } 373 } 980 } 374 local_flush_tlb_mm(mm); << 375 981 376 preempt_enable(); !! 982 if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) { >> 983 printk(KERN_CRIT "flush_tlb_mm: timed out\n"); >> 984 } 377 } 985 } 378 986 379 struct flush_tlb_data { !! 987 struct flush_tlb_page_struct { 380 struct vm_area_struct *vma; 988 struct vm_area_struct *vma; 381 unsigned long addr1; !! 989 struct mm_struct *mm; 382 unsigned long addr2; !! 990 unsigned long addr; 383 }; 991 }; 384 992 385 static void flush_tlb_range_ipi(void *info) !! 993 static void >> 994 ipi_flush_tlb_page(void *x) 386 { 995 { 387 struct flush_tlb_data *fd = (struct fl !! 996 struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x; >> 997 struct mm_struct * mm = data->mm; 388 998 389 local_flush_tlb_range(fd->vma, fd->add !! 999 if (mm == current->active_mm && !asn_locked()) >> 1000 flush_tlb_current_page(mm, data->vma, data->addr); >> 1001 else >> 1002 flush_tlb_other(mm); 390 } 1003 } 391 1004 392 void flush_tlb_range(struct vm_area_struct *vm !! 1005 void 393 unsigned long start, unsi !! 1006 flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) 394 { 1007 { >> 1008 struct flush_tlb_page_struct data; 395 struct mm_struct *mm = vma->vm_mm; 1009 struct mm_struct *mm = vma->vm_mm; 396 1010 397 preempt_disable(); !! 1011 if (mm == current->active_mm) { 398 if ((atomic_read(&mm->mm_users) != 1) !! 1012 flush_tlb_current_page(mm, vma, addr); 399 struct flush_tlb_data fd; !! 1013 if (atomic_read(&mm->mm_users) <= 1) { 400 !! 1014 int i, cpu, this_cpu = smp_processor_id(); 401 fd.vma = vma; !! 1015 for (i = 0; i < smp_num_cpus; i++) { 402 fd.addr1 = start; !! 1016 cpu = cpu_logical_map(i); 403 fd.addr2 = end; !! 
1017 if (cpu == this_cpu) 404 smp_call_function(flush_tlb_ra !! 1018 continue; 405 } else { !! 1019 if (mm->context[cpu]) 406 int i; !! 1020 mm->context[cpu] = 0; 407 for_each_online_cpu(i) !! 1021 } 408 if (smp_processor_id() !! 1022 return; 409 cpu_context(i, !! 1023 } 410 } 1024 } 411 local_flush_tlb_range(vma, start, end) << 412 preempt_enable(); << 413 } << 414 1025 415 static void flush_tlb_kernel_range_ipi(void *i !! 1026 data.vma = vma; 416 { !! 1027 data.mm = mm; 417 struct flush_tlb_data *fd = (struct fl !! 1028 data.addr = addr; 418 1029 419 local_flush_tlb_kernel_range(fd->addr1 !! 1030 if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) { >> 1031 printk(KERN_CRIT "flush_tlb_page: timed out\n"); >> 1032 } 420 } 1033 } 421 1034 422 void flush_tlb_kernel_range(unsigned long star !! 1035 void >> 1036 flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end) 423 { 1037 { 424 struct flush_tlb_data fd; !! 1038 /* On the Alpha we always flush the whole user tlb. */ >> 1039 flush_tlb_mm(mm); >> 1040 } 425 1041 426 fd.addr1 = start; !! 1042 static void 427 fd.addr2 = end; !! 1043 ipi_flush_icache_page(void *x) 428 on_each_cpu(flush_tlb_kernel_range_ipi !! 1044 { >> 1045 struct mm_struct *mm = (struct mm_struct *) x; >> 1046 if (mm == current->active_mm && !asn_locked()) >> 1047 __load_new_mm_context(mm); >> 1048 else >> 1049 flush_tlb_other(mm); 429 } 1050 } 430 1051 431 static void flush_tlb_page_ipi(void *info) !! 1052 void >> 1053 flush_icache_user_range(struct vm_area_struct *vma, struct page *page, >> 1054 unsigned long addr, int len) 432 { 1055 { 433 struct flush_tlb_data *fd = (struct fl !! 1056 struct mm_struct *mm = vma->vm_mm; 434 1057 435 local_flush_tlb_page(fd->vma, fd->addr !! 1058 if ((vma->vm_flags & VM_EXEC) == 0) 436 } !! 1059 return; 437 1060 438 void flush_tlb_page(struct vm_area_struct *vma !! 1061 if (mm == current->active_mm) { >> 1062 __load_new_mm_context(mm); >> 1063 if (atomic_read(&mm->mm_users) <= 1) { >> 1064 int i, cpu, this_cpu = smp_processor_id(); >> 1065 for (i = 0; i < smp_num_cpus; i++) { >> 1066 cpu = cpu_logical_map(i); >> 1067 if (cpu == this_cpu) >> 1068 continue; >> 1069 if (mm->context[cpu]) >> 1070 mm->context[cpu] = 0; >> 1071 } >> 1072 return; >> 1073 } >> 1074 } >> 1075 >> 1076 if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) { >> 1077 printk(KERN_CRIT "flush_icache_page: timed out\n"); >> 1078 } >> 1079 } >> 1080 >> 1081 #ifdef CONFIG_DEBUG_SPINLOCK >> 1082 void >> 1083 spin_unlock(spinlock_t * lock) 439 { 1084 { 440 preempt_disable(); !! 1085 mb(); 441 if ((atomic_read(&vma->vm_mm->mm_users !! 1086 lock->lock = 0; 442 (current->mm != vma->vm_mm)) { << 443 struct flush_tlb_data fd; << 444 1087 445 fd.vma = vma; !! 1088 lock->on_cpu = -1; 446 fd.addr1 = page; !! 1089 lock->previous = NULL; 447 smp_call_function(flush_tlb_pa !! 1090 lock->task = NULL; 448 } else { !! 1091 lock->base_file = "none"; 449 int i; !! 1092 lock->line_no = 0; 450 for_each_online_cpu(i) !! 1093 } 451 if (smp_processor_id() !! 1094 452 cpu_context(i, !! 
1095 void >> 1096 debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no) >> 1097 { >> 1098 long tmp; >> 1099 long stuck; >> 1100 void *inline_pc = __builtin_return_address(0); >> 1101 unsigned long started = jiffies; >> 1102 int printed = 0; >> 1103 int cpu = smp_processor_id(); >> 1104 >> 1105 stuck = 1L << 30; >> 1106 try_again: >> 1107 >> 1108 /* Use sub-sections to put the actual loop at the end >> 1109 of this object file's text section so as to perfect >> 1110 branch prediction. */ >> 1111 __asm__ __volatile__( >> 1112 "1: ldl_l %0,%1\n" >> 1113 " subq %2,1,%2\n" >> 1114 " blbs %0,2f\n" >> 1115 " or %0,1,%0\n" >> 1116 " stl_c %0,%1\n" >> 1117 " beq %0,3f\n" >> 1118 "4: mb\n" >> 1119 ".subsection 2\n" >> 1120 "2: ldl %0,%1\n" >> 1121 " subq %2,1,%2\n" >> 1122 "3: blt %2,4b\n" >> 1123 " blbs %0,2b\n" >> 1124 " br 1b\n" >> 1125 ".previous" >> 1126 : "=r" (tmp), "=m" (lock->lock), "=r" (stuck) >> 1127 : "1" (lock->lock), "2" (stuck) : "memory"); >> 1128 >> 1129 if (stuck < 0) { >> 1130 printk(KERN_WARNING >> 1131 "%s:%d spinlock stuck in %s at %p(%d)" >> 1132 " owner %s at %p(%d) %s:%d\n", >> 1133 base_file, line_no, >> 1134 current->comm, inline_pc, cpu, >> 1135 lock->task->comm, lock->previous, >> 1136 lock->on_cpu, lock->base_file, lock->line_no); >> 1137 stuck = 1L << 36; >> 1138 printed = 1; >> 1139 goto try_again; >> 1140 } >> 1141 >> 1142 /* Exiting. Got the lock. */ >> 1143 lock->on_cpu = cpu; >> 1144 lock->previous = inline_pc; >> 1145 lock->task = current; >> 1146 lock->base_file = base_file; >> 1147 lock->line_no = line_no; >> 1148 >> 1149 if (printed) { >> 1150 printk(KERN_WARNING >> 1151 "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n", >> 1152 base_file, line_no, current->comm, inline_pc, >> 1153 cpu, jiffies - started); 453 } 1154 } 454 local_flush_tlb_page(vma, page); << 455 preempt_enable(); << 456 } 1155 } 457 1156 458 static void flush_tlb_one_ipi(void *info) !! 1157 int >> 1158 debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no) 459 { 1159 { 460 struct flush_tlb_data *fd = (struct fl !! 1160 int ret; 461 local_flush_tlb_one(fd->addr1, fd->add !! 
1161 if ((ret = !test_and_set_bit(0, lock))) { >> 1162 lock->on_cpu = smp_processor_id(); >> 1163 lock->previous = __builtin_return_address(0); >> 1164 lock->task = current; >> 1165 } else { >> 1166 lock->base_file = base_file; >> 1167 lock->line_no = line_no; >> 1168 } >> 1169 return ret; >> 1170 } >> 1171 #endif /* CONFIG_DEBUG_SPINLOCK */ >> 1172 >> 1173 #ifdef CONFIG_DEBUG_RWLOCK >> 1174 void write_lock(rwlock_t * lock) >> 1175 { >> 1176 long regx, regy; >> 1177 int stuck_lock, stuck_reader; >> 1178 void *inline_pc = __builtin_return_address(0); >> 1179 >> 1180 try_again: >> 1181 >> 1182 stuck_lock = 1<<30; >> 1183 stuck_reader = 1<<30; >> 1184 >> 1185 __asm__ __volatile__( >> 1186 "1: ldl_l %1,%0\n" >> 1187 " blbs %1,6f\n" >> 1188 " blt %1,8f\n" >> 1189 " mov 1,%1\n" >> 1190 " stl_c %1,%0\n" >> 1191 " beq %1,6f\n" >> 1192 "4: mb\n" >> 1193 ".subsection 2\n" >> 1194 "6: blt %3,4b # debug\n" >> 1195 " subl %3,1,%3 # debug\n" >> 1196 " ldl %1,%0\n" >> 1197 " blbs %1,6b\n" >> 1198 "8: blt %4,4b # debug\n" >> 1199 " subl %4,1,%4 # debug\n" >> 1200 " ldl %1,%0\n" >> 1201 " blt %1,8b\n" >> 1202 " br 1b\n" >> 1203 ".previous" >> 1204 : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy), >> 1205 "=&r" (stuck_lock), "=&r" (stuck_reader) >> 1206 : "" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory"); >> 1207 >> 1208 if (stuck_lock < 0) { >> 1209 printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc); >> 1210 goto try_again; >> 1211 } >> 1212 if (stuck_reader < 0) { >> 1213 printk(KERN_WARNING "write_lock stuck on readers at %p\n", >> 1214 inline_pc); >> 1215 goto try_again; >> 1216 } 462 } 1217 } 463 1218 464 void flush_tlb_one(unsigned long asid, unsigne !! 1219 void read_lock(rwlock_t * lock) 465 { 1220 { 466 struct flush_tlb_data fd; !! 1221 long regx; 467 !! 1222 int stuck_lock; 468 fd.addr1 = asid; !! 1223 void *inline_pc = __builtin_return_address(0); 469 fd.addr2 = vaddr; !! 1224 470 !! 1225 try_again: 471 smp_call_function(flush_tlb_one_ipi, ( !! 1226 472 local_flush_tlb_one(asid, vaddr); !! 1227 stuck_lock = 1<<30; >> 1228 >> 1229 __asm__ __volatile__( >> 1230 "1: ldl_l %1,%0;" >> 1231 " blbs %1,6f;" >> 1232 " subl %1,2,%1;" >> 1233 " stl_c %1,%0;" >> 1234 " beq %1,6f;" >> 1235 "4: mb\n" >> 1236 ".subsection 2\n" >> 1237 "6: ldl %1,%0;" >> 1238 " blt %2,4b # debug\n" >> 1239 " subl %2,1,%2 # debug\n" >> 1240 " blbs %1,6b;" >> 1241 " br 1b\n" >> 1242 ".previous" >> 1243 : "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock) >> 1244 : "" (*(volatile int *)lock), "2" (stuck_lock) : "memory"); >> 1245 >> 1246 if (stuck_lock < 0) { >> 1247 printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc); >> 1248 goto try_again; >> 1249 } 473 } 1250 } 474 !! 1251 #endif /* CONFIG_DEBUG_RWLOCK */ 475 #endif << 476 1252