TOMOYO Linux Cross Reference
Linux/arch/loongarch/kernel/smp.c

  1 // SPDX-License-Identifier: GPL-2.0-or-later
  2 /*
  3  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  4  *
  5  * Derived from MIPS:
  6  * Copyright (C) 2000, 2001 Kanoj Sarcar
  7  * Copyright (C) 2000, 2001 Ralf Baechle
  8  * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
  9  * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 10  */
 11 #include <linux/acpi.h>
 12 #include <linux/cpu.h>
 13 #include <linux/cpumask.h>
 14 #include <linux/init.h>
 15 #include <linux/interrupt.h>
 16 #include <linux/irq_work.h>
 17 #include <linux/profile.h>
 18 #include <linux/seq_file.h>
 19 #include <linux/smp.h>
 20 #include <linux/threads.h>
 21 #include <linux/export.h>
 22 #include <linux/syscore_ops.h>
 23 #include <linux/time.h>
 24 #include <linux/tracepoint.h>
 25 #include <linux/sched/hotplug.h>
 26 #include <linux/sched/task_stack.h>
 27 
 28 #include <asm/cpu.h>
 29 #include <asm/idle.h>
 30 #include <asm/loongson.h>
 31 #include <asm/mmu_context.h>
 32 #include <asm/numa.h>
 33 #include <asm/paravirt.h>
 34 #include <asm/processor.h>
 35 #include <asm/setup.h>
 36 #include <asm/time.h>
 37 
 38 int __cpu_number_map[NR_CPUS];   /* Map physical to logical */
 39 EXPORT_SYMBOL(__cpu_number_map);
 40 
 41 int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */
 42 EXPORT_SYMBOL(__cpu_logical_map);
 43 
 44 /* Representing the threads (siblings) of each logical CPU */
 45 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 46 EXPORT_SYMBOL(cpu_sibling_map);
 47 
 48 /* Representing the core map of multi-core chips of each logical CPU */
 49 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 50 EXPORT_SYMBOL(cpu_core_map);
 51 
 52 static DECLARE_COMPLETION(cpu_starting);
 53 static DECLARE_COMPLETION(cpu_running);
 54 
 55 /*
 56  * A logical cpu mask containing only one VPE per core to
 57  * reduce the number of IPIs on large MT systems.
 58  */
 59 cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
 60 EXPORT_SYMBOL(cpu_foreign_map);
 61 
 62 /* representing cpus for which sibling maps can be computed */
 63 static cpumask_t cpu_sibling_setup_map;
 64 
 65 /* representing cpus for which core maps can be computed */
 66 static cpumask_t cpu_core_setup_map;
 67 
 68 struct secondary_data cpuboot_data;
 69 static DEFINE_PER_CPU(int, cpu_state);
 70 
 71 static const char *ipi_types[NR_IPI] __tracepoint_string = {
 72         [IPI_RESCHEDULE] = "Rescheduling interrupts",
 73         [IPI_CALL_FUNCTION] = "Function call interrupts",
 74         [IPI_IRQ_WORK] = "IRQ work interrupts",
 75 };
 76 
 77 void show_ipi_list(struct seq_file *p, int prec)
 78 {
 79         unsigned int cpu, i;
 80 
 81         for (i = 0; i < NR_IPI; i++) {
 82                 seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
 83                 for_each_online_cpu(cpu)
 84                         seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
 85                 seq_printf(p, " LoongArch  %d  %s\n", i + 1, ipi_types[i]);
 86         }
 87 }
 88 
 89 static inline void set_cpu_core_map(int cpu)
 90 {
 91         int i;
 92 
 93         cpumask_set_cpu(cpu, &cpu_core_setup_map);
 94 
 95         for_each_cpu(i, &cpu_core_setup_map) {
 96                 if (cpu_data[cpu].package == cpu_data[i].package) {
 97                         cpumask_set_cpu(i, &cpu_core_map[cpu]);
 98                         cpumask_set_cpu(cpu, &cpu_core_map[i]);
 99                 }
100         }
101 }
102 
103 static inline void set_cpu_sibling_map(int cpu)
104 {
105         int i;
106 
107         cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
108 
109         for_each_cpu(i, &cpu_sibling_setup_map) {
110                 if (cpus_are_siblings(cpu, i)) {
111                         cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
112                         cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
113                 }
114         }
115 }
116 
117 static inline void clear_cpu_sibling_map(int cpu)
118 {
119         int i;
120 
121         for_each_cpu(i, &cpu_sibling_setup_map) {
122                 if (cpus_are_siblings(cpu, i)) {
123                         cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
124                         cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
125                 }
126         }
127 
128         cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
129 }
130 
131 /*
132  * Calculate a new cpu_foreign_map mask whenever a
133  * new cpu appears or disappears.
134  */
135 void calculate_cpu_foreign_map(void)
136 {
137         int i, k, core_present;
138         cpumask_t temp_foreign_map;
139 
140         /* Re-calculate the mask */
141         cpumask_clear(&temp_foreign_map);
142         for_each_online_cpu(i) {
143                 core_present = 0;
144                 for_each_cpu(k, &temp_foreign_map)
145                         if (cpus_are_siblings(i, k))
146                                 core_present = 1;
147                 if (!core_present)
148                         cpumask_set_cpu(i, &temp_foreign_map);
149         }
150 
151         for_each_online_cpu(i)
152                 cpumask_andnot(&cpu_foreign_map[i],
153                                &temp_foreign_map, &cpu_sibling_map[i]);
154 }
155 
156 /* Send mailbox buffer via Mail_Send */
157 static void csr_mail_send(uint64_t data, int cpu, int mailbox)
158 {
159         uint64_t val;
160 
161         /* Send high 32 bits */
162         val = IOCSR_MBUF_SEND_BLOCKING;
163         val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
164         val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
165         val |= (data & IOCSR_MBUF_SEND_H32_MASK);
166         iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
167 
168         /* Send low 32 bits */
169         val = IOCSR_MBUF_SEND_BLOCKING;
170         val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
171         val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
172         val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
173         iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
174 }
175 
176 static u32 ipi_read_clear(int cpu)
177 {
178         u32 action;
179 
180         /* Load the ipi register to figure out what we're supposed to do */
181         action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
182         /* Clear the ipi register to clear the interrupt */
183         iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
184         wbflush();
185 
186         return action;
187 }
188 
189 static void ipi_write_action(int cpu, u32 action)
190 {
191         uint32_t val;
192 
193         val = IOCSR_IPI_SEND_BLOCKING | action;
194         val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
195         iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
196 }
197 
198 static void loongson_send_ipi_single(int cpu, unsigned int action)
199 {
200         ipi_write_action(cpu_logical_map(cpu), (u32)action);
201 }
202 
203 static void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
204 {
205         unsigned int i;
206 
207         for_each_cpu(i, mask)
208                 ipi_write_action(cpu_logical_map(i), (u32)action);
209 }
210 
211 /*
212  * This function sends a 'reschedule' IPI to another CPU.
213  * It goes straight through and wastes no time serializing
214  * anything. Worst case is that we lose a reschedule ...
215  */
216 void arch_smp_send_reschedule(int cpu)
217 {
218         mp_ops.send_ipi_single(cpu, ACTION_RESCHEDULE);
219 }
220 EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
221 
222 #ifdef CONFIG_IRQ_WORK
223 void arch_irq_work_raise(void)
224 {
225         mp_ops.send_ipi_single(smp_processor_id(), ACTION_IRQ_WORK);
226 }
227 #endif
228 
229 static irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
230 {
231         unsigned int action;
232         unsigned int cpu = smp_processor_id();
233 
234         action = ipi_read_clear(cpu_logical_map(cpu));
235 
236         if (action & SMP_RESCHEDULE) {
237                 scheduler_ipi();
238                 per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
239         }
240 
241         if (action & SMP_CALL_FUNCTION) {
242                 generic_smp_call_function_interrupt();
243                 per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
244         }
245 
246         if (action & SMP_IRQ_WORK) {
247                 irq_work_run();
248                 per_cpu(irq_stat, cpu).ipi_irqs[IPI_IRQ_WORK]++;
249         }
250 
251         return IRQ_HANDLED;
252 }
253 
254 static void loongson_init_ipi(void)
255 {
256         int r, ipi_irq;
257 
258         ipi_irq = get_percpu_irq(INT_IPI);
259         if (ipi_irq < 0)
260                 panic("IPI IRQ mapping failed\n");
261 
262         irq_set_percpu_devid(ipi_irq);
263         r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &irq_stat);
264         if (r < 0)
265                 panic("IPI IRQ request failed\n");
266 }
267 
268 struct smp_ops mp_ops = {
269         .init_ipi               = loongson_init_ipi,
270         .send_ipi_single        = loongson_send_ipi_single,
271         .send_ipi_mask          = loongson_send_ipi_mask,
272 };
273 
274 static void __init fdt_smp_setup(void)
275 {
276 #ifdef CONFIG_OF
277         unsigned int cpu, cpuid;
278         struct device_node *node = NULL;
279 
280         for_each_of_cpu_node(node) {
281                 if (!of_device_is_available(node))
282                         continue;
283 
284                 cpuid = of_get_cpu_hwid(node, 0);
285                 if (cpuid >= nr_cpu_ids)
286                         continue;
287 
288                 if (cpuid == loongson_sysconf.boot_cpu_id)
289                         cpu = 0;
290                 else
291                         cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
292 
293                 num_processors++;
294                 set_cpu_possible(cpu, true);
295                 set_cpu_present(cpu, true);
296                 __cpu_number_map[cpuid] = cpu;
297                 __cpu_logical_map[cpu] = cpuid;
298 
299                 early_numa_add_cpu(cpu, 0);
300                 set_cpuid_to_node(cpuid, 0);
301         }
302 
303         loongson_sysconf.nr_cpus = num_processors;
304         set_bit(0, loongson_sysconf.cores_io_master);
305 #endif
306 }
307 
308 void __init loongson_smp_setup(void)
309 {
310         fdt_smp_setup();
311 
312         if (loongson_sysconf.cores_per_package == 0)
313                 loongson_sysconf.cores_per_package = num_processors;
314 
315         cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
316         cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
317 
318         pv_ipi_init();
319         iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
320         pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
321 }
322 
323 void __init loongson_prepare_cpus(unsigned int max_cpus)
324 {
325         int i = 0;
326 
327         parse_acpi_topology();
328 
329         for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
330                 set_cpu_present(i, true);
331                 csr_mail_send(0, __cpu_logical_map[i], 0);
332                 cpu_data[i].global_id = __cpu_logical_map[i];
333         }
334 
335         per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
336 }
337 
338 /*
339  * Set up the PC, SP, and TP of a secondary processor and start it running!
340  */
341 void loongson_boot_secondary(int cpu, struct task_struct *idle)
342 {
343         unsigned long entry;
344 
345         pr_info("Booting CPU#%d...\n", cpu);
346 
347         entry = __pa_symbol((unsigned long)&smpboot_entry);
348         cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
349         cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);
350 
351         csr_mail_send(entry, cpu_logical_map(cpu), 0);
352 
353         loongson_send_ipi_single(cpu, ACTION_BOOT_CPU);
354 }
355 
356 /*
357  * SMP init and finish on secondary CPUs
358  */
359 void loongson_init_secondary(void)
360 {
361         unsigned int cpu = smp_processor_id();
362         unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
363                              ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER | ECFGF_SIP0;
364 
365         change_csr_ecfg(ECFG0_IM, imask);
366 
367         iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
368 
369 #ifdef CONFIG_NUMA
370         numa_add_cpu(cpu);
371 #endif
372         per_cpu(cpu_state, cpu) = CPU_ONLINE;
373         cpu_data[cpu].package =
374                      cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
375         cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
376                      cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
377 }
378 
379 void loongson_smp_finish(void)
380 {
381         local_irq_enable();
382         iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
383         pr_info("CPU#%d finished\n", smp_processor_id());
384 }
385 
386 #ifdef CONFIG_HOTPLUG_CPU
387 
388 int loongson_cpu_disable(void)
389 {
390         unsigned long flags;
391         unsigned int cpu = smp_processor_id();
392 
393         if (io_master(cpu))
394                 return -EBUSY;
395 
396 #ifdef CONFIG_NUMA
397         numa_remove_cpu(cpu);
398 #endif
399         set_cpu_online(cpu, false);
400         clear_cpu_sibling_map(cpu);
401         calculate_cpu_foreign_map();
402         local_irq_save(flags);
403         irq_migrate_all_off_this_cpu();
404         clear_csr_ecfg(ECFG0_IM);
405         local_irq_restore(flags);
406         local_flush_tlb_all();
407 
408         return 0;
409 }
410 
411 void loongson_cpu_die(unsigned int cpu)
412 {
413         while (per_cpu(cpu_state, cpu) != CPU_DEAD)
414                 cpu_relax();
415 
416         mb();
417 }
418 
419 void __noreturn arch_cpu_idle_dead(void)
420 {
421         register uint64_t addr;
422         register void (*init_fn)(void);
423 
424         idle_task_exit();
425         local_irq_enable();
426         set_csr_ecfg(ECFGF_IPI);
427         __this_cpu_write(cpu_state, CPU_DEAD);
428 
429         __smp_mb();
430         do {
431                 __asm__ __volatile__("idle 0\n\t");
432                 addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
433         } while (addr == 0);
434 
435         local_irq_disable();
436         init_fn = (void *)TO_CACHE(addr);
437         iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
438 
439         init_fn();
440         BUG();
441 }
442 
443 #endif
444 
445 /*
446  * Power management
447  */
448 #ifdef CONFIG_PM
449 
450 static int loongson_ipi_suspend(void)
451 {
452         return 0;
453 }
454 
455 static void loongson_ipi_resume(void)
456 {
457         iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
458 }
459 
460 static struct syscore_ops loongson_ipi_syscore_ops = {
461         .resume         = loongson_ipi_resume,
462         .suspend        = loongson_ipi_suspend,
463 };
464 
465 /*
466  * Enable the boot CPU's IPI before enabling non-boot CPUs
467  * during syscore_resume.
468  */
469 static int __init ipi_pm_init(void)
470 {
471         register_syscore_ops(&loongson_ipi_syscore_ops);
472         return 0;
473 }
474 
475 core_initcall(ipi_pm_init);
476 #endif
477 
478 /* Preload SMP state for boot cpu */
479 void smp_prepare_boot_cpu(void)
480 {
481         unsigned int cpu, node, rr_node;
482 
483         set_cpu_possible(0, true);
484         set_cpu_online(0, true);
485         set_my_cpu_offset(per_cpu_offset(0));
486         numa_add_cpu(0);
487 
488         rr_node = first_node(node_online_map);
489         for_each_possible_cpu(cpu) {
490                 node = early_cpu_to_node(cpu);
491 
492                 /*
493                  * The mapping between present cpus and nodes has been
494                  * built during MADT and SRAT parsing.
495                  *
496                  * If possible cpus = present cpus here, early_cpu_to_node
497                  * will return a valid node.
498                  *
499                  * If possible cpus > present cpus here (e.g. some possible
500                  * cpus will be added by cpu-hotplug later), for possible but
501                  * not present cpus, early_cpu_to_node will return NUMA_NO_NODE,
502                  * and we just map them to online nodes in a round-robin way.
503                  * Once hotplugged, a correct mapping will be built for them.
504                  */
505                 if (node != NUMA_NO_NODE)
506                         set_cpu_numa_node(cpu, node);
507                 else {
508                         set_cpu_numa_node(cpu, rr_node);
509                         rr_node = next_node_in(rr_node, node_online_map);
510                 }
511         }
512 }
513 
514 /* called from main before smp_init() */
515 void __init smp_prepare_cpus(unsigned int max_cpus)
516 {
517         init_new_context(current, &init_mm);
518         current_thread_info()->cpu = 0;
519         loongson_prepare_cpus(max_cpus);
520         set_cpu_sibling_map(0);
521         set_cpu_core_map(0);
522         calculate_cpu_foreign_map();
523 #ifndef CONFIG_HOTPLUG_CPU
524         init_cpu_present(cpu_possible_mask);
525 #endif
526 }
527 
528 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
529 {
530         loongson_boot_secondary(cpu, tidle);
531 
532         /* Wait for CPU to start and be ready to sync counters */
533         if (!wait_for_completion_timeout(&cpu_starting,
534                                          msecs_to_jiffies(5000))) {
535                 pr_crit("CPU%u: failed to start\n", cpu);
536                 return -EIO;
537         }
538 
539         /* Wait for CPU to finish startup & mark itself online before return */
540         wait_for_completion(&cpu_running);
541 
542         return 0;
543 }
544 
545 /*
546  * First C code run on the secondary CPUs after being started up by
547  * the master.
548  */
549 asmlinkage void start_secondary(void)
550 {
551         unsigned int cpu;
552 
553         sync_counter();
554         cpu = raw_smp_processor_id();
555         set_my_cpu_offset(per_cpu_offset(cpu));
556 
557         cpu_probe();
558         constant_clockevent_init();
559         loongson_init_secondary();
560 
561         set_cpu_sibling_map(cpu);
562         set_cpu_core_map(cpu);
563 
564         notify_cpu_starting(cpu);
565 
566         /* Notify boot CPU that we're starting */
567         complete(&cpu_starting);
568 
569         /* The CPU is running, now mark it online */
570         set_cpu_online(cpu, true);
571 
572         calculate_cpu_foreign_map();
573 
574         /*
575          * Notify boot CPU that we're up & online and it can safely return
576          * from __cpu_up()
577          */
578         complete(&cpu_running);
579 
580         /*
581          * IRQs will be enabled in loongson_smp_finish(); enabling them
582          * too early is dangerous.
583          */
584         WARN_ON_ONCE(!irqs_disabled());
585         loongson_smp_finish();
586 
587         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
588 }
589 
590 void __init smp_cpus_done(unsigned int max_cpus)
591 {
592 }
593 
594 static void stop_this_cpu(void *dummy)
595 {
596         set_cpu_online(smp_processor_id(), false);
597         calculate_cpu_foreign_map();
598         local_irq_disable();
599         while (true);
600 }
601 
602 void smp_send_stop(void)
603 {
604         smp_call_function(stop_this_cpu, NULL, 0);
605 }
606 
607 #ifdef CONFIG_PROFILING
608 int setup_profiling_timer(unsigned int multiplier)
609 {
610         return 0;
611 }
612 #endif
613 
614 static void flush_tlb_all_ipi(void *info)
615 {
616         local_flush_tlb_all();
617 }
618 
619 void flush_tlb_all(void)
620 {
621         on_each_cpu(flush_tlb_all_ipi, NULL, 1);
622 }
623 
624 static void flush_tlb_mm_ipi(void *mm)
625 {
626         local_flush_tlb_mm((struct mm_struct *)mm);
627 }
628 
629 void flush_tlb_mm(struct mm_struct *mm)
630 {
631         if (atomic_read(&mm->mm_users) == 0)
632                 return;         /* happens as a result of exit_mmap() */
633 
634         preempt_disable();
635 
636         if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
637                 on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
638         } else {
639                 unsigned int cpu;
640 
641                 for_each_online_cpu(cpu) {
642                         if (cpu != smp_processor_id() && cpu_context(cpu, mm))
643                                 cpu_context(cpu, mm) = 0;
644                 }
645                 local_flush_tlb_mm(mm);
646         }
647 
648         preempt_enable();
649 }
650 
651 struct flush_tlb_data {
652         struct vm_area_struct *vma;
653         unsigned long addr1;
654         unsigned long addr2;
655 };
656 
657 static void flush_tlb_range_ipi(void *info)
658 {
659         struct flush_tlb_data *fd = info;
660 
661         local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
662 }
663 
664 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
665 {
666         struct mm_struct *mm = vma->vm_mm;
667 
668         preempt_disable();
669         if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
670                 struct flush_tlb_data fd = {
671                         .vma = vma,
672                         .addr1 = start,
673                         .addr2 = end,
674                 };
675 
676                 on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
677         } else {
678                 unsigned int cpu;
679 
680                 for_each_online_cpu(cpu) {
681                         if (cpu != smp_processor_id() && cpu_context(cpu, mm))
682                                 cpu_context(cpu, mm) = 0;
683                 }
684                 local_flush_tlb_range(vma, start, end);
685         }
686         preempt_enable();
687 }
688 
689 static void flush_tlb_kernel_range_ipi(void *info)
690 {
691         struct flush_tlb_data *fd = info;
692 
693         local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
694 }
695 
696 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
697 {
698         struct flush_tlb_data fd = {
699                 .addr1 = start,
700                 .addr2 = end,
701         };
702 
703         on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
704 }
705 
706 static void flush_tlb_page_ipi(void *info)
707 {
708         struct flush_tlb_data *fd = info;
709 
710         local_flush_tlb_page(fd->vma, fd->addr1);
711 }
712 
713 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
714 {
715         preempt_disable();
716         if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
717                 struct flush_tlb_data fd = {
718                         .vma = vma,
719                         .addr1 = page,
720                 };
721 
722                 on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
723         } else {
724                 unsigned int cpu;
725 
726                 for_each_online_cpu(cpu) {
727                         if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
728                                 cpu_context(cpu, vma->vm_mm) = 0;
729                 }
730                 local_flush_tlb_page(vma, page);
731         }
732         preempt_enable();
733 }
734 EXPORT_SYMBOL(flush_tlb_page);
735 
736 static void flush_tlb_one_ipi(void *info)
737 {
738         unsigned long vaddr = (unsigned long) info;
739 
740         local_flush_tlb_one(vaddr);
741 }
742 
743 void flush_tlb_one(unsigned long vaddr)
744 {
745         on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
746 }
747 EXPORT_SYMBOL(flush_tlb_one);
748 
