
Linux/arch/sparc/kernel/sun4d_smp.c


// SPDX-License-Identifier: GPL-2.0
/* Sparc SS1000/SC2000 SMP support.
 *
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 *
 * Based on sun4m's smp.c, which is:
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/oplib.h>
#include <asm/sbi.h>
#include <asm/mmu.h>

#include "kernel.h"
#include "irq.h"

#define IRQ_CROSS_CALL          15

static volatile int smp_processors_ready;
static int smp_highest_cpu;

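/*
 * Atomically exchange *ptr with val using the SPARC "swap" instruction.
 * Used below to publish this CPU's entry in cpu_callin_map so the boot
 * CPU's polling loop in smp4d_boot_one_cpu() sees a single atomic store.
 */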
static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val)
{
        __asm__ __volatile__("swap [%1], %0\n\t" :
                             "=&r" (val), "=&r" (ptr) :
                             "0" (val), "1" (ptr));
        return val;
}

static void smp4d_ipi_init(void);

static unsigned char cpu_leds[32];

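/*
 * Each byte of the board LED register appears to cover a pair of CPUs:
 * the even CPU's 4-bit pattern in the high nibble, the odd CPU's in the
 * low nibble.  show_leds() rounds cpuid down to the even member of the
 * pair and writes the combined byte to the BB_LEDS register in that
 * board's ECSR space via ASI_M_CTL.
 */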
static inline void show_leds(int cpuid)
{
        cpuid &= 0x1e;
        __asm__ __volatile__ ("stba %0, [%1] %2" : :
                              "r" ((cpu_leds[cpuid] << 4) | cpu_leds[cpuid+1]),
                              "r" (ECSR_BASE(cpuid) | BB_LEDS),
                              "i" (ASI_M_CTL));
}

void sun4d_cpu_pre_starting(void *arg)
{
        int cpuid = hard_smp_processor_id();

        /* Show we are alive */
        cpu_leds[cpuid] = 0x6;
        show_leds(cpuid);

        /* Enable level15 interrupt, disable level14 interrupt for now */
        cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
}

void sun4d_cpu_pre_online(void *arg)
{
        unsigned long flags;
        int cpuid;

        cpuid = hard_smp_processor_id();

        /* Unblock the master CPU _only_ when the scheduler state
         * of all secondary CPUs will be up-to-date, so after
         * the SMP initialization the master will be just allowed
         * to call the scheduler code.
         */
        sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
        local_ops->cache_all();
        local_ops->tlb_all();

        while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
                barrier();

        while (current_set[cpuid]->cpu != cpuid)
                barrier();

        /* Fix idle thread fields. */
        __asm__ __volatile__("ld [%0], %%g6\n\t"
                             : : "r" (&current_set[cpuid])
                             : "memory" /* paranoid */);

        cpu_leds[cpuid] = 0x9;
        show_leds(cpuid);

        /* Attach to the address space of init_task. */
        mmgrab(&init_mm);
        current->active_mm = &init_mm;

        local_ops->cache_all();
        local_ops->tlb_all();

        while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
                barrier();

        spin_lock_irqsave(&sun4d_imsk_lock, flags);
        cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
        spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
}

/*
 *      Cycle through the processors asking the PROM to start each one.
 */
void __init smp4d_boot_cpus(void)
{
        smp4d_ipi_init();
        if (boot_cpu_id)
                current_set[0] = NULL;
        local_ops->cache_all();
}

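/*
 * Start a single secondary CPU: point current_set[] at its idle thread,
 * rebuild smp_penguin_ctable (prom_startcpu() trashes it), ask the PROM
 * to start the CPU at sun4d_cpu_startup, then poll cpu_callin_map for
 * roughly two seconds (10000 * 200us) waiting for it to check in.
 */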
int smp4d_boot_one_cpu(int i, struct task_struct *idle)
{
        unsigned long *entry = &sun4d_cpu_startup;
        int timeout;
        int cpu_node;

        cpu_find_by_instance(i, &cpu_node, NULL);
        current_set[i] = task_thread_info(idle);
        /*
         * Initialize the contexts table
         * Since the call to prom_startcpu() trashes the structure,
         * we need to re-initialize it for each cpu
         */
        smp_penguin_ctable.which_io = 0;
        smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
        smp_penguin_ctable.reg_size = 0;

        /* whirrr, whirrr, whirrrrrrrrr... */
        printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
        local_ops->cache_all();
        prom_startcpu(cpu_node,
                      &smp_penguin_ctable, 0, (char *)entry);

        printk(KERN_INFO "prom_startcpu returned :)\n");

        /* wheee... it's going... */
        for (timeout = 0; timeout < 10000; timeout++) {
                if (cpu_callin_map[i])
                        break;
                udelay(200);
        }

        if (!(cpu_callin_map[i])) {
                printk(KERN_ERR "Processor %d is stuck.\n", i);
                return -ENODEV;

        }
        local_ops->cache_all();
        return 0;
}

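/*
 * Link the online CPUs into a circular list through cpu_data(i).next
 * (the last CPU wraps back to the first) so sun4d_distribute_irqs() can
 * rotate interrupts across them, then declare the SMP bring-up done.
 */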
void __init smp4d_smp_done(void)
{
        int i, first;
        int *prev;

        /* setup cpu list for irq rotation */
        first = 0;
        prev = &first;
        for_each_online_cpu(i) {
                *prev = i;
                prev = &cpu_data(i).next;
        }
        *prev = first;
        local_ops->cache_all();

        /* Ok, they are spinning and ready to go. */
        smp_processors_ready = 1;
        sun4d_distribute_irqs();
}

/* Memory structure giving interrupt handler information about IPI generated */
struct sun4d_ipi_work {
        int single;
        int msk;
        int resched;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sun4d_ipi_work, sun4d_ipi_work);

/* Initialize IPIs on the SUN4D SMP machine */
static void __init smp4d_ipi_init(void)
{
        int cpu;
        struct sun4d_ipi_work *work;

        printk(KERN_INFO "smp4d: setup IPI at IRQ %d\n", SUN4D_IPI_IRQ);

        for_each_possible_cpu(cpu) {
                work = &per_cpu(sun4d_ipi_work, cpu);
                work->single = work->msk = work->resched = 0;
        }
}

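/*
 * IPI receive path: the sender sets one of the flags in the target's
 * sun4d_ipi_work and raises SUN4D_IPI_IRQ on it; here each pending flag
 * is cleared and the matching generic SMP handler is run.
 */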
void sun4d_ipi_interrupt(void)
{
        struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work);

        if (work->single) {
                work->single = 0;
                smp_call_function_single_interrupt();
        }
        if (work->msk) {
                work->msk = 0;
                smp_call_function_interrupt();
        }
        if (work->resched) {
                work->resched = 0;
                smp_resched_interrupt();
        }
}

/* +-------+-------------+-----------+------------------------------------+
 * | bcast |  devid      |   sid     |              levels mask           |
 * +-------+-------------+-----------+------------------------------------+
 *  31      30         23 22       15 14                                 0
 */
#define IGEN_MESSAGE(bcast, devid, sid, levels) \
        (((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))

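/*
 * Raise interrupt "level" on one CPU by writing a message to the
 * interrupt generator through cc_set_igen().  Following the field layout
 * above: no broadcast, devid derived from the target CPU (cpu << 3), and
 * a one-hot "levels" mask selecting the requested interrupt level.
 */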
static void sun4d_send_ipi(int cpu, int level)
{
        cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
}

static void sun4d_ipi_single(int cpu)
{
        struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);

        /* Mark work */
        work->single = 1;

        /* Generate IRQ on the CPU */
        sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
}

static void sun4d_ipi_mask_one(int cpu)
{
        struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);

        /* Mark work */
        work->msk = 1;

        /* Generate IRQ on the CPU */
        sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
}

static void sun4d_ipi_resched(int cpu)
{
        struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu);

        /* Mark work */
        work->resched = 1;

        /* Generate IRQ on the CPU (any IRQ will cause resched) */
        sun4d_send_ipi(cpu, SUN4D_IPI_IRQ);
}

static struct smp_funcall {
        void *func;
        unsigned long arg1;
        unsigned long arg2;
        unsigned long arg3;
        unsigned long arg4;
        unsigned long arg5;
        unsigned char processors_in[NR_CPUS];  /* Set when ipi entered. */
        unsigned char processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info __attribute__((aligned(8)));

static DEFINE_SPINLOCK(cross_call_lock);

/* Cross calls must be serialized, at least currently. */
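/*
 * Protocol: copy the function pointer and arguments into ccall_info with
 * three 64-bit "std" stores (hence the aligned(8) attribute and the use
 * of adjacent %i register pairs), send IRQ_CROSS_CALL to every other
 * online CPU in the mask, then spin until each target has marked
 * processors_in and processors_out, i.e. has entered and finished the
 * called function.
 */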
static void sun4d_cross_call(void *func, cpumask_t mask, unsigned long arg1,
                             unsigned long arg2, unsigned long arg3,
                             unsigned long arg4)
{
        if (smp_processors_ready) {
                register int high = smp_highest_cpu;
                unsigned long flags;

                spin_lock_irqsave(&cross_call_lock, flags);

                {
                        /*
                         * If you make changes here, make sure
                         * gcc generates proper code...
                         */
                        register void *f asm("i0") = func;
                        register unsigned long a1 asm("i1") = arg1;
                        register unsigned long a2 asm("i2") = arg2;
                        register unsigned long a3 asm("i3") = arg3;
                        register unsigned long a4 asm("i4") = arg4;
                        register unsigned long a5 asm("i5") = 0;

                        __asm__ __volatile__(
                                "std %0, [%6]\n\t"
                                "std %2, [%6 + 8]\n\t"
                                "std %4, [%6 + 16]\n\t" : :
                                "r"(f), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
                                "r" (&ccall_info.func));
                }

                /* Init receive/complete mapping, plus fire the IPI's off. */
                {
                        register int i;

                        cpumask_clear_cpu(smp_processor_id(), &mask);
                        cpumask_and(&mask, cpu_online_mask, &mask);
                        for (i = 0; i <= high; i++) {
                                if (cpumask_test_cpu(i, &mask)) {
                                        ccall_info.processors_in[i] = 0;
                                        ccall_info.processors_out[i] = 0;
                                        sun4d_send_ipi(i, IRQ_CROSS_CALL);
                                }
                        }
                }

                {
                        register int i;

                        i = 0;
                        do {
                                if (!cpumask_test_cpu(i, &mask))
                                        continue;
                                while (!ccall_info.processors_in[i])
                                        barrier();
                        } while (++i <= high);

                        i = 0;
                        do {
                                if (!cpumask_test_cpu(i, &mask))
                                        continue;
                                while (!ccall_info.processors_out[i])
                                        barrier();
                        } while (++i <= high);
                }

                spin_unlock_irqrestore(&cross_call_lock, flags);
        }
}

/* Running cross calls. */
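/*
 * Receiving side of sun4d_cross_call(): runs in response to the
 * IRQ_CROSS_CALL (level 15) IPI on each target CPU, flags its arrival in
 * processors_in[], runs the requested function, then flags completion in
 * processors_out[].
 */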
void smp4d_cross_call_irq(void)
{
        void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
                     unsigned long) = ccall_info.func;
        int i = hard_smp_processor_id();

        ccall_info.processors_in[i] = 1;
        func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
             ccall_info.arg5);
        ccall_info.processors_out[i] = 1;
}

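/*
 * Per-CPU timer tick: acknowledge the profile timer (INTR_TABLE[0] bit
 * 0x1), step this CPU's LED blink pattern once every 16 ticks, then hand
 * the tick to the per-CPU sparc32 clockevent between irq_enter() and
 * irq_exit().
 */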
void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        int cpu = hard_smp_processor_id();
        struct clock_event_device *ce;
        static int cpu_tick[NR_CPUS];
        static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };

        old_regs = set_irq_regs(regs);
        bw_get_prof_limit(cpu);
        bw_clear_intr_mask(0, 1);       /* INTR_TABLE[0] & 1 is Profile IRQ */

        cpu_tick[cpu]++;
        if (!(cpu_tick[cpu] & 15)) {
                if (cpu_tick[cpu] == 0x60)
                        cpu_tick[cpu] = 0;
                cpu_leds[cpu] = led_mask[cpu_tick[cpu] >> 4];
                show_leds(cpu);
        }

        ce = &per_cpu(sparc32_clockevent, cpu);

        irq_enter();
        ce->event_handler(ce);
        irq_exit();

        set_irq_regs(old_regs);
}

static const struct sparc32_ipi_ops sun4d_ipi_ops = {
        .cross_call = sun4d_cross_call,
        .resched    = sun4d_ipi_resched,
        .single     = sun4d_ipi_single,
        .mask_one   = sun4d_ipi_mask_one,
};

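/*
 * Early SMP setup: adjust the ipi15 trap-table entry by the offset
 * between the sun4d and sun4m handlers so level-15 IPIs land in
 * linux_trap_ipi15_sun4d, install the sun4d IPI operations, and
 * initialize every cross-call in/out slot to the completed state.
 */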
void __init sun4d_init_smp(void)
{
        int i;

        /* Patch ipi15 trap table */
        t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);

        sparc32_ipi_ops = &sun4d_ipi_ops;

        for (i = 0; i < NR_CPUS; i++) {
                ccall_info.processors_in[i] = 1;
                ccall_info.processors_out[i] = 1;
        }
}
