TOMOYO Linux Cross Reference
Linux/arch/alpha/kernel/traps.c


  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * arch/alpha/kernel/traps.c
  4  *
  5  * (C) Copyright 1994 Linus Torvalds
  6  */
  7 
  8 /*
  9  * This file initializes the trap entry points
 10  */
 11 
 12 #include <linux/cpu.h>
 13 #include <linux/jiffies.h>
 14 #include <linux/mm.h>
 15 #include <linux/sched/signal.h>
 16 #include <linux/sched/debug.h>
 17 #include <linux/tty.h>
 18 #include <linux/delay.h>
 19 #include <linux/extable.h>
 20 #include <linux/kallsyms.h>
 21 #include <linux/ratelimit.h>
 22 
 23 #include <asm/gentrap.h>
 24 #include <linux/uaccess.h>
 25 #include <asm/unaligned.h>
 26 #include <asm/sysinfo.h>
 27 #include <asm/hwrpb.h>
 28 #include <asm/mmu_context.h>
 29 #include <asm/special_insns.h>
 30 
 31 #include "proto.h"
 32 
 33 void
 34 dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
 35 {
 36         printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
 37                regs->pc, regs->r26, regs->ps, print_tainted());
 38         printk("pc is at %pSR\n", (void *)regs->pc);
 39         printk("ra is at %pSR\n", (void *)regs->r26);
 40         printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
 41                regs->r0, regs->r1, regs->r2);
 42         printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
 43                regs->r3, regs->r4, regs->r5);
 44         printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
 45                regs->r6, regs->r7, regs->r8);
 46 
 47         if (r9_15) {
 48                 printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
 49                        r9_15[9], r9_15[10], r9_15[11]);
 50                 printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
 51                        r9_15[12], r9_15[13], r9_15[14]);
 52                 printk("s6 = %016lx\n", r9_15[15]);
 53         }
 54 
 55         printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
 56                regs->r16, regs->r17, regs->r18);
 57         printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
 58                regs->r19, regs->r20, regs->r21);
 59         printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
 60                regs->r22, regs->r23, regs->r24);
 61         printk("t11= %016lx  pv = %016lx  at = %016lx\n",
 62                regs->r25, regs->r27, regs->r28);
 63         printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
 64 #if 0
 65 __halt();
 66 #endif
 67 }
 68 
 69 #if 0
 70 static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
 71                            "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
 72                            "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
 73                            "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
 74 #endif
 75 
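    /*
     * Dump the instruction stream around a faulting pc: the six
     * instructions before it, the faulting instruction itself
     * (bracketed with < >), and the one following it.
     */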
 76 static void
 77 dik_show_code(unsigned int *pc)
 78 {
 79         long i;
 80 
 81         printk("Code:");
 82         for (i = -6; i < 2; i++) {
 83                 unsigned int insn;
 84                 if (__get_user(insn, (unsigned int __user *)pc + i))
 85                         break;
 86                 printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
 87         }
 88         printk("\n");
 89 }
 90 
 91 static void
 92 dik_show_trace(unsigned long *sp, const char *loglvl)
 93 {
 94         long i = 0;
 95         printk("%sTrace:\n", loglvl);
 96         while (0x1ff8 & (unsigned long) sp) {
 97                 extern char _stext[], _etext[];
 98                 unsigned long tmp = *sp;
 99                 sp++;
100                 if (!is_kernel_text(tmp))
101                         continue;
102                 printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp);
103                 if (i > 40) {
104                         printk("%s ...", loglvl);
105                         break;
106                 }
107         }
108         printk("%s\n", loglvl);
109 }
110 
111 static int kstack_depth_to_print = 24;
112 
113 void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
114 {
115         unsigned long *stack;
116         int i;
117 
118         /*
119          * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the
120          * back trace for this cpu.
121          */
122         if (sp == NULL)
123                 sp = (unsigned long *)&sp;
124 
125         stack = sp;
126         for (i = 0; i < kstack_depth_to_print; i++) {
127                 if (((long) stack & (THREAD_SIZE-1)) == 0)
128                         break;
129                 if ((i % 4) == 0) {
130                         if (i)
131                                 pr_cont("\n");
132                         printk("%s       ", loglvl);
133                 } else {
134                         pr_cont(" ");
135                 }
136                 pr_cont("%016lx", *stack++);
137         }
138         pr_cont("\n");
139         dik_show_trace(sp, loglvl);
140 }
141 
142 void
143 die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
144 {
145         if (regs->ps & 8)
146                 return;
147 #ifdef CONFIG_SMP
148         printk("CPU %d ", hard_smp_processor_id());
149 #endif
150         printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
151         dik_show_regs(regs, r9_15);
152         add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
153         dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
154         dik_show_code((unsigned int *)regs->pc);
155 
156         if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
157                 printk("die_if_kernel recursion detected.\n");
158                 local_irq_enable();
159                 while (1);
160         }
161         make_task_dead(SIGSEGV);
162 }
163 
164 #ifndef CONFIG_MATHEMU
165 static long dummy_emul(void) { return 0; }
166 long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
167   = (void *)dummy_emul;
168 EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
169 long (*alpha_fp_emul) (unsigned long pc)
170   = (void *)dummy_emul;
171 EXPORT_SYMBOL_GPL(alpha_fp_emul);
172 #else
173 long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
174 long alpha_fp_emul (unsigned long pc);
175 #endif
176 
177 asmlinkage void
178 do_entArith(unsigned long summary, unsigned long write_mask,
179             struct pt_regs *regs)
180 {
181         long si_code = FPE_FLTINV;
182 
183         if (summary & 1) {
184                 /* Software-completion summary bit is set, so try to
185                    emulate the instruction.  If the processor supports
186                    precise exceptions, we don't have to search.  */
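                    /* amask() clears the bits of features this CPU implements,
                       so a zero result means precise arithmetic traps are
                       available and regs->pc - 4 is the faulting instruction. */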
187                 if (!amask(AMASK_PRECISE_TRAP))
188                         si_code = alpha_fp_emul(regs->pc - 4);
189                 else
190                         si_code = alpha_fp_emul_imprecise(regs, write_mask);
191                 if (si_code == 0)
192                         return;
193         }
194         die_if_kernel("Arithmetic fault", regs, 0, NULL);
195 
196         send_sig_fault_trapno(SIGFPE, si_code, (void __user *) regs->pc, 0, current);
197 }
198 
199 asmlinkage void
200 do_entIF(unsigned long type, struct pt_regs *regs)
201 {
202         int signo, code;
203 
204         if (type == 3) { /* FEN fault */
205                 /* Irritating users can call PAL_clrfen to disable the
206                    FPU for the process.  The kernel will then trap in
207                    do_switch_stack and undo_switch_stack when we try
208                    to save and restore the FP registers.
209 
210                    Given that GCC by default generates code that uses the
211                    FP registers, PAL_clrfen is not useful except for DoS
212                    attacks.  So turn the bleeding FPU back on and be done
213                    with it.  */
214                 current_thread_info()->pcb.flags |= 1;
215                 __reload_thread(&current_thread_info()->pcb);
216                 return;
217         }
218         if (!user_mode(regs)) {
219                 if (type == 1) {
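                            /* BUG() on Alpha plants a call_pal bugchk followed by a
                               32-bit line number and a 64-bit file-name pointer;
                               regs->pc points just past the call_pal, so data[0] is
                               the line and data[1]/data[2] form the file name. */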
220                         const unsigned int *data
221                           = (const unsigned int *) regs->pc;
222                         printk("Kernel bug at %s:%d\n",
223                                (const char *)(data[1] | (long)data[2] << 32), 
224                                data[0]);
225                 }
226 #ifdef CONFIG_ALPHA_WTINT
227                 if (type == 4) {
228                         /* If CALL_PAL WTINT is totally unsupported by the
229                            PALcode, e.g. MILO, "emulate" it by overwriting
230                            the insn.  */
231                         unsigned int *pinsn
232                           = (unsigned int *) regs->pc - 1;
233                         if (*pinsn == PAL_wtint) {
234                                 *pinsn = 0x47e01400; /* mov 0,$0 */
235                                 imb();
236                                 regs->r0 = 0;
237                                 return;
238                         }
239                 }
240 #endif /* ALPHA_WTINT */
241                 die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
242                               regs, type, NULL);
243         }
244 
245         switch (type) {
246               case 0: /* breakpoint */
247                 if (ptrace_cancel_bpt(current)) {
248                         regs->pc -= 4;  /* make pc point to former bpt */
249                 }
250 
251                 send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc,
252                                current);
253                 return;
254 
255               case 1: /* bugcheck */
256                 send_sig_fault_trapno(SIGTRAP, TRAP_UNK,
257                                       (void __user *) regs->pc, 0, current);
258                 return;
259                 
260               case 2: /* gentrap */
261                 switch ((long) regs->r16) {
262                 case GEN_INTOVF:
263                         signo = SIGFPE;
264                         code = FPE_INTOVF;
265                         break;
266                 case GEN_INTDIV:
267                         signo = SIGFPE;
268                         code = FPE_INTDIV;
269                         break;
270                 case GEN_FLTOVF:
271                         signo = SIGFPE;
272                         code = FPE_FLTOVF;
273                         break;
274                 case GEN_FLTDIV:
275                         signo = SIGFPE;
276                         code = FPE_FLTDIV;
277                         break;
278                 case GEN_FLTUND:
279                         signo = SIGFPE;
280                         code = FPE_FLTUND;
281                         break;
282                 case GEN_FLTINV:
283                         signo = SIGFPE;
284                         code = FPE_FLTINV;
285                         break;
286                 case GEN_FLTINE:
287                         signo = SIGFPE;
288                         code = FPE_FLTRES;
289                         break;
290                 case GEN_ROPRAND:
291                         signo = SIGFPE;
292                         code = FPE_FLTUNK;
293                         break;
294 
295                 case GEN_DECOVF:
296                 case GEN_DECDIV:
297                 case GEN_DECINV:
298                 case GEN_ASSERTERR:
299                 case GEN_NULPTRERR:
300                 case GEN_STKOVF:
301                 case GEN_STRLENERR:
302                 case GEN_SUBSTRERR:
303                 case GEN_RANGERR:
304                 case GEN_SUBRNG:
305                 case GEN_SUBRNG1:
306                 case GEN_SUBRNG2:
307                 case GEN_SUBRNG3:
308                 case GEN_SUBRNG4:
309                 case GEN_SUBRNG5:
310                 case GEN_SUBRNG6:
311                 case GEN_SUBRNG7:
312                 default:
313                         signo = SIGTRAP;
314                         code = TRAP_UNK;
315                         break;
316                 }
317 
318                 send_sig_fault_trapno(signo, code, (void __user *) regs->pc,
319                                       regs->r16, current);
320                 return;
321 
322               case 4: /* opDEC */
323                 break;
324 
325               case 5: /* illoc */
326               default: /* unexpected instruction-fault type */
327                       ;
328         }
329 
330         send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, current);
331 }
332 
333 /* There is an ifdef in the PALcode in MILO that enables a 
334    "kernel debugging entry point" as an unprivileged call_pal.
335 
336    We don't want to have anything to do with it, but unfortunately
337    several versions of MILO included in distributions have it enabled,
338    and if we don't put something on the entry point we'll oops.  */
339 
340 asmlinkage void
341 do_entDbg(struct pt_regs *regs)
342 {
343         die_if_kernel("Instruction fault", regs, 0, NULL);
344 
345         force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc);
346 }
347 
348 
349 /*
350  * entUna uses a different register layout so that it can stay reasonably simple. It
351  * needs access to all the integer registers (the kernel doesn't use
352  * fp-regs), and it needs to have them in order for simpler access.
353  *
354  * Due to the non-standard register layout (and because we don't want
355  * to handle floating-point regs), user-mode unaligned accesses are
356  * handled separately by do_entUnaUser below.
357  *
358  * Oh, btw, we don't handle the "gp" register correctly, but if we fault
359  * on a gp-register unaligned load/store, something is _very_ wrong
360  * in the kernel anyway..
361  */
362 struct allregs {
363         unsigned long regs[32];
364         unsigned long ps, pc, gp, a0, a1, a2;
365 };
366 
367 struct unaligned_stat {
368         unsigned long count, va, pc;
369 } unaligned[2];
370 
371 
372 /* Macro for exception fixup code to access integer registers.  */
373 #define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
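    /* una_reg() reaches a0-a2 (r16-r18) through the trailing a0/a1/a2 members
       of struct allregs above (flat indices 35-37), which is what the (r)+19
       remap does; every other register sits at its natural index in regs[]. */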
374 
375 
376 asmlinkage void
377 do_entUna(void * va, unsigned long opcode, unsigned long reg,
378           struct allregs *regs)
379 {
380         long error, tmp1, tmp2, tmp3, tmp4;
381         unsigned long pc = regs->pc - 4;
382         unsigned long *_regs = regs->regs;
383         const struct exception_table_entry *fixup;
384 
385         unaligned[0].count++;
386         unaligned[0].va = (unsigned long) va;
387         unaligned[0].pc = pc;
388 
389         /* We don't want to use the generic get/put unaligned macros as
390            we want to trap exceptions.  Only if we actually get an
391            exception will we decide whether we should have caught it.  */
392 
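            /* Each case below builds the unaligned access from two aligned
               ldq_u/stq_u quadword operations combined with the ext/ins/msk
               byte-manipulation instructions.  The EXC() entries emit
               __ex_table fixups so that a fault inside a sequence resumes at
               the local 3:/5: label with 'error' set non-zero. */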
393         switch (opcode) {
394         case 0x0c: /* ldwu */
395                 __asm__ __volatile__(
396                 "1:     ldq_u %1,0(%3)\n"
397                 "2:     ldq_u %2,1(%3)\n"
398                 "       extwl %1,%3,%1\n"
399                 "       extwh %2,%3,%2\n"
400                 "3:\n"
401                 EXC(1b,3b,%1,%0)
402                 EXC(2b,3b,%2,%0)
403                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
404                         : "r"(va), ""(0));
405                 if (error)
406                         goto got_exception;
407                 una_reg(reg) = tmp1|tmp2;
408                 return;
409 
410         case 0x28: /* ldl */
411                 __asm__ __volatile__(
412                 "1:     ldq_u %1,0(%3)\n"
413                 "2:     ldq_u %2,3(%3)\n"
414                 "       extll %1,%3,%1\n"
415                 "       extlh %2,%3,%2\n"
416                 "3:\n"
417                 EXC(1b,3b,%1,%0)
418                 EXC(2b,3b,%2,%0)
419                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
420                         : "r"(va), ""(0));
421                 if (error)
422                         goto got_exception;
423                 una_reg(reg) = (int)(tmp1|tmp2);
424                 return;
425 
426         case 0x29: /* ldq */
427                 __asm__ __volatile__(
428                 "1:     ldq_u %1,0(%3)\n"
429                 "2:     ldq_u %2,7(%3)\n"
430                 "       extql %1,%3,%1\n"
431                 "       extqh %2,%3,%2\n"
432                 "3:\n"
433                 EXC(1b,3b,%1,%0)
434                 EXC(2b,3b,%2,%0)
435                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
436                         : "r"(va), ""(0));
437                 if (error)
438                         goto got_exception;
439                 una_reg(reg) = tmp1|tmp2;
440                 return;
441 
442         /* Note that the store sequences do not indicate that they change
443            memory because it _should_ be affecting nothing in this context.
444            (Otherwise we have other, much larger, problems.)  */
445         case 0x0d: /* stw */
446                 __asm__ __volatile__(
447                 "1:     ldq_u %2,1(%5)\n"
448                 "2:     ldq_u %1,0(%5)\n"
449                 "       inswh %6,%5,%4\n"
450                 "       inswl %6,%5,%3\n"
451                 "       mskwh %2,%5,%2\n"
452                 "       mskwl %1,%5,%1\n"
453                 "       or %2,%4,%2\n"
454                 "       or %1,%3,%1\n"
455                 "3:     stq_u %2,1(%5)\n"
456                 "4:     stq_u %1,0(%5)\n"
457                 "5:\n"
458                 EXC(1b,5b,%2,%0)
459                 EXC(2b,5b,%1,%0)
460                 EXC(3b,5b,$31,%0)
461                 EXC(4b,5b,$31,%0)
462                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
463                           "=&r"(tmp3), "=&r"(tmp4)
464                         : "r"(va), "r"(una_reg(reg)), ""(0));
465                 if (error)
466                         goto got_exception;
467                 return;
468 
469         case 0x2c: /* stl */
470                 __asm__ __volatile__(
471                 "1:     ldq_u %2,3(%5)\n"
472                 "2:     ldq_u %1,0(%5)\n"
473                 "       inslh %6,%5,%4\n"
474                 "       insll %6,%5,%3\n"
475                 "       msklh %2,%5,%2\n"
476                 "       mskll %1,%5,%1\n"
477                 "       or %2,%4,%2\n"
478                 "       or %1,%3,%1\n"
479                 "3:     stq_u %2,3(%5)\n"
480                 "4:     stq_u %1,0(%5)\n"
481                 "5:\n"
482                 EXC(1b,5b,%2,%0)
483                 EXC(2b,5b,%1,%0)
484                 EXC(3b,5b,$31,%0)
485                 EXC(4b,5b,$31,%0)
486                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
487                           "=&r"(tmp3), "=&r"(tmp4)
488                         : "r"(va), "r"(una_reg(reg)), ""(0));
489                 if (error)
490                         goto got_exception;
491                 return;
492 
493         case 0x2d: /* stq */
494                 __asm__ __volatile__(
495                 "1:     ldq_u %2,7(%5)\n"
496                 "2:     ldq_u %1,0(%5)\n"
497                 "       insqh %6,%5,%4\n"
498                 "       insql %6,%5,%3\n"
499                 "       mskqh %2,%5,%2\n"
500                 "       mskql %1,%5,%1\n"
501                 "       or %2,%4,%2\n"
502                 "       or %1,%3,%1\n"
503                 "3:     stq_u %2,7(%5)\n"
504                 "4:     stq_u %1,0(%5)\n"
505                 "5:\n"
506                 EXC(1b,5b,%2,%0)
507                 EXC(2b,5b,%1,%0)
508                 EXC(3b,5b,$31,%0)
509                 EXC(4b,5b,$31,%0)
510                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
511                           "=&r"(tmp3), "=&r"(tmp4)
512                         : "r"(va), "r"(una_reg(reg)), ""(0));
513                 if (error)
514                         goto got_exception;
515                 return;
516         }
517 
518         printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
519                 pc, va, opcode, reg);
520         make_task_dead(SIGSEGV);
521 
522 got_exception:
523         /* Ok, we caught the exception, but we don't want it.  Is there
524            someone to pass it along to?  */
525         if ((fixup = search_exception_tables(pc)) != 0) {
526                 unsigned long newpc;
527                 newpc = fixup_exception(una_reg, fixup, pc);
528 
529                 printk("Forwarding unaligned exception at %lx (%lx)\n",
530                        pc, newpc);
531 
532                 regs->pc = newpc;
533                 return;
534         }
535 
536         /*
537          * Yikes!  No one to forward the exception to.
538          * Since the registers are in a weird format, dump them ourselves.
539          */
540 
541         printk("%s(%d): unhandled unaligned exception\n",
542                current->comm, task_pid_nr(current));
543 
544         printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
545                pc, una_reg(26), regs->ps);
546         printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
547                una_reg(0), una_reg(1), una_reg(2));
548         printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
549                una_reg(3), una_reg(4), una_reg(5));
550         printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
551                una_reg(6), una_reg(7), una_reg(8));
552         printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
553                una_reg(9), una_reg(10), una_reg(11));
554         printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
555                una_reg(12), una_reg(13), una_reg(14));
556         printk("r15= %016lx\n", una_reg(15));
557         printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
558                una_reg(16), una_reg(17), una_reg(18));
559         printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
560                una_reg(19), una_reg(20), una_reg(21));
561         printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
562                una_reg(22), una_reg(23), una_reg(24));
563         printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
564                una_reg(25), una_reg(27), una_reg(28));
565         printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
566 
567         dik_show_code((unsigned int *)pc);
568         dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
569 
570         if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
571                 printk("die_if_kernel recursion detected.\n");
572                 local_irq_enable();
573                 while (1);
574         }
575         make_task_dead(SIGSEGV);
576 }
577 
578 /*
579  * Convert an s-floating point value in memory format to the
580  * corresponding value in register format.  The exponent
581  * needs to be remapped to preserve non-finite values
582  * (infinities, not-a-numbers, denormals).
583  */
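    /*
     * For a normal S value the remap amounts to adding the bias difference
     * 1023 - 127 = 896 to the 8-bit exponent; an all-ones exponent becomes
     * 0x7ff and an all-zero exponent stays 0, so infinities, NaNs, zeros
     * and denormals keep their special encodings.
     */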
584 static inline unsigned long
585 s_mem_to_reg (unsigned long s_mem)
586 {
587         unsigned long frac    = (s_mem >>  0) & 0x7fffff;
588         unsigned long sign    = (s_mem >> 31) & 0x1;
589         unsigned long exp_msb = (s_mem >> 30) & 0x1;
590         unsigned long exp_low = (s_mem >> 23) & 0x7f;
591         unsigned long exp;
592 
593         exp = (exp_msb << 10) | exp_low;        /* common case */
594         if (exp_msb) {
595                 if (exp_low == 0x7f) {
596                         exp = 0x7ff;
597                 }
598         } else {
599                 if (exp_low == 0x00) {
600                         exp = 0x000;
601                 } else {
602                         exp |= (0x7 << 7);
603                 }
604         }
605         return (sign << 63) | (exp << 52) | (frac << 29);
606 }
607 
608 /*
609  * Convert an s-floating point value in register format to the
610  * corresponding value in memory format.
611  */
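    /*
     * The sign and exponent MSB (register bits 63:62) land in memory bits
     * 31:30; the low exponent and fraction (register bits 58:29) land in
     * memory bits 29:0.
     */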
612 static inline unsigned long
613 s_reg_to_mem (unsigned long s_reg)
614 {
615         return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
616 }
617 
618 /*
619  * Handle user-level unaligned fault.  Handling user-level unaligned
620  * faults is *extremely* slow and produces nasty messages.  A user
621  * program *should* fix unaligned faults ASAP.
622  *
623  * Notice that we have (almost) the regular kernel stack layout here,
624  * so finding the appropriate registers is a little more difficult
625  * than in the kernel case.
626  *
627  * Finally, we handle regular integer load/stores only.  In
628  * particular, load-linked/store-conditionally and floating point
629  * load/stores are not supported.  The former make no sense with
630  * unaligned faults (they are guaranteed to fail) and I don't think
631  * the latter will occur in any decent program.
632  *
633  * Sigh. We *do* have to handle some FP operations, because GCC will
634  * use them as temporary storage for integer memory-to-memory copies.
635  * However, we need to deal with stt/ldt and sts/lds only.
636  */
637 
638 #define OP_INT_MASK     ( 1L << 0x28 | 1L << 0x2c   /* ldl stl */       \
639                         | 1L << 0x29 | 1L << 0x2d   /* ldq stq */       \
640                         | 1L << 0x0c | 1L << 0x0d   /* ldwu stw */      \
641                         | 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
642 
643 #define OP_WRITE_MASK   ( 1L << 0x26 | 1L << 0x27   /* sts stt */       \
644                         | 1L << 0x2c | 1L << 0x2d   /* stl stq */       \
645                         | 1L << 0x0d | 1L << 0x0e ) /* stw stb */
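    /* The masks are indexed by the 6-bit Alpha memory-format opcode;
       do_entUnaUser() below tests OP_INT_MASK with (1L << opcode). */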
646 
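    /* R(x) is an open-coded offsetof(struct pt_regs, x). */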
647 #define R(x)    ((size_t) &((struct pt_regs *)0)->x)
648 
649 static int unauser_reg_offsets[32] = {
650         R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
651         /* r9 ... r15 are stored in front of regs.  */
652         -56, -48, -40, -32, -24, -16, -8,
653         R(r16), R(r17), R(r18),
654         R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
655         R(r27), R(r28), R(gp),
656         0, 0
657 };
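    /* The last two entries (r30 and r31) are never used: the user stack
       pointer is fetched with rdusp() and the zero register is faked in
       do_entUnaUser() below. */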
658 
659 #undef R
660 
661 asmlinkage void
662 do_entUnaUser(void __user * va, unsigned long opcode,
663               unsigned long reg, struct pt_regs *regs)
664 {
665         static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
666 
667         unsigned long tmp1, tmp2, tmp3, tmp4;
668         unsigned long fake_reg, *reg_addr = &fake_reg;
669         int si_code;
670         long error;
671 
672         /* Check the UAC bits to decide what the user wants us to do
673            with the unaligned access.  */
674 
675         if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
676                 if (__ratelimit(&ratelimit)) {
677                         printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
678                                current->comm, task_pid_nr(current),
679                                regs->pc - 4, va, opcode, reg);
680                 }
681         }
682         if ((current_thread_info()->status & TS_UAC_SIGBUS))
683                 goto give_sigbus;
684         /* Not sure why you'd want to use this, but... */
685         if ((current_thread_info()->status & TS_UAC_NOFIX))
686                 return;
687 
688         /* Don't bother reading ds in the access check since we already
689            know that this came from the user.  Also rely on the fact that
690            the page at TASK_SIZE is unmapped and so can't be touched anyway. */
691         if ((unsigned long)va >= TASK_SIZE)
692                 goto give_sigsegv;
693 
694         ++unaligned[1].count;
695         unaligned[1].va = (unsigned long)va;
696         unaligned[1].pc = regs->pc - 4;
697 
698         if ((1L << opcode) & OP_INT_MASK) {
699                 /* it's an integer load/store */
700                 if (reg < 30) {
701                         reg_addr = (unsigned long *)
702                           ((char *)regs + unauser_reg_offsets[reg]);
703                 } else if (reg == 30) {
704                         /* usp in PAL regs */
705                         fake_reg = rdusp();
706                 } else {
707                         /* zero "register" */
708                         fake_reg = 0;
709                 }
710         }
711 
712         /* We don't want to use the generic get/put unaligned macros as
713            we want to trap exceptions.  Only if we actually get an
714            exception will we decide whether we should have caught it.  */
715 
716         switch (opcode) {
717         case 0x0c: /* ldwu */
718                 __asm__ __volatile__(
719                 "1:     ldq_u %1,0(%3)\n"
720                 "2:     ldq_u %2,1(%3)\n"
721                 "       extwl %1,%3,%1\n"
722                 "       extwh %2,%3,%2\n"
723                 "3:\n"
724                 EXC(1b,3b,%1,%0)
725                 EXC(2b,3b,%2,%0)
726                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
727                         : "r"(va), ""(0));
728                 if (error)
729                         goto give_sigsegv;
730                 *reg_addr = tmp1|tmp2;
731                 break;
732 
733         case 0x22: /* lds */
734                 __asm__ __volatile__(
735                 "1:     ldq_u %1,0(%3)\n"
736                 "2:     ldq_u %2,3(%3)\n"
737                 "       extll %1,%3,%1\n"
738                 "       extlh %2,%3,%2\n"
739                 "3:\n"
740                 EXC(1b,3b,%1,%0)
741                 EXC(2b,3b,%2,%0)
742                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
743                         : "r"(va), ""(0));
744                 if (error)
745                         goto give_sigsegv;
746                 alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
747                 return;
748 
749         case 0x23: /* ldt */
750                 __asm__ __volatile__(
751                 "1:     ldq_u %1,0(%3)\n"
752                 "2:     ldq_u %2,7(%3)\n"
753                 "       extql %1,%3,%1\n"
754                 "       extqh %2,%3,%2\n"
755                 "3:\n"
756                 EXC(1b,3b,%1,%0)
757                 EXC(2b,3b,%2,%0)
758                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
759                         : "r"(va), ""(0));
760                 if (error)
761                         goto give_sigsegv;
762                 alpha_write_fp_reg(reg, tmp1|tmp2);
763                 return;
764 
765         case 0x28: /* ldl */
766                 __asm__ __volatile__(
767                 "1:     ldq_u %1,0(%3)\n"
768                 "2:     ldq_u %2,3(%3)\n"
769                 "       extll %1,%3,%1\n"
770                 "       extlh %2,%3,%2\n"
771                 "3:\n"
772                 EXC(1b,3b,%1,%0)
773                 EXC(2b,3b,%2,%0)
774                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
775                         : "r"(va), ""(0));
776                 if (error)
777                         goto give_sigsegv;
778                 *reg_addr = (int)(tmp1|tmp2);
779                 break;
780 
781         case 0x29: /* ldq */
782                 __asm__ __volatile__(
783                 "1:     ldq_u %1,0(%3)\n"
784                 "2:     ldq_u %2,7(%3)\n"
785                 "       extql %1,%3,%1\n"
786                 "       extqh %2,%3,%2\n"
787                 "3:\n"
788                 EXC(1b,3b,%1,%0)
789                 EXC(2b,3b,%2,%0)
790                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
791                         : "r"(va), ""(0));
792                 if (error)
793                         goto give_sigsegv;
794                 *reg_addr = tmp1|tmp2;
795                 break;
796 
797         /* Note that the store sequences do not indicate that they change
798            memory because it _should_ be affecting nothing in this context.
799            (Otherwise we have other, much larger, problems.)  */
800         case 0x0d: /* stw */
801                 __asm__ __volatile__(
802                 "1:     ldq_u %2,1(%5)\n"
803                 "2:     ldq_u %1,0(%5)\n"
804                 "       inswh %6,%5,%4\n"
805                 "       inswl %6,%5,%3\n"
806                 "       mskwh %2,%5,%2\n"
807                 "       mskwl %1,%5,%1\n"
808                 "       or %2,%4,%2\n"
809                 "       or %1,%3,%1\n"
810                 "3:     stq_u %2,1(%5)\n"
811                 "4:     stq_u %1,0(%5)\n"
812                 "5:\n"
813                 EXC(1b,5b,%2,%0)
814                 EXC(2b,5b,%1,%0)
815                 EXC(3b,5b,$31,%0)
816                 EXC(4b,5b,$31,%0)
817                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
818                           "=&r"(tmp3), "=&r"(tmp4)
819                         : "r"(va), "r"(*reg_addr), ""(0));
820                 if (error)
821                         goto give_sigsegv;
822                 return;
823 
824         case 0x26: /* sts */
825                 fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
826                 fallthrough;
827 
828         case 0x2c: /* stl */
829                 __asm__ __volatile__(
830                 "1:     ldq_u %2,3(%5)\n"
831                 "2:     ldq_u %1,0(%5)\n"
832                 "       inslh %6,%5,%4\n"
833                 "       insll %6,%5,%3\n"
834                 "       msklh %2,%5,%2\n"
835                 "       mskll %1,%5,%1\n"
836                 "       or %2,%4,%2\n"
837                 "       or %1,%3,%1\n"
838                 "3:     stq_u %2,3(%5)\n"
839                 "4:     stq_u %1,0(%5)\n"
840                 "5:\n"
841                 EXC(1b,5b,%2,%0)
842                 EXC(2b,5b,%1,%0)
843                 EXC(3b,5b,$31,%0)
844                 EXC(4b,5b,$31,%0)
845                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
846                           "=&r"(tmp3), "=&r"(tmp4)
847                         : "r"(va), "r"(*reg_addr), ""(0));
848                 if (error)
849                         goto give_sigsegv;
850                 return;
851 
852         case 0x27: /* stt */
853                 fake_reg = alpha_read_fp_reg(reg);
854                 fallthrough;
855 
856         case 0x2d: /* stq */
857                 __asm__ __volatile__(
858                 "1:     ldq_u %2,7(%5)\n"
859                 "2:     ldq_u %1,0(%5)\n"
860                 "       insqh %6,%5,%4\n"
861                 "       insql %6,%5,%3\n"
862                 "       mskqh %2,%5,%2\n"
863                 "       mskql %1,%5,%1\n"
864                 "       or %2,%4,%2\n"
865                 "       or %1,%3,%1\n"
866                 "3:     stq_u %2,7(%5)\n"
867                 "4:     stq_u %1,0(%5)\n"
868                 "5:\n"
869                 EXC(1b,5b,%2,%0)
870                 EXC(2b,5b,%1,%0)
871                 EXC(3b,5b,$31,%0)
872                 EXC(4b,5b,$31,%0)
873                         : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
874                           "=&r"(tmp3), "=&r"(tmp4)
875                         : "r"(va), "r"(*reg_addr), ""(0));
876                 if (error)
877                         goto give_sigsegv;
878                 return;
879 
880         default:
881                 /* What instruction were you trying to use, exactly?  */
882                 goto give_sigbus;
883         }
884 
885         /* Only integer loads should get here; everyone else returns early. */
886         if (reg == 30)
887                 wrusp(fake_reg);
888         return;
889 
890 give_sigsegv:
891         regs->pc -= 4;  /* make pc point to faulting insn */
892 
893         /* We need to replicate some of the logic in mm/fault.c,
894            since we don't have access to the fault code in the
895            exception handling return path.  */
896         if ((unsigned long)va >= TASK_SIZE)
897                 si_code = SEGV_ACCERR;
898         else {
899                 struct mm_struct *mm = current->mm;
900                 mmap_read_lock(mm);
901                 if (find_vma(mm, (unsigned long)va))
902                         si_code = SEGV_ACCERR;
903                 else
904                         si_code = SEGV_MAPERR;
905                 mmap_read_unlock(mm);
906         }
907         send_sig_fault(SIGSEGV, si_code, va, current);
908         return;
909 
910 give_sigbus:
911         regs->pc -= 4;
912         send_sig_fault(SIGBUS, BUS_ADRALN, va, current);
913         return;
914 }
915 
916 void
917 trap_init(void)
918 {
919         /* Tell PAL-code what global pointer we want in the kernel.  */
920         register unsigned long gptr __asm__("$29");
921         wrkgp(gptr);
922 
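            /* Hand our exception entry points to the PALcode; the second
               argument selects the entry class: 1 = arithmetic, 2 = memory
               management, 3 = instruction fault, 4 = unaligned access,
               5 = system call, 6 = debug. */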
923         wrent(entArith, 1);
924         wrent(entMM, 2);
925         wrent(entIF, 3);
926         wrent(entUna, 4);
927         wrent(entSys, 5);
928         wrent(entDbg, 6);
929 }
930 
