// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC traps.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * Here we handle the break vectors not used by the system call
 * mechanism, as well as some general stack/register dumping
 * things.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/extable.h>
#include <linux/kmod.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>

#include <asm/bug.h>
#include <asm/fpu.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/unwinder.h>
#include <asm/sections.h>

/*
 * State for the l.lwa/l.swa emulation further down: lwa_flag records whether
 * a load-linked reservation is outstanding and lwa_addr the address it was
 * taken on, so that a later l.swa can be checked against it.
 */
int lwa_flag;
static unsigned long __user *lwa_addr;

asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector);
asmlinkage void do_trap(struct pt_regs *regs, unsigned long address);
asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address);
asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address);
asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address);
asmlinkage void do_illegal_instruction(struct pt_regs *regs,
                                       unsigned long address);

static void print_trace(void *data, unsigned long addr, int reliable)
{
        const char *loglvl = data;

        pr_info("%s[<%p>] %s%pS\n", loglvl, (void *) addr, reliable ? "" : "? ",
                (void *) addr);
}

static void print_data(unsigned long base_addr, unsigned long word, int i)
{
        if (i == 0)
                pr_info("(%08lx:)\t%08lx", base_addr + (i * 4), word);
        else
                pr_info(" %08lx:\t%08lx", base_addr + (i * 4), word);
}

/* displays a short stack trace */
void show_stack(struct task_struct *task, unsigned long *esp, const char *loglvl)
{
        if (esp == NULL)
                esp = (unsigned long *)&esp;

        pr_info("%sCall trace:\n", loglvl);
        unwind_stack((void *)loglvl, esp, print_trace);
}

void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;

        esp = (unsigned long)(regs->sp);
        if (user_mode(regs))
                in_kernel = 0;

        pr_info("CPU #: %d\n"
                " PC: %08lx SR: %08lx SP: %08lx\n",
                smp_processor_id(), regs->pc, regs->sr, regs->sp);
        pr_info("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n",
                0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]);
        pr_info("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n",
                regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]);
        pr_info("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n",
                regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]);
        pr_info("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n",
                regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]);
        pr_info("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n",
                regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]);
        pr_info("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n",
                regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]);
        pr_info("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n",
                regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]);
        pr_info("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n",
                regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]);
        pr_info(" RES: %08lx oGPR11: %08lx\n",
                regs->gpr[11], regs->orig_gpr11);

        pr_info("Process %s (pid: %d, stackpage=%08lx)\n",
                current->comm, current->pid, (unsigned long)current);
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         * Both dumps go through __get_user() so that a bad stack or PC
         * value just cuts the output short instead of faulting again.
         */
        if (in_kernel) {

                pr_info("\nStack: ");
                show_stack(NULL, (unsigned long *)esp, KERN_EMERG);

                if (esp < PAGE_OFFSET)
                        goto bad_stack;

                pr_info("\n");
                for (i = -8; i < 24; i += 1) {
                        unsigned long word;

                        if (__get_user(word, &((unsigned long *)esp)[i])) {
bad_stack:
                                pr_info(" Bad Stack value.");
                                break;
                        }

                        print_data(esp, word, i);
                }

                pr_info("\nCode: ");
                if (regs->pc < PAGE_OFFSET)
                        goto bad;

                for (i = -6; i < 6; i += 1) {
                        unsigned long word;

                        if (__get_user(word, &((unsigned long *)regs->pc)[i])) {
bad:
                                pr_info(" Bad PC value.");
                                break;
                        }

                        print_data(regs->pc, word, i);
                }
        }
        pr_info("\n");
}

/* This is normally the 'Oops' routine */
void __noreturn die(const char *str, struct pt_regs *regs, long err)
{

        console_verbose();
        pr_emerg("\n%s#: %04lx\n", str, err & 0xffff);
        show_registers(regs);
#ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION
        pr_emerg("\n\nUNHANDLED_EXCEPTION: entering infinite loop\n");

        /* shut down interrupts */
        local_irq_disable();

        __asm__ __volatile__("l.nop 1");
        do {} while (1);
#endif
        make_task_dead(SIGSEGV);
}

asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector)
{
        pr_emerg("Unable to handle exception at EA =0x%x, vector 0x%x",
                 ea, vector);
        die("Oops", regs, 9);
}

asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address)
{
        if (user_mode(regs)) {
                int code = FPE_FLTUNK;
#ifdef CONFIG_FPU
                unsigned long fpcsr;

                save_fpu(current);
                fpcsr = current->thread.fpcsr;

                if (fpcsr & SPR_FPCSR_IVF)
                        code = FPE_FLTINV;
                else if (fpcsr & SPR_FPCSR_OVF)
                        code = FPE_FLTOVF;
                else if (fpcsr & SPR_FPCSR_UNF)
                        code = FPE_FLTUND;
                else if (fpcsr & SPR_FPCSR_DZF)
                        code = FPE_FLTDIV;
                else if (fpcsr & SPR_FPCSR_IXF)
                        code = FPE_FLTRES;

                /* Clear all flags */
                current->thread.fpcsr &= ~SPR_FPCSR_ALLF;
                restore_fpu(current);
#endif
                force_sig_fault(SIGFPE, code, (void __user *)regs->pc);
        } else {
                pr_emerg("KERNEL: Illegal fpe exception 0x%.8lx\n", regs->pc);
                die("Die:", regs, SIGFPE);
        }
}

asmlinkage void do_trap(struct pt_regs *regs, unsigned long address)
{
        if (user_mode(regs)) {
                force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
        } else {
                pr_emerg("KERNEL: Illegal trap exception 0x%.8lx\n", regs->pc);
                die("Die:", regs, SIGILL);
        }
}

asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
{
        if (user_mode(regs)) {
                /* Send a SIGBUS */
                force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)address);
        } else {
                pr_emerg("KERNEL: Unaligned Access 0x%.8lx\n", address);
                die("Die:", regs, address);
        }

}

asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address)
{
        if (user_mode(regs)) {
                /* Send a SIGBUS */
                force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
        } else { /* Kernel mode */
                pr_emerg("KERNEL: Bus error (SIGBUS) 0x%.8lx\n", address);
                die("Die:", regs, address);
        }
}

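/*
 * Note on the emulation below: the handlers must work out the address of
 * the next instruction themselves.  When the trapping instruction sits in
 * a branch delay slot, the saved PC points at the branch itself, so
 * adjust_pc() simulates that branch to find the correct resume address.
 */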
static inline int in_delay_slot(struct pt_regs *regs)
{
#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
        /* No delay slot flag, do the old way */
        unsigned int op, insn;

        insn = *((unsigned int *)regs->pc);
        op = insn >> 26;
        switch (op) {
        case 0x00: /* l.j */
        case 0x01: /* l.jal */
        case 0x03: /* l.bnf */
        case 0x04: /* l.bf */
        case 0x11: /* l.jr */
        case 0x12: /* l.jalr */
                return 1;
        default:
                return 0;
        }
#else
        return mfspr(SPR_SR) & SPR_SR_DSX;
#endif
}

static inline void adjust_pc(struct pt_regs *regs, unsigned long address)
{
        int displacement;
        unsigned int rb, op, jmp;

        if (unlikely(in_delay_slot(regs))) {
                /* In delay slot, instruction at pc is a branch, simulate it */
                jmp = *((unsigned int *)regs->pc);

                displacement = sign_extend32(((jmp) & 0x3ffffff) << 2, 27);
                rb = (jmp & 0x0000ffff) >> 11;
                op = jmp >> 26;

                switch (op) {
                case 0x00: /* l.j */
                        regs->pc += displacement;
                        return;
                case 0x01: /* l.jal */
                        regs->pc += displacement;
                        regs->gpr[9] = regs->pc + 8;
                        return;
                case 0x03: /* l.bnf */
                        if (regs->sr & SPR_SR_F)
                                regs->pc += 8;
                        else
                                regs->pc += displacement;
                        return;
                case 0x04: /* l.bf */
                        if (regs->sr & SPR_SR_F)
                                regs->pc += displacement;
                        else
                                regs->pc += 8;
                        return;
                case 0x11: /* l.jr */
                        regs->pc = regs->gpr[rb];
                        return;
                case 0x12: /* l.jalr */
                        regs->pc = regs->gpr[rb];
                        regs->gpr[9] = regs->pc + 8;
                        return;
                default:
                        break;
                }
        } else {
                regs->pc += 4;
        }
}

static inline void simulate_lwa(struct pt_regs *regs, unsigned long address,
                                unsigned int insn)
{
        unsigned int ra, rd;
        unsigned long value;
        unsigned long orig_pc;
        long imm;

        const struct exception_table_entry *entry;

        orig_pc = regs->pc;
        adjust_pc(regs, address);

        ra = (insn >> 16) & 0x1f;
        rd = (insn >> 21) & 0x1f;
        imm = (short)insn;
        lwa_addr = (unsigned long __user *)(regs->gpr[ra] + imm);

        if ((unsigned long)lwa_addr & 0x3) {
                do_unaligned_access(regs, address);
                return;
        }

        if (get_user(value, lwa_addr)) {
                if (user_mode(regs)) {
                        force_sig(SIGSEGV);
                        return;
                }

                if ((entry = search_exception_tables(orig_pc))) {
                        regs->pc = entry->fixup;
                        return;
                }

                /* kernel access in kernel space, load it directly */
                value = *((unsigned long *)lwa_addr);
        }

        lwa_flag = 1;
        regs->gpr[rd] = value;
}

static inline void simulate_swa(struct pt_regs *regs, unsigned long address,
                                unsigned int insn)
{
        unsigned long __user *vaddr;
        unsigned long orig_pc;
        unsigned int ra, rb;
        long imm;

        const struct exception_table_entry *entry;

        orig_pc = regs->pc;
        adjust_pc(regs, address);

        ra = (insn >> 16) & 0x1f;
        rb = (insn >> 11) & 0x1f;
        imm = (short)(((insn & 0x2200000) >> 10) | (insn & 0x7ff));
        vaddr = (unsigned long __user *)(regs->gpr[ra] + imm);

        if (!lwa_flag || vaddr != lwa_addr) {
                regs->sr &= ~SPR_SR_F;
                return;
        }

        if ((unsigned long)vaddr & 0x3) {
                do_unaligned_access(regs, address);
                return;
        }

        if (put_user(regs->gpr[rb], vaddr)) {
                if (user_mode(regs)) {
                        force_sig(SIGSEGV);
                        return;
                }

                if ((entry = search_exception_tables(orig_pc))) {
                        regs->pc = entry->fixup;
                        return;
                }

                /* kernel access in kernel space, store it directly */
                *((unsigned long *)vaddr) = regs->gpr[rb];
        }

        lwa_flag = 0;
        regs->sr |= SPR_SR_F;
}

#define INSN_LWA        0x1b
#define INSN_SWA        0x33

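/*
 * Illustrative only (not part of this file): the l.lwa/l.swa emulation above
 * targets compare-and-swap style loops of roughly the following shape, where
 * l.swa sets SR[F] on success and l.bnf branches when the flag is clear.
 * Register numbers are arbitrary:
 *
 *      1:      l.lwa   r3, 0(r4)       load r3, take the reservation
 *              l.sfeq  r3, r5          compare with the expected value
 *              l.bnf   2f              mismatch: give up
 *               l.nop
 *              l.swa   0(r4), r6       store r6 iff the reservation holds
 *              l.bnf   1b              reservation lost, retry
 *               l.nop
 *      2:
 */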
asmlinkage void do_illegal_instruction(struct pt_regs *regs,
                                       unsigned long address)
{
        unsigned int op;
        unsigned int insn = *((unsigned int *)address);

        op = insn >> 26;

        switch (op) {
        case INSN_LWA:
                simulate_lwa(regs, address, insn);
                return;

        case INSN_SWA:
                simulate_swa(regs, address, insn);
                return;

        default:
                break;
        }

        if (user_mode(regs)) {
                /* Send a SIGILL */
                force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)address);
        } else { /* Kernel mode */
                pr_emerg("KERNEL: Illegal instruction (SIGILL) 0x%.8lx\n",
                         address);
                die("Die:", regs, address);
        }
}