// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/randomize_kstack.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/uprobes.h>
#include <asm/uprobes.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/entry-common.h>

#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/cfi.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/vector.h>
#include <asm/irq_stack.h>

int show_unhandled_signals = 1;

static DEFINE_SPINLOCK(die_lock);

static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
{
	const void __user *uaddr = (__force const void __user *)insns;

	if (!user_mode(regs))
		return get_kernel_nofault(*val, insns);

	/* The user space code from other tasks cannot be accessed. */
	if (regs != task_pt_regs(current))
		return -EPERM;

	return copy_from_user_nofault(val, uaddr, sizeof(*val));
}

static void dump_instr(const char *loglvl, struct pt_regs *regs)
{
	char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
	const u16 *insns = (u16 *)instruction_pointer(regs);
	long bad;
	u16 val;
	int i;

	for (i = -10; i < 2; i++) {
		bad = copy_code(regs, &val, &insns[i]);
		if (!bad) {
			p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
		} else {
			printk("%sCode: Unable to access instruction at 0x%px.\n",
			       loglvl, &insns[i]);
			return;
		}
	}
	printk("%sCode: %s\n", loglvl, str);
}

void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;
	int ret;
	long cause;
	unsigned long flags;

	oops_enter();

	spin_lock_irqsave(&die_lock, flags);
	console_verbose();
	bust_spinlocks(1);

	pr_emerg("%s [#%d]\n", str, ++die_counter);
	print_modules();
	if (regs) {
		show_regs(regs);
		dump_instr(KERN_EMERG, regs);
	}

	cause = regs ? regs->cause : -1;
	ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);

	if (kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&die_lock, flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (ret != NOTIFY_STOP)
		make_task_dead(SIGSEGV);
}

void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
{
	struct task_struct *tsk = current;

	if (show_unhandled_signals && unhandled_signal(tsk, signo)
	    && printk_ratelimit()) {
		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
			tsk->comm, task_pid_nr(tsk), signo, code, addr);
		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
		pr_cont("\n");
		__show_regs(regs);
		dump_instr(KERN_INFO, regs);
	}

	force_sig_fault(signo, code, (void __user *)addr);
}

static void do_trap_error(struct pt_regs *regs, int signo, int code,
	unsigned long addr, const char *str)
{
	current->thread.bad_cause = regs->cause;

	if (user_mode(regs)) {
		do_trap(regs, signo, code, addr);
	} else {
		if (!fixup_exception(regs))
			die(regs, str);
	}
}

#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
#define __trap_section __noinstr_section(".xip.traps")
#else
#define __trap_section noinstr
#endif
#define DO_ERROR_INFO(name, signo, code, str)					\
asmlinkage __visible __trap_section void name(struct pt_regs *regs)		\
{										\
	if (user_mode(regs)) {							\
		irqentry_enter_from_user_mode(regs);				\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_exit_to_user_mode(regs);				\
	} else {								\
		irqentry_state_t state = irqentry_nmi_enter(regs);		\
		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
		irqentry_nmi_exit(regs, state);					\
	}									\
}

DO_ERROR_INFO(do_trap_unknown,
	SIGILL, ILL_ILLTRP, "unknown exception");
DO_ERROR_INFO(do_trap_insn_misaligned,
	SIGBUS, BUS_ADRALN, "instruction address misaligned");
DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");

asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs)
{
	bool handled;

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		local_irq_enable();

		handled = riscv_v_first_use_handler(regs);

		local_irq_disable();

		if (!handled)
			do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
				      "Oops - illegal instruction");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
			      "Oops - illegal instruction");

		irqentry_nmi_exit(regs, state);
	}
}

DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");

asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		if (handle_misaligned_load(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				      "Oops - load address misaligned");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		if (handle_misaligned_load(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				      "Oops - load address misaligned");

		irqentry_nmi_exit(regs, state);
	}
}

asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		if (handle_misaligned_store(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				      "Oops - store (or AMO) address misaligned");

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		if (handle_misaligned_store(regs))
			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
				      "Oops - store (or AMO) address misaligned");

		irqentry_nmi_exit(regs, state);
	}
}
DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
	SIGILL, ILL_ILLTRP, "environment call from S-mode");
DO_ERROR_INFO(do_trap_ecall_m,
	SIGILL, ILL_ILLTRP, "environment call from M-mode");

static inline unsigned long get_break_insn_length(unsigned long pc)
{
	bug_insn_t insn;

	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;

	return GET_INSN_LENGTH(insn);
}

static bool probe_single_step_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
}

static bool probe_breakpoint_handler(struct pt_regs *regs)
{
	bool user = user_mode(regs);

	return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
}

void handle_break(struct pt_regs *regs)
{
	if (probe_single_step_handler(regs))
		return;

	if (probe_breakpoint_handler(regs))
		return;

	current->thread.bad_cause = regs->cause;

	if (user_mode(regs))
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
#ifdef CONFIG_KGDB
	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
								== NOTIFY_STOP)
		return;
#endif
	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN ||
		 handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN)
		regs->epc += get_break_insn_length(regs->epc);
	else
		die(regs, "Kernel BUG");
}

asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);

		handle_break(regs);

		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		handle_break(regs);

		irqentry_nmi_exit(regs, state);
	}
}

asmlinkage __visible __trap_section __no_stack_protector
void do_trap_ecall_u(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		long syscall = regs->a7;

		regs->epc += 4;
		regs->orig_a0 = regs->a0;
		regs->a0 = -ENOSYS;

		riscv_v_vstate_discard(regs);

		syscall = syscall_enter_from_user_mode(regs, syscall);

		add_random_kstack_offset();

		if (syscall >= 0 && syscall < NR_syscalls)
			syscall_handler(regs, syscall);

		/*
		 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
		 * so the maximum stack offset is 1k bytes (10 bits).
		 *
		 * The actual entropy will be further reduced by the compiler when
		 * applying stack alignment constraints: 16-byte (i.e. 4-bit) aligned
		 * for RV32I or RV64I.
		 *
		 * The resulting 6 bits of entropy is seen in SP[9:4].
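		 *
		 * Worked out: a 1 KiB range (2^10 bytes) restricted to 16-byte
		 * (2^4) alignment leaves 2^10 / 2^4 = 2^6 = 64 possible offsets.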
		 */
		choose_random_kstack_offset(get_random_u16());

		syscall_exit_to_user_mode(regs);
	} else {
		irqentry_state_t state = irqentry_nmi_enter(regs);

		do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->epc,
			      "Oops - environment call from U-mode");

		irqentry_nmi_exit(regs, state);
	}

}

#ifdef CONFIG_MMU
asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	handle_page_fault(regs);

	local_irq_disable();

	irqentry_exit(regs, state);
}
#endif

static void noinstr handle_riscv_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

asmlinkage void noinstr do_irq(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
		call_on_irq_stack(regs, handle_riscv_irq);
	else
		handle_riscv_irq(regs);

	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long pc)
{
	bug_insn_t insn;

	if (pc < VMALLOC_START)
		return 0;
	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
		return 0;
	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
		return (insn == __BUG_INSN_32);
	else
		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
}
#endif /* CONFIG_GENERIC_BUG */

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
		overflow_stack)__aligned(16);

asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);

	console_verbose();

	pr_emerg("Insufficient stack space to handle exception!\n");
	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);
	panic("Kernel stack overflow");

	for (;;)
		wait_for_interrupt();
}
#endif