// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
 *
 * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/percpu.h>
#include <linux/context_tracking.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/lsu.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>

int show_unhandled_signals = 1;

static void __kprobes unhandled_fault(unsigned long address,
                                      struct task_struct *tsk,
                                      struct pt_regs *regs)
{
        if ((unsigned long) address < PAGE_SIZE) {
                printk(KERN_ALERT "Unable to handle kernel NULL "
                       "pointer dereference\n");
        } else {
                printk(KERN_ALERT "Unable to handle kernel paging request "
                       "at virtual address %016lx\n", (unsigned long)address);
        }
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
               (tsk->mm ?
                CTX_HWBITS(tsk->mm->context) :
                CTX_HWBITS(tsk->active_mm->context)));
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
               (tsk->mm ? (unsigned long) tsk->mm->pgd :
                (unsigned long) tsk->active_mm->pgd));
        die_if_kernel("Oops", regs);
}

static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
{
        printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
               regs->tpc);
        printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
        printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
        printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
        dump_stack();
        unhandled_fault(regs->tpc, current, regs);
}

/*
 * We now make sure that mmap_lock is held in all paths that call
 * this.  Additionally, to prevent kswapd from ripping ptes out from
 * under us, we disable interrupts around the time that we look at the
 * pte; kswapd will have to wait to get its SMP IPI response from
 * us.  vmtruncate likewise.  This saves us having to take the pte lock.
 */
static unsigned int get_user_insn(unsigned long tpc)
{
        pgd_t *pgdp = pgd_offset(current->mm, tpc);
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep, pte;
        unsigned long pa;
        u32 insn = 0;

        if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
                goto out;
        p4dp = p4d_offset(pgdp, tpc);
        if (p4d_none(*p4dp) || unlikely(p4d_bad(*p4dp)))
                goto out;
        pudp = pud_offset(p4dp, tpc);
        if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
                goto out;

        /* This disables preemption for us as well. */
        local_irq_disable();

        pmdp = pmd_offset(pudp, tpc);
again:
        if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
                goto out_irq_enable;

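        /* Sketch of the two paths below: for a huge PMD the physical
         * address of the instruction is computed directly from the
         * PMD's pfn; otherwise we map the PTE (pte_offset_map() can
         * return NULL if the PMD changes underneath us, e.g. under
         * THP, hence the retry via "again") and compute it from the
         * pte's pfn.  Either way the word is read through the
         * physical-address ASI so the D-TLB and D-cache stay clean.
         */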
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        if (is_hugetlb_pmd(*pmdp)) {
                pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
                pa += tpc & ~HPAGE_MASK;

                /* Use phys bypass so we don't pollute dtlb/dcache. */
                __asm__ __volatile__("lduwa [%1] %2, %0"
                                     : "=r" (insn)
                                     : "r" (pa), "i" (ASI_PHYS_USE_EC));
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, tpc);
                if (!ptep)
                        goto again;
                pte = *ptep;
                if (pte_present(pte)) {
                        pa  = (pte_pfn(pte) << PAGE_SHIFT);
                        pa += (tpc & ~PAGE_MASK);

                        /* Use phys bypass so we don't pollute dtlb/dcache. */
                        __asm__ __volatile__("lduwa [%1] %2, %0"
                                             : "=r" (insn)
                                             : "r" (pa), "i" (ASI_PHYS_USE_EC));
                }
                pte_unmap(ptep);
        }
out_irq_enable:
        local_irq_enable();
out:
        return insn;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, sig))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
               (void *)regs->u_regs[UREG_FP], code);

        print_vma_addr(KERN_CONT " in ", regs->tpc);

        printk(KERN_CONT "\n");
}

static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                             unsigned long fault_addr, unsigned int insn,
                             int fault_code)
{
        unsigned long addr;

        if (fault_code & FAULT_CODE_ITLB) {
                addr = regs->tpc;
        } else {
                /* If we were able to probe the faulting instruction, use it
                 * to compute a precise fault address.  Otherwise use the fault
                 * time provided address which may only have page granularity.
                 */
                if (insn)
                        addr = compute_effective_address(regs, insn, 0);
                else
                        addr = fault_addr;
        }

        if (unlikely(show_unhandled_signals))
                show_signal_msg(regs, sig, code, addr, current);

        force_sig_fault(sig, code, (void __user *) addr);
}

static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
{
        if (!insn) {
                if (!regs->tpc || (regs->tpc & 0x3))
                        return 0;
                if (regs->tstate & TSTATE_PRIV) {
                        insn = *(unsigned int *) regs->tpc;
                } else {
                        insn = get_user_insn(regs->tpc);
                }
        }
        return insn;
}

static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
                                      int fault_code, unsigned int insn,
                                      unsigned long address)
{
        unsigned char asi = ASI_P;

        if ((!insn) && (regs->tstate & TSTATE_PRIV))
                goto cannot_handle;

        /* If the user insn could not be read (thus insn is zero), that
         * is fine.  We will just gun down the process with a signal
         * in that case.
         */

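        /* Decode sketch (SPARC V9 format 3 encodings): bits 31:30 == 11
         * plus bit 23 selects the alternate-space load/store group.  If
         * bit 13 (the "i" bit) is set, the ASI comes from the %asi
         * register held in TSTATE bits 31:24, otherwise it is immediate
         * in insn bits 12:5.  (asi & 0xf2) == 0x82 matches the no-fault
         * ASIs (ASI_PNF/ASI_SNF and their little-endian variants), e.g.
         * "lduwa [%o0] 0x82, %g1".  Bit 24 then separates the FP
         * loads/stores from the integer ones.
         */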
        if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
            (insn & 0xc0800000) == 0xc0800000) {
                if (insn & 0x2000)
                        asi = (regs->tstate >> 24);
                else
                        asi = (insn >> 5);
                if ((asi & 0xf2) == 0x82) {
                        if (insn & 0x1000000) {
                                handle_ldf_stq(insn, regs);
                        } else {
                                /* This was a non-faulting load. Just clear the
                                 * destination register(s) and continue with the next
                                 * instruction. -jj
                                 */
                                handle_ld_nf(insn, regs);
                        }
                        return;
                }
        }

        /* Is this in ex_table? */
        if (regs->tstate & TSTATE_PRIV) {
                const struct exception_table_entry *entry;

                entry = search_exception_tables(regs->tpc);
                if (entry) {
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return;
                }
        } else {
                /* The si_code was set to make clear whether
                 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
                 */
                do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
                return;
        }

cannot_handle:
        unhandled_fault(address, current, regs);
}

static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
{
        static int times;

        if (times++ < 10)
                printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
                       "64-bit TPC [%lx]\n",
                       current->comm, current->pid,
                       regs->tpc);
        show_regs(regs);
}

asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned int insn = 0;
        int si_code, fault_code;
        vm_fault_t fault;
        unsigned long address, mm_rss;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        fault_code = get_thread_fault_code();

        if (kprobe_page_fault(regs, 0))
                goto exit_exception;

        si_code = SEGV_MAPERR;
        address = current_thread_info()->fault_address;

        if ((fault_code & FAULT_CODE_ITLB) &&
            (fault_code & FAULT_CODE_DTLB))
                BUG();

        if (test_thread_flag(TIF_32BIT)) {
                if (!(regs->tstate & TSTATE_PRIV)) {
                        if (unlikely((regs->tpc >> 32) != 0)) {
                                bogus_32bit_fault_tpc(regs);
                                goto intr_or_no_mm;
                        }
                }
                if (unlikely((address >> 32) != 0))
                        goto intr_or_no_mm;
        }

        if (regs->tstate & TSTATE_PRIV) {
                unsigned long tpc = regs->tpc;

                /* Sanity check the PC. */
                if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
                    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
                        /* Valid, no problems... */
                } else {
                        bad_kernel_pc(regs, address);
                        goto exit_exception;
                }
        } else
                flags |= FAULT_FLAG_USER;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (faulthandler_disabled() || !mm)
                goto intr_or_no_mm;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        if (!mmap_read_trylock(mm)) {
                if ((regs->tstate & TSTATE_PRIV) &&
                    !search_exception_tables(regs->tpc)) {
                        insn = get_fault_insn(regs, insn);
                        goto handle_kernel_fault;
                }

retry:
                mmap_read_lock(mm);
        }

        if (fault_code & FAULT_CODE_BAD_RA)
                goto do_sigbus;

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;

        /* Pure DTLB misses do not tell us whether the fault causing
         * load/store/atomic was a write or not, it only says that there
         * was no match.  So in such a case we (carefully) read the
         * instruction to try and figure this out.  It's an optimization
         * so it's ok if we can't do this.
         *
         * Special hack, window spill/fill knows the exact fault type.
         */
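        /* Worked example (a sketch, encodings per the SPARC V9 manual):
         * "stx %g1, [%g2]" has op = 3 and op3 = 0x0e, so bits 31:30 and
         * bit 21 are set and the test below treats it as a write, while
         * "prefetch" (op3 = 0x2d) and "prefetcha" (op3 = 0x3d) also carry
         * bit 21 but match the 0x01680000 pattern and are rejected.
         */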
        if (((fault_code &
              (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
            (vma->vm_flags & VM_WRITE) != 0) {
                insn = get_fault_insn(regs, 0);
                if (!insn)
                        goto continue_fault;
                /* All loads, stores and atomics have bits 30 and 31 both set
                 * in the instruction.  Bit 21 is set in all stores, but we
                 * have to avoid prefetches which also have bit 21 set.
                 */
                if ((insn & 0xc0200000) == 0xc0200000 &&
                    (insn & 0x01780000) != 0x01680000) {
                        /* Don't bother updating thread struct value,
                         * because update_mmu_cache only cares which tlb
                         * the access came from.
                         */
                        fault_code |= FAULT_CODE_WRITE;
                }
        }
continue_fault:

        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (!(fault_code & FAULT_CODE_WRITE)) {
                /* Non-faulting loads shouldn't expand stack. */
                insn = get_fault_insn(regs, insn);
                if ((insn & 0xc0800000) == 0xc0800000) {
                        unsigned char asi;

                        if (insn & 0x2000)
                                asi = (regs->tstate >> 24);
                        else
                                asi = (insn >> 5);
                        if ((asi & 0xf2) == 0x82)
                                goto bad_area;
                }
        }
        vma = expand_stack(mm, address);
        if (!vma)
                goto bad_area_nosemaphore;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        si_code = SEGV_ACCERR;

        /* If we took an ITLB miss on a non-executable page, catch
         * that here.
         */
        if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
                WARN(address != regs->tpc,
                     "address (%lx) != regs->tpc (%lx)\n", address, regs->tpc);
                WARN_ON(regs->tstate & TSTATE_PRIV);
                goto bad_area;
        }

        if (fault_code & FAULT_CODE_WRITE) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;

                /* Spitfire has an icache which does not snoop
                 * processor stores.  Later processors do...
                 */
                if (tlb_type == spitfire &&
                    (vma->vm_flags & VM_EXEC) != 0 &&
                    vma->vm_file != NULL)
                        set_thread_fault_code(fault_code |
                                              FAULT_CODE_BLKCOMMIT);

                flags |= FAULT_FLAG_WRITE;
        } else {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

        fault = handle_mm_fault(vma, address, flags, regs);

        if (fault_signal_pending(fault, regs)) {
                if (regs->tstate & TSTATE_PRIV) {
                        insn = get_fault_insn(regs, insn);
                        goto handle_kernel_fault;
                }
                goto exit_exception;
        }

        /* The fault is fully completed (including releasing mmap lock) */
        if (fault & VM_FAULT_COMPLETED)
                goto lock_released;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (fault & VM_FAULT_RETRY) {
                flags |= FAULT_FLAG_TRIED;

                /* No need to mmap_read_unlock(mm) as we would
                 * have already released it in __lock_page_or_retry
                 * in mm/filemap.c.
                 */

                goto retry;
        }
        mmap_read_unlock(mm);

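        /* The TSB is the in-memory table the TLB-miss handlers search;
         * after a successful fault, grow it if the resident set would
         * overflow its rss limit.  Huge mappings are tracked in the
         * separate huge TSB, with REAL_HPAGE_PER_HPAGE accounting for
         * each HPAGE being mapped by multiple smaller "real" huge-page
         * TLB entries on sparc64.
         */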
lock_released:
        mm_rss = get_mm_rss(mm);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
        mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
#endif
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
                tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
        mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
        mm_rss *= REAL_HPAGE_PER_HPAGE;
        if (unlikely(mm_rss >
                     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
                if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
                        tsb_grow(mm, MM_TSB_HUGE, mm_rss);
                else
                        hugetlb_setup(regs);
        }
#endif
exit_exception:
        exception_exit(prev_state);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        mmap_read_unlock(mm);
bad_area_nosemaphore:
        insn = get_fault_insn(regs, insn);

handle_kernel_fault:
        do_kernel_fault(regs, si_code, fault_code, insn, address);
        goto exit_exception;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        insn = get_fault_insn(regs, insn);
        mmap_read_unlock(mm);
        if (!(regs->tstate & TSTATE_PRIV)) {
                pagefault_out_of_memory();
                goto exit_exception;
        }
        goto handle_kernel_fault;

intr_or_no_mm:
        insn = get_fault_insn(regs, 0);
        goto handle_kernel_fault;

do_sigbus:
        insn = get_fault_insn(regs, insn);
        mmap_read_unlock(mm);

        /*
         * Send a SIGBUS, regardless of whether we were in kernel
         * or user mode.
         */
        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);

        /* Kernel mode? Handle exceptions or die */
        if (regs->tstate & TSTATE_PRIV)
                goto handle_kernel_fault;
}