1 /* 2 * Handle unaligned accesses by emulation. 3 * 4 * This file is subject to the terms and conditions of the GNU General Public 5 * License. See the file "COPYING" in the main directory of this archive 6 * for more details. 7 * 8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle 9 * Copyright (C) 1999 Silicon Graphics, Inc. 10 * Copyright (C) 2014 Imagination Technologies Ltd. 11 * 12 * This file contains exception handler for address error exception with the 13 * special capability to execute faulting instructions in software. The 14 * handler does not try to handle the case when the program counter points 15 * to an address not aligned to a word boundary. 16 * 17 * Putting data to unaligned addresses is a bad practice even on Intel where 18 * only the performance is affected. Much worse is that such code is non- 19 * portable. Due to several programs that die on MIPS due to alignment 20 * problems I decided to implement this handler anyway though I originally 21 * didn't intend to do this at all for user code. 22 * 23 * For now I enable fixing of address errors by default to make life easier. 24 * I however intend to disable this somewhen in the future when the alignment 25 * problems with user programs have been fixed. For programmers this is the 26 * right way to go. 27 * 28 * Fixing address errors is a per process option. The option is inherited 29 * across fork(2) and execve(2) calls. If you really want to use the 30 * option in your user programs - I discourage the use of the software 31 * emulation strongly - use the following code in your userland stuff: 32 * 33 * #include <sys/sysmips.h> 34 * 35 * ... 36 * sysmips(MIPS_FIXADE, x); 37 * ... 38 * 39 * The argument x is 0 for disabling software emulation, enabled otherwise. 40 * 41 * Below a little program to play around with this feature. 
42 * 43 * #include <stdio.h> 44 * #include <sys/sysmips.h> 45 * 46 * struct foo { 47 * unsigned char bar[8]; 48 * }; 49 * 50 * main(int argc, char *argv[]) 51 * { 52 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7}; 53 * unsigned int *p = (unsigned int *) (x.bar + 3); 54 * int i; 55 * 56 * if (argc > 1) 57 * sysmips(MIPS_FIXADE, atoi(argv[1])); 58 * 59 * printf("*p = %08lx\n", *p); 60 * 61 * *p = 0xdeadface; 62 * 63 * for(i = 0; i <= 7; i++) 64 * printf("%02x ", x.bar[i]); 65 * printf("\n"); 66 * } 67 * 68 * Coprocessor loads are not supported; I think this case is unimportant 69 * in the practice. 70 * 71 * TODO: Handle ndc (attempted store to doubleword in uncached memory) 72 * exception for the R6000. 73 * A store crossing a page boundary might be executed only partially. 74 * Undo the partial store in this case. 75 */ 76 #include <linux/context_tracking.h> 77 #include <linux/mm.h> 78 #include <linux/signal.h> 79 #include <linux/smp.h> 80 #include <linux/sched.h> 81 #include <linux/debugfs.h> 82 #include <linux/perf_event.h> 83 84 #include <asm/asm.h> 85 #include <asm/branch.h> 86 #include <asm/byteorder.h> 87 #include <asm/cop2.h> 88 #include <asm/debug.h> 89 #include <asm/fpu.h> 90 #include <asm/fpu_emulator.h> 91 #include <asm/inst.h> 92 #include <asm/unaligned-emul.h> 93 #include <asm/mmu_context.h> 94 #include <asm/traps.h> 95 #include <linux/uaccess.h> 96 97 #include "access-helper.h" 98 99 enum { 100 UNALIGNED_ACTION_QUIET, 101 UNALIGNED_ACTION_SIGNAL, 102 UNALIGNED_ACTION_SHOW, 103 }; 104 #ifdef CONFIG_DEBUG_FS 105 static u32 unaligned_instructions; 106 static u32 unaligned_action; 107 #else 108 #define unaligned_action UNALIGNED_ACTION_QUIET 109 #endif 110 extern void show_registers(struct pt_regs *regs); 111 112 static void emulate_load_store_insn(struct pt_regs *regs, 113 void __user *addr, unsigned int *pc) 114 { 115 unsigned long origpc, orig31, value; 116 union mips_instruction insn; 117 unsigned int res; 118 bool user = user_mode(regs); 119 120 origpc = 
(unsigned long)pc; 121 orig31 = regs->regs[31]; 122 123 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); 124 125 /* 126 * This load never faults. 127 */ 128 __get_inst32(&insn.word, pc, user); 129 130 switch (insn.i_format.opcode) { 131 /* 132 * These are instructions that a compiler doesn't generate. We 133 * can assume therefore that the code is MIPS-aware and 134 * really buggy. Emulating these instructions would break the 135 * semantics anyway. 136 */ 137 case ll_op: 138 case lld_op: 139 case sc_op: 140 case scd_op: 141 142 /* 143 * For these instructions the only way to create an address 144 * error is an attempted access to kernel/supervisor address 145 * space. 146 */ 147 case ldl_op: 148 case ldr_op: 149 case lwl_op: 150 case lwr_op: 151 case sdl_op: 152 case sdr_op: 153 case swl_op: 154 case swr_op: 155 case lb_op: 156 case lbu_op: 157 case sb_op: 158 goto sigbus; 159 160 /* 161 * The remaining opcodes are the ones that are really of 162 * interest. 163 */ 164 #ifdef CONFIG_MACH_INGENIC 165 case spec2_op: 166 if (insn.mxu_lx_format.func != mxu_lx_op) 167 goto sigbus; /* other MXU instructions we don't care */ 168 169 switch (insn.mxu_lx_format.op) { 170 case mxu_lxw_op: 171 if (user && !access_ok(addr, 4)) 172 goto sigbus; 173 LoadW(addr, value, res); 174 if (res) 175 goto fault; 176 compute_return_epc(regs); 177 regs->regs[insn.mxu_lx_format.rd] = value; 178 break; 179 case mxu_lxh_op: 180 if (user && !access_ok(addr, 2)) 181 goto sigbus; 182 LoadHW(addr, value, res); 183 if (res) 184 goto fault; 185 compute_return_epc(regs); 186 regs->regs[insn.dsp_format.rd] = value; 187 break; 188 case mxu_lxhu_op: 189 if (user && !access_ok(addr, 2)) 190 goto sigbus; 191 LoadHWU(addr, value, res); 192 if (res) 193 goto fault; 194 compute_return_epc(regs); 195 regs->regs[insn.dsp_format.rd] = value; 196 break; 197 case mxu_lxb_op: 198 case mxu_lxbu_op: 199 goto sigbus; 200 default: 201 goto sigill; 202 } 203 break; 204 #endif 205 case spec3_op: 206 if 
(insn.dsp_format.func == lx_op) { 207 switch (insn.dsp_format.op) { 208 case lwx_op: 209 if (user && !access_ok(addr, 4)) 210 goto sigbus; 211 LoadW(addr, value, res); 212 if (res) 213 goto fault; 214 compute_return_epc(regs); 215 regs->regs[insn.dsp_format.rd] = value; 216 break; 217 case lhx_op: 218 if (user && !access_ok(addr, 2)) 219 goto sigbus; 220 LoadHW(addr, value, res); 221 if (res) 222 goto fault; 223 compute_return_epc(regs); 224 regs->regs[insn.dsp_format.rd] = value; 225 break; 226 default: 227 goto sigill; 228 } 229 } 230 #ifdef CONFIG_EVA 231 else { 232 /* 233 * we can land here only from kernel accessing user 234 * memory, so we need to "switch" the address limit to 235 * user space, so that address check can work properly. 236 */ 237 switch (insn.spec3_format.func) { 238 case lhe_op: 239 if (!access_ok(addr, 2)) 240 goto sigbus; 241 LoadHWE(addr, value, res); 242 if (res) 243 goto fault; 244 compute_return_epc(regs); 245 regs->regs[insn.spec3_format.rt] = value; 246 break; 247 case lwe_op: 248 if (!access_ok(addr, 4)) 249 goto sigbus; 250 LoadWE(addr, value, res); 251 if (res) 252 goto fault; 253 compute_return_epc(regs); 254 regs->regs[insn.spec3_format.rt] = value; 255 break; 256 case lhue_op: 257 if (!access_ok(addr, 2)) 258 goto sigbus; 259 LoadHWUE(addr, value, res); 260 if (res) 261 goto fault; 262 compute_return_epc(regs); 263 regs->regs[insn.spec3_format.rt] = value; 264 break; 265 case she_op: 266 if (!access_ok(addr, 2)) 267 goto sigbus; 268 compute_return_epc(regs); 269 value = regs->regs[insn.spec3_format.rt]; 270 StoreHWE(addr, value, res); 271 if (res) 272 goto fault; 273 break; 274 case swe_op: 275 if (!access_ok(addr, 4)) 276 goto sigbus; 277 compute_return_epc(regs); 278 value = regs->regs[insn.spec3_format.rt]; 279 StoreWE(addr, value, res); 280 if (res) 281 goto fault; 282 break; 283 default: 284 goto sigill; 285 } 286 } 287 #endif 288 break; 289 case lh_op: 290 if (user && !access_ok(addr, 2)) 291 goto sigbus; 292 293 if 
(IS_ENABLED(CONFIG_EVA) && user) 294 LoadHWE(addr, value, res); 295 else 296 LoadHW(addr, value, res); 297 298 if (res) 299 goto fault; 300 compute_return_epc(regs); 301 regs->regs[insn.i_format.rt] = value; 302 break; 303 304 case lw_op: 305 if (user && !access_ok(addr, 4)) 306 goto sigbus; 307 308 if (IS_ENABLED(CONFIG_EVA) && user) 309 LoadWE(addr, value, res); 310 else 311 LoadW(addr, value, res); 312 313 if (res) 314 goto fault; 315 compute_return_epc(regs); 316 regs->regs[insn.i_format.rt] = value; 317 break; 318 319 case lhu_op: 320 if (user && !access_ok(addr, 2)) 321 goto sigbus; 322 323 if (IS_ENABLED(CONFIG_EVA) && user) 324 LoadHWUE(addr, value, res); 325 else 326 LoadHWU(addr, value, res); 327 328 if (res) 329 goto fault; 330 compute_return_epc(regs); 331 regs->regs[insn.i_format.rt] = value; 332 break; 333 334 case lwu_op: 335 #ifdef CONFIG_64BIT 336 /* 337 * A 32-bit kernel might be running on a 64-bit processor. But 338 * if we're on a 32-bit processor and an i-cache incoherency 339 * or race makes us see a 64-bit instruction here the sdl/sdr 340 * would blow up, so for now we don't handle unaligned 64-bit 341 * instructions on 32-bit kernels. 342 */ 343 if (user && !access_ok(addr, 4)) 344 goto sigbus; 345 346 LoadWU(addr, value, res); 347 if (res) 348 goto fault; 349 compute_return_epc(regs); 350 regs->regs[insn.i_format.rt] = value; 351 break; 352 #endif /* CONFIG_64BIT */ 353 354 /* Cannot handle 64-bit instructions in 32-bit kernel */ 355 goto sigill; 356 357 case ld_op: 358 #ifdef CONFIG_64BIT 359 /* 360 * A 32-bit kernel might be running on a 64-bit processor. But 361 * if we're on a 32-bit processor and an i-cache incoherency 362 * or race makes us see a 64-bit instruction here the sdl/sdr 363 * would blow up, so for now we don't handle unaligned 64-bit 364 * instructions on 32-bit kernels. 
365 */ 366 if (user && !access_ok(addr, 8)) 367 goto sigbus; 368 369 LoadDW(addr, value, res); 370 if (res) 371 goto fault; 372 compute_return_epc(regs); 373 regs->regs[insn.i_format.rt] = value; 374 break; 375 #endif /* CONFIG_64BIT */ 376 377 /* Cannot handle 64-bit instructions in 32-bit kernel */ 378 goto sigill; 379 380 case sh_op: 381 if (user && !access_ok(addr, 2)) 382 goto sigbus; 383 384 compute_return_epc(regs); 385 value = regs->regs[insn.i_format.rt]; 386 387 if (IS_ENABLED(CONFIG_EVA) && user) 388 StoreHWE(addr, value, res); 389 else 390 StoreHW(addr, value, res); 391 392 if (res) 393 goto fault; 394 break; 395 396 case sw_op: 397 if (user && !access_ok(addr, 4)) 398 goto sigbus; 399 400 compute_return_epc(regs); 401 value = regs->regs[insn.i_format.rt]; 402 403 if (IS_ENABLED(CONFIG_EVA) && user) 404 StoreWE(addr, value, res); 405 else 406 StoreW(addr, value, res); 407 408 if (res) 409 goto fault; 410 break; 411 412 case sd_op: 413 #ifdef CONFIG_64BIT 414 /* 415 * A 32-bit kernel might be running on a 64-bit processor. But 416 * if we're on a 32-bit processor and an i-cache incoherency 417 * or race makes us see a 64-bit instruction here the sdl/sdr 418 * would blow up, so for now we don't handle unaligned 64-bit 419 * instructions on 32-bit kernels. 420 */ 421 if (user && !access_ok(addr, 8)) 422 goto sigbus; 423 424 compute_return_epc(regs); 425 value = regs->regs[insn.i_format.rt]; 426 StoreDW(addr, value, res); 427 if (res) 428 goto fault; 429 break; 430 #endif /* CONFIG_64BIT */ 431 432 /* Cannot handle 64-bit instructions in 32-bit kernel */ 433 goto sigill; 434 435 #ifdef CONFIG_MIPS_FP_SUPPORT 436 437 case lwc1_op: 438 case ldc1_op: 439 case swc1_op: 440 case sdc1_op: 441 case cop1x_op: { 442 void __user *fault_addr = NULL; 443 444 die_if_kernel("Unaligned FP access in kernel code", regs); 445 BUG_ON(!used_math()); 446 447 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, 448 &fault_addr); 449 own_fpu(1); /* Restore FPU state. 
*/ 450 451 /* Signal if something went wrong. */ 452 process_fpemu_return(res, fault_addr, 0); 453 454 if (res == 0) 455 break; 456 return; 457 } 458 #endif /* CONFIG_MIPS_FP_SUPPORT */ 459 460 #ifdef CONFIG_CPU_HAS_MSA 461 462 case msa_op: { 463 unsigned int wd, preempted; 464 enum msa_2b_fmt df; 465 union fpureg *fpr; 466 467 if (!cpu_has_msa) 468 goto sigill; 469 470 /* 471 * If we've reached this point then userland should have taken 472 * the MSA disabled exception & initialised vector context at 473 * some point in the past. 474 */ 475 BUG_ON(!thread_msa_context_live()); 476 477 df = insn.msa_mi10_format.df; 478 wd = insn.msa_mi10_format.wd; 479 fpr = ¤t->thread.fpu.fpr[wd]; 480 481 switch (insn.msa_mi10_format.func) { 482 case msa_ld_op: 483 if (!access_ok(addr, sizeof(*fpr))) 484 goto sigbus; 485 486 do { 487 /* 488 * If we have live MSA context keep track of 489 * whether we get preempted in order to avoid 490 * the register context we load being clobbered 491 * by the live context as it's saved during 492 * preemption. If we don't have live context 493 * then it can't be saved to clobber the value 494 * we load. 495 */ 496 preempted = test_thread_flag(TIF_USEDMSA); 497 498 res = __copy_from_user_inatomic(fpr, addr, 499 sizeof(*fpr)); 500 if (res) 501 goto fault; 502 503 /* 504 * Update the hardware register if it is in use 505 * by the task in this quantum, in order to 506 * avoid having to save & restore the whole 507 * vector context. 508 */ 509 preempt_disable(); 510 if (test_thread_flag(TIF_USEDMSA)) { 511 write_msa_wr(wd, fpr, df); 512 preempted = 0; 513 } 514 preempt_enable(); 515 } while (preempted); 516 break; 517 518 case msa_st_op: 519 if (!access_ok(addr, sizeof(*fpr))) 520 goto sigbus; 521 522 /* 523 * Update from the hardware register if it is in use by 524 * the task in this quantum, in order to avoid having to 525 * save & restore the whole vector context. 
526 */ 527 preempt_disable(); 528 if (test_thread_flag(TIF_USEDMSA)) 529 read_msa_wr(wd, fpr, df); 530 preempt_enable(); 531 532 res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr)); 533 if (res) 534 goto fault; 535 break; 536 537 default: 538 goto sigbus; 539 } 540 541 compute_return_epc(regs); 542 break; 543 } 544 #endif /* CONFIG_CPU_HAS_MSA */ 545 546 #ifndef CONFIG_CPU_MIPSR6 547 /* 548 * COP2 is available to implementor for application specific use. 549 * It's up to applications to register a notifier chain and do 550 * whatever they have to do, including possible sending of signals. 551 * 552 * This instruction has been reallocated in Release 6 553 */ 554 case lwc2_op: 555 cu2_notifier_call_chain(CU2_LWC2_OP, regs); 556 break; 557 558 case ldc2_op: 559 cu2_notifier_call_chain(CU2_LDC2_OP, regs); 560 break; 561 562 case swc2_op: 563 cu2_notifier_call_chain(CU2_SWC2_OP, regs); 564 break; 565 566 case sdc2_op: 567 cu2_notifier_call_chain(CU2_SDC2_OP, regs); 568 break; 569 #endif 570 default: 571 /* 572 * Pheeee... We encountered an yet unknown instruction or 573 * cache coherence problem. Die sucker, die ... 574 */ 575 goto sigill; 576 } 577 578 #ifdef CONFIG_DEBUG_FS 579 unaligned_instructions++; 580 #endif 581 582 return; 583 584 fault: 585 /* roll back jump/branch */ 586 regs->cp0_epc = origpc; 587 regs->regs[31] = orig31; 588 /* Did we have an exception handler installed? */ 589 if (fixup_exception(regs)) 590 return; 591 592 die_if_kernel("Unhandled kernel unaligned access", regs); 593 force_sig(SIGSEGV); 594 595 return; 596 597 sigbus: 598 die_if_kernel("Unhandled kernel unaligned access", regs); 599 force_sig(SIGBUS); 600 601 return; 602 603 sigill: 604 die_if_kernel 605 ("Unhandled kernel unaligned access or invalid instruction", regs); 606 force_sig(SIGILL); 607 } 608 609 /* Recode table from 16-bit register notation to 32-bit GPR. 
 */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };

/*
 * emulate_load_store_microMIPS - emulate one unaligned microMIPS load/store.
 * @regs: trapping task's saved registers.
 * @addr: the unaligned data address that raised the address error.
 *
 * Fetches one or two halfwords at epc (microMIPS instructions are 16 or 32
 * bits wide) plus the following instruction, lets mm_isBranchInstr() decide
 * whether the faulting access is the branch itself or the instruction in
 * its delay slot (and compute contpc accordingly), then dispatches on the
 * major opcode.  Multi-register LWM/SWM/LDM/SDM variants are emulated one
 * register at a time.  On success epc is set to contpc; on a mid-sequence
 * fault pc/ra are rolled back and a signal is raised.
 */
static void emulate_load_store_microMIPS(struct pt_regs *regs,
					 void __user *addr)
{
	unsigned long value;
	unsigned int res;
	int i;
	unsigned int reg = 0, rvar;
	unsigned long orig31;
	u16 __user *pc16;
	u16 halfword;
	unsigned int word;
	unsigned long origpc, contpc;
	union mips_instruction insn;
	struct mm_decoded_insn mminsn;
	bool user = user_mode(regs);

	/* Remember pc and ra so a faulting access can be rolled back. */
	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];

	mminsn.micro_mips_mode = 1;

	/*
	 * This load never faults.
	 */
	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
	__get_user(halfword, pc16);
	pc16++;
	contpc = regs->cp0_epc + 2;
	word = ((unsigned int)halfword << 16);
	mminsn.pc_inc = 2;

	/* A second halfword belongs to the instruction if it is 32-bit. */
	if (!mm_insn_16bit(halfword)) {
		__get_user(halfword, pc16);
		pc16++;
		contpc = regs->cp0_epc + 4;
		mminsn.pc_inc = 4;
		word |= halfword;
	}
	mminsn.insn = word;

	/* Also fetch the next instruction, needed for delay-slot handling. */
	if (get_user(halfword, pc16))
		goto fault;
	mminsn.next_pc_inc = 2;
	word = ((unsigned int)halfword << 16);

	if (!mm_insn_16bit(halfword)) {
		pc16++;
		if (get_user(halfword, pc16))
			goto fault;
		mminsn.next_pc_inc = 4;
		word |= halfword;
	}
	mminsn.next_insn = word;

	/* If epc points at a branch, the faulting access is in its slot. */
	insn = (union mips_instruction)(mminsn.insn);
	if (mm_isBranchInstr(regs, mminsn, &contpc))
		insn = (union mips_instruction)(mminsn.next_insn);

	/* Parse instruction to find what to do */

	switch (insn.mm_i_format.opcode) {

	case mm_pool32a_op:
		switch (insn.mm_x_format.func) {
		case mm_lwxs_op:
			reg = insn.mm_x_format.rd;
			goto loadW;
		}

		goto sigbus;

	case mm_pool32b_op:
		switch (insn.mm_m_format.func) {
		/* LWP: load a pair of words into rd and rd+1. */
		case mm_lwp_func:
			reg = insn.mm_m_format.rd;
			if (reg == 31)	/* rd+1 would run off the GPR file */
				goto sigbus;

			if (user && !access_ok(addr, 8))
				goto sigbus;

			LoadW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg] = value;
			addr += 4;
			LoadW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg + 1] = value;
			goto success;

		case mm_swp_func:
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (user && !access_ok(addr, 8))
				goto sigbus;

			value = regs->regs[reg];
			StoreW(addr, value, res);
			if (res)
				goto fault;
			addr += 4;
			value = regs->regs[reg + 1];
			StoreW(addr, value, res);
			if (res)
				goto fault;
			goto success;

		/* LDP: load a pair of doublewords (64-bit kernels only). */
		case mm_ldp_func:
#ifdef CONFIG_64BIT
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (user && !access_ok(addr, 16))
				goto sigbus;

			LoadDW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg] = value;
			addr += 8;
			LoadDW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg + 1] = value;
			goto success;
#endif /* CONFIG_64BIT */

			goto sigill;

		case mm_sdp_func:
#ifdef CONFIG_64BIT
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (user && !access_ok(addr, 16))
				goto sigbus;

			value = regs->regs[reg];
			StoreDW(addr, value, res);
			if (res)
				goto fault;
			addr += 8;
			value = regs->regs[reg + 1];
			StoreDW(addr, value, res);
			if (res)
				goto fault;
			goto success;
#endif /* CONFIG_64BIT */

			goto sigill;

		/*
		 * LWM32: load registers $16..$16+n (optionally $30 when the
		 * low nibble is 9, and $31 when bit 4 of rd is set).
		 */
		case mm_lwm32_func:
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (user && !access_ok(addr, 4 * (rvar + 1)))
					goto sigbus;
			} else {
				if (user && !access_ok(addr, 4 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;
			for (i = 16; rvar; rvar--, i++) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[i] = value;
			}
			if ((reg & 0xf) == 9) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[30] = value;
			}
			if (reg & 0x10) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				regs->regs[31] = value;
			}
			goto success;

		case mm_swm32_func:
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (user && !access_ok(addr, 4 * (rvar + 1)))
					goto sigbus;
			} else {
				if (user && !access_ok(addr, 4 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;
			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
			}
			if ((reg & 0xf) == 9) {
				value = regs->regs[30];
				StoreW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
			}
			if (reg & 0x10) {
				value = regs->regs[31];
				StoreW(addr, value, res);
				if (res)
					goto fault;
			}
			goto success;

		case mm_ldm_func:
#ifdef CONFIG_64BIT
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (user && !access_ok(addr, 8 * (rvar + 1)))
					goto sigbus;
			} else {
				if (user && !access_ok(addr, 8 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;

			for (i = 16; rvar; rvar--, i++) {
				LoadDW(addr, value, res);
				if (res)
					goto fault;
				/*
				 * NOTE(review): LDM loads doublewords but the
				 * address advances by only 4 here, while the
				 * matching mm_sdm_func loop advances by 8 and
				 * the $30 case below also advances by 8 --
				 * looks suspicious; verify against the
				 * microMIPS LDM definition.
				 */
				addr += 4;
				regs->regs[i] = value;
			}
			if ((reg & 0xf) == 9) {
				LoadDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
				regs->regs[30] = value;
			}
			if (reg & 0x10) {
				LoadDW(addr, value, res);
				if (res)
					goto fault;
				regs->regs[31] = value;
			}
			goto success;
#endif /* CONFIG_64BIT */

			goto sigill;

		case mm_sdm_func:
#ifdef CONFIG_64BIT
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (user && !access_ok(addr, 8 * (rvar + 1)))
					goto sigbus;
			} else {
				if (user && !access_ok(addr, 8 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;

			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
			}
			if ((reg & 0xf) == 9) {
				value = regs->regs[30];
				StoreDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
			}
			if (reg & 0x10) {
				value = regs->regs[31];
				StoreDW(addr, value, res);
				if (res)
					goto fault;
			}
			goto success;
#endif /* CONFIG_64BIT */

			goto sigill;

			/* LWC2, SWC2, LDC2, SDC2 are not serviced */
		}

		goto sigbus;

	case mm_pool32c_op:
		switch (insn.mm_m_format.func) {
		case mm_lwu_func:
			reg = insn.mm_m_format.rd;
			goto loadWU;
		}

		/* LL,SC,LLD,SCD are not serviced */
		goto sigbus;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case mm_pool32f_op:
		switch (insn.mm_x_format.func) {
		case mm_lwxc1_func:
		case mm_swxc1_func:
		case mm_ldxc1_func:
		case mm_sdxc1_func:
			goto fpu_emul;
		}

		goto sigbus;

	/* FP accesses are handed off wholesale to the FPU emulator. */
	case mm_ldc132_op:
	case mm_sdc132_op:
	case mm_lwc132_op:
	case mm_swc132_op: {
		void __user *fault_addr = NULL;

fpu_emul:
		/* roll back jump/branch */
		regs->cp0_epc = origpc;
		regs->regs[31] = orig31;

		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());
		BUG_ON(!is_fpu_owner());

		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* restore FPU state */

		/* If something went wrong, signal */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			goto success;
		return;
	}
#endif /* CONFIG_MIPS_FP_SUPPORT */

	/* 32-bit scalar forms: pick the destination, share the tail code. */
	case mm_lh32_op:
		reg = insn.mm_i_format.rt;
		goto loadHW;

	case mm_lhu32_op:
		reg = insn.mm_i_format.rt;
		goto loadHWU;

	case mm_lw32_op:
		reg = insn.mm_i_format.rt;
		goto loadW;

	case mm_sh32_op:
		reg = insn.mm_i_format.rt;
		goto storeHW;

	case mm_sw32_op:
		reg = insn.mm_i_format.rt;
		goto storeW;

	case mm_ld32_op:
		reg = insn.mm_i_format.rt;
		goto loadDW;

	case mm_sd32_op:
		reg = insn.mm_i_format.rt;
		goto storeDW;

	case mm_pool16c_op:
		switch (insn.mm16_m_format.func) {
		/* LWM16: load $16..$16+rlist then $31. */
		case mm_lwm16_op:
			reg = insn.mm16_m_format.rlist;
			rvar = reg + 1;
			if (user && !access_ok(addr, 4 * rvar))
				goto sigbus;

			for (i = 16; rvar; rvar--, i++) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[i] = value;
			}
			LoadW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[31] = value;

			goto success;

		case mm_swm16_op:
			reg = insn.mm16_m_format.rlist;
			rvar = reg + 1;
			if (user && !access_ok(addr, 4 * rvar))
				goto sigbus;

			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
			}
			value = regs->regs[31];
			StoreW(addr, value, res);
			if (res)
				goto fault;

			goto success;

		}

		goto sigbus;

	/* 16-bit forms: registers come via the reg16to32 recode tables. */
	case mm_lhu16_op:
		reg = reg16to32[insn.mm16_rb_format.rt];
		goto loadHWU;

	case mm_lw16_op:
		reg = reg16to32[insn.mm16_rb_format.rt];
		goto loadW;

	case mm_sh16_op:
		reg = reg16to32st[insn.mm16_rb_format.rt];
		goto storeHW;

	case mm_sw16_op:
		reg = reg16to32st[insn.mm16_rb_format.rt];
		goto storeW;

	case mm_lwsp16_op:
		reg = insn.mm16_r5_format.rt;
		goto loadW;

	case mm_swsp16_op:
		reg = insn.mm16_r5_format.rt;
		goto storeW;

	case mm_lwgp16_op:
		reg = reg16to32[insn.mm16_r3_format.rt];
		goto loadW;

	default:
		goto sigill;
	}

	/*
	 * Shared access tails: "reg" selects the GPR, "addr" the location.
	 * All of them branch to success (advance epc) or fault (roll back).
	 */
loadHW:
	if (user && !access_ok(addr, 2))
		goto sigbus;

	LoadHW(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;

loadHWU:
	if (user && !access_ok(addr, 2))
		goto sigbus;

	LoadHWU(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;

loadW:
	if (user && !access_ok(addr, 4))
		goto sigbus;

	LoadW(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;

loadWU:
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (user && !access_ok(addr, 4))
		goto sigbus;

	LoadWU(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;

loadDW:
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (user && !access_ok(addr, 8))
		goto sigbus;

	LoadDW(addr, value, res);
	if (res)
		goto fault;
	regs->regs[reg] = value;
	goto success;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;

storeHW:
	if (user && !access_ok(addr, 2))
		goto sigbus;

	value = regs->regs[reg];
	StoreHW(addr, value, res);
	if (res)
		goto fault;
	goto success;

storeW:
	if (user && !access_ok(addr, 4))
		goto sigbus;

	value = regs->regs[reg];
	StoreW(addr, value, res);
	if (res)
		goto fault;
	goto success;

storeDW:
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (user && !access_ok(addr, 8))
		goto sigbus;

	value = regs->regs[reg];
	StoreDW(addr, value, res);
	if (res)
		goto fault;
	goto success;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;

success:
	regs->cp0_epc = contpc;	/* advance or branch */

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif
	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL);
}

/*
 * emulate_load_store_MIPS16e - emulate one unaligned MIPS16e load/store.
 * @regs: trapping task's saved registers.
 * @addr: the unaligned data address that raised the address error.
 *
 * Skips a leading EXTEND prefix (or a jump when the access sits in a delay
 * slot) before decoding, then dispatches on the recoded opcode.
 */
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
{
	unsigned long value;
	unsigned int res;
	int reg;
	unsigned long orig31;
	u16 __user *pc16;
	unsigned long origpc;
	union mips16e_instruction mips16inst, oldinst;
	unsigned int opcode;
	int extended = 0;
	bool user = user_mode(regs);

	/* Remember pc and ra so a faulting access can be rolled back. */
	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];
	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	oldinst = mips16inst;

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		extended = 1;
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/* skip jump instructions */
		/* JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}

	opcode = mips16inst.ri.opcode;
	switch (opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;
		}

		goto sigbus;

	/*
	 * NOTE(review): this span is the interior of
	 * emulate_load_store_MIPS16e(); the function head (and the decode
	 * that sets 'opcode', 'extended', 'mips16inst', 'oldinst') is above
	 * this view.  First switch: map the MIPS16e register field to a
	 * 32-GPR number and rewrite 'opcode' for the MIPS16e2 SP/GP-relative
	 * encodings that overload the imm field.
	 */
	case MIPS16e_swsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		/*
		 * MIPS16e2 reuses imm[7:5] of SWSP to encode SWGP/SHGP;
		 * SHGP is really a halfword store, so retarget the opcode.
		 */
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* SWSP */
			case 1:		/* SWGP */
				break;
			case 2:		/* SHGP */
				opcode = MIPS16e_sh_op;
				break;
			default:
				goto sigbus;
			}
		break;

	case MIPS16e_lwpc_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		/*
		 * MIPS16e2 LWSP imm[7:5] selects LWGP/LHGP/LHUGP variants;
		 * the halfword forms are redirected to the LH/LHU emulation
		 * below.  (imm>>5 == 3 is not a defined encoding here.)
		 */
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* LWSP */
			case 1:		/* LWGP */
				break;
			case 2:		/* LHGP */
				opcode = MIPS16e_lh_op;
				break;
			case 4:		/* LHUGP */
				opcode = MIPS16e_lhu_op;
				break;
			default:
				goto sigbus;
			}
		break;

	case MIPS16e_i8_op:
		/* Only the SWRASP function of the I8 group is a store we
		 * can emulate; anything else gets SIGBUS. */
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}

	/*
	 * Second switch: perform the actual (possibly rewritten) access
	 * using the Load*/Store* emulation macros from
	 * <asm/unaligned-emul.h>.  Byte accesses can never be unaligned,
	 * so reaching here with one means something is wrong -> SIGBUS.
	 */
	switch (opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		goto sigbus;

	case MIPS16e_lh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		/* Commit the new EPC only after the access succeeded. */
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (user && !access_ok(addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel
		 * (on 64-bit builds the break above makes this unreachable) */
		goto sigill;

	case MIPS16e_ld_op:
		/* NOTE(review): 'loadDW' appears to be a jump target for
		 * 64-bit loads decoded elsewhere in this function — the
		 * referencing goto is outside this view; confirm. */
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_sh_op:
		if (user && !access_ok(addr, 2))
			goto sigbus;

		/*
		 * Stores advance the EPC before the access; a faulting
		 * store rolls the EPC back at the 'fault' label below.
		 */
		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (user && !access_ok(addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sd_op:
		/* NOTE(review): like 'loadDW', this looks like a target for
		 * 64-bit stores decoded above this view; confirm. */
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (user && !access_ok(addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

	/* Successful emulation: account it for debugfs. */
#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL);
}

/*
 * do_ade - address error exception (AdEL/AdES) entry point.
 * @regs: trap frame of the faulting context.
 *
 * Decides whether the unaligned access recorded in cp0_badvaddr may be
 * emulated in software (see the policy discussion at the top of this
 * file) and dispatches to the classic-MIPS, microMIPS or MIPS16e
 * emulator; otherwise raises SIGBUS.
 *
 * NOTE(review): the early 'return' paths (fixup_exception success and
 * after each emulate_load_store_* call) leave without calling
 * exception_exit(prev_state), while the sigbus path does call it —
 * looks asymmetric; confirm against context-tracking requirements.
 */
asmlinkage void do_ade(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int *pc;

	prev_state = exception_enter();
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
			1, regs, regs->cp0_badvaddr);

#ifdef CONFIG_64BIT
	/*
	 * check, if we are hitting space between CPU implemented maximum
	 * virtual user address and 64bit maximum virtual user address
	 * and do exception handling to get EFAULTs for get_user/put_user
	 */
	if ((regs->cp0_badvaddr >= (1UL << cpu_vmbits)) &&
		(regs->cp0_badvaddr < XKSSEG)) {
		if (fixup_exception(regs)) {
			current->thread.cp0_baduaddr = regs->cp0_badvaddr;
			return;
		}
		goto sigbus;
	}
#endif

	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->cp0_badvaddr == regs->cp0_epc)
		goto sigbus;

	/* User tasks must have opted in via sysmips(MIPS_FIXADE, ...). */
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so but ugly ...
	 */

	/*
	 * Are we running in microMIPS mode?
	 */
	if (get_isa16_mode(regs->cp0_epc)) {
		/*
		 * Did we catch a fault trying to load an instruction in
		 * 16-bit mode?
		 */
		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
			goto sigbus;
		if (unaligned_action == UNALIGNED_ACTION_SHOW)
			show_registers(regs);

		if (cpu_has_mmips) {
			emulate_load_store_microMIPS(regs,
				(void __user *)regs->cp0_badvaddr);
			return;
		}

		if (cpu_has_mips16) {
			emulate_load_store_MIPS16e(regs,
				(void __user *)regs->cp0_badvaddr);
			return;
		}

		goto sigbus;
	}

	/* Classic 32-bit-wide MIPS instruction stream. */
	if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);
	pc = (unsigned int *)exception_epc(regs);

	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
	exception_exit(prev_state);
}

#ifdef CONFIG_DEBUG_FS
/*
 * Expose the emulation counter (read-only) and the action policy
 * (root-writable) under the MIPS debugfs directory.
 */
static int __init debugfs_unaligned(void)
{
	debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
			&unaligned_instructions);
	debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			mips_debugfs_dir, &unaligned_action);
	return 0;
}
arch_initcall(debugfs_unaligned);
#endif
/*
 * Linux® is a registered trademark of Linus Torvalds in the United States
 * and other countries.
 * TOMOYO® is a registered trademark of NTT DATA CORPORATION.
 */