/*
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004 - 2008 by Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>

/*
 * Macro to find first bit set in WINDOWBASE from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

	.macro ffs_ws bit mask

#if XCHAL_HAVE_NSA
	nsau	\bit, \mask		# 32-WSBITS ... 31 (32 iff 0)
	addi	\bit, \bit, WSBITS - 32 + 1	# uppermost bit set -> return 1
#else
	movi	\bit, WSBITS
#if WSBITS > 16
	_bltui	\mask, 0x10000, 99f
	addi	\bit, \bit, -16
	extui	\mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:	_bltui	\mask, 0x100, 99f
	addi	\bit, \bit, -8
	srli	\mask, \mask, 8
#endif
99:	_bltui	\mask, 0x10, 99f
	addi	\bit, \bit, -4
	srli	\mask, \mask, 4
99:	_bltui	\mask, 0x4, 99f
	addi	\bit, \bit, -2
	srli	\mask, \mask, 2
99:	_bltui	\mask, 0x2, 99f
	addi	\bit, \bit, -1
99:

#endif
	.endm


	.macro	irq_save flags tmp
#if XTENSA_FAKE_NMI
#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
	rsr	\flags, ps
	extui	\tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei	\tmp, LOCKLEVEL, 99f
	rsil	\tmp, LOCKLEVEL
99:
#else
	movi	\tmp, LOCKLEVEL
	rsr	\flags, ps
	or	\flags, \flags, \tmp
	xsr	\flags, ps
	rsync
#endif
#else
	rsil	\flags, LOCKLEVEL
#endif
	.endm
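/*
 * A C sketch of what ffs_ws computes (illustrative only; assumes
 * mask != 0 and WSBITS <= 32): the 1-based position of the most
 * significant set bit, counted from the left of the WSBITS-wide field.
 *
 *	static unsigned ffs_ws(unsigned mask)
 *	{
 *		unsigned bit = 1;			// 100...0 -> 1
 *
 *		while ((mask & (1u << (WSBITS - bit))) == 0)
 *			bit++;				// 000...1 -> WSBITS
 *		return bit;
 *	}
 *
 * The NSA path gets this in one step: nsau yields the number of leading
 * zeros of a 32-bit value, so bit = nsau(mask) + WSBITS - 32 + 1.
 */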
/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original value in depc
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12.
 */
	.literal_position

ENTRY(user_exception)

	/* Save a1, a2, a3, and set SP. */

	rsr	a0, depc
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	wsr	a2, depc		# terminate user stack trace with 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if XCHAL_HAVE_THREADPTR
	rur	a2, threadptr
	s32i	a2, a1, PT_THREADPTR
#endif

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

#if defined(USER_SUPPORT_WINDOWED)
	rsr	a2, windowbase
	rsr	a3, windowstart
	ssr	a2
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers
#else
	movi	a2, 0
	movi	a3, 1
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	s32i	a3, a1, PT_WMASK
#endif

	/* Save only live registers. */

UABI_W	_bbsi.l	a2, 1, .Lsave_window_registers
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
UABI_W	_bbsi.l	a2, 2, .Lsave_window_registers
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
UABI_W	_bbsi.l	a2, 3, .Lsave_window_registers
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

#if defined(USER_SUPPORT_WINDOWED)
	/* If only one valid frame skip saving regs. */

	beqi	a2, 1, common_exception

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:  001001000110001
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */
.Lsave_window_registers:
	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file

	/* Save 4 registers at a time */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1
	addi	a1, a5, -16
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, sar			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3
	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
	wsr	a2, windowbase		# and WINDOWBASE
	rsync

	/* We are back to the original stack pointer (a1) */
#endif
	/* Now, jump to the common exception handler. */

	j	common_exception

ENDPROC(user_exception)
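/*
 * The WINDOWSTART rotation used above, as a C sketch (illustrative only,
 * for WSBITS < 32): the live-frame mask is rotated right by WINDOWBASE so
 * that the current frame lands in bit 0; the result is what gets stored
 * as PT_WMASK.
 *
 *	static unsigned rotate_ws(unsigned ws, unsigned wb)
 *	{
 *		// ws = xxwww1yyyy, the '1' marks the current frame (wb)
 *		return ((ws >> wb) | (ws << (WSBITS - wb))) &
 *			((1u << WSBITS) - 1);		// -> yyyyxxwww1
 *	}
 *
 * With the current frame at bit 0, the low bits encode how close the
 * caller's frame is (i.e. the caller's call size), which the UABI_W
 * _bbsi.l tests above use to decide how many of a4..a15 still belong to
 * the current frame and must be saved individually.
 */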
/*
 * First-level exit handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *	 MOVSP here, as we do that when we return from the exception.
 *	 (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12.
 */

ENTRY(kernel_exception)

	/* Save a1, a2, a3, and set SP. */

	rsr	a0, depc		# get a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _kernel_exception
_kernel_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if defined(__XTENSA_WINDOWED_ABI__)
	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, windowbase		# don't need to save these, we only
	rsr	a3, windowstart		# need shifted windowstart: windowmask
	ssr	a2
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit
#endif

	/* Save only the live window-frame */

KABI_W	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
KABI_W	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
KABI_W	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

#ifdef __XTENSA_WINDOWED_ABI__
	_bnei	a2, 1, 1f
	/* Copy spill slots of a0 and a1 to imitate movsp
	 * in order to keep exception stack continuous
	 */
	l32i	a3, a1, PT_KERNEL_SIZE
	l32i	a0, a1, PT_KERNEL_SIZE + 4
	s32e	a3, a1, -16
	s32e	a0, a1, -12
#endif
1:
	l32i	a0, a1, PT_AREG0	# restore saved a0
	wsr	a0, depc
/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

	/* Save some registers, disable loops and clear the syscall flag. */

	rsr	a2, debugcause
	rsr	a3, epc1
	s32i	a2, a1, PT_DEBUGCAUSE
	s32i	a3, a1, PT_PC

	movi	a2, NO_SYSCALL
	rsr	a3, excvaddr
	s32i	a2, a1, PT_SYSCALL
	movi	a2, 0
	s32i	a3, a1, PT_EXCVADDR
#if XCHAL_HAVE_LOOPS
	xsr	a2, lcount
	s32i	a2, a1, PT_LCOUNT
#endif

#if XCHAL_HAVE_EXCLUSIVE
	/* Clear exclusive access monitor set by interrupted code */
	clrex
#endif

	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */

	rsr	a2, exccause
	movi	a3, 0
	rsr	a0, excsave1
	s32i	a2, a1, PT_EXCCAUSE
	s32i	a3, a0, EXC_TABLE_FIXUP

	/* All unrecoverable states are saved on stack, now, and a1 is valid.
	 * Now we can allow exceptions again. In case we've got an interrupt
	 * PS.INTLEVEL is set to LOCKLEVEL disabling further interrupts,
	 * otherwise it's left unchanged.
	 *
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 */

	rsr	a3, ps
	s32i	a3, a1, PT_PS		# save ps

#if XTENSA_FAKE_NMI
	/* Correct PS needs to be saved in the PT_PS:
	 * - in case of exception or level-1 interrupt it's in the PS,
	 *   and is already saved.
	 * - in case of medium level interrupt it's in the excsave2.
	 */
	movi	a0, EXCCAUSE_MAPPED_NMI
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	beq	a2, a0, .Lmedium_level_irq
	bnei	a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
	beqz	a3, .Llevel1_irq	# level-1 irq sets ps.intlevel to 0

.Lmedium_level_irq:
	rsr	a0, excsave2
	s32i	a0, a1, PT_PS		# save medium-level interrupt ps
	bgei	a3, LOCKLEVEL, .Lexception

.Llevel1_irq:
	movi	a3, LOCKLEVEL

.Lexception:
KABI_W	movi	a0, PS_WOE_MASK
KABI_W	or	a3, a3, a0
#else
	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
	movi	a0, LOCKLEVEL
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
					# a3 = PS.INTLEVEL
	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
KABI_W	movi	a2, PS_WOE_MASK
KABI_W	or	a3, a3, a2
#endif

	/* restore return address (or 0 if return to userspace) */
	rsr	a0, depc
	wsr	a3, ps
	rsync				# PS.WOE => rsync => overflow

	/* Save lbeg, lend */
#if XCHAL_HAVE_LOOPS
	rsr	a4, lbeg
	rsr	a3, lend
	s32i	a4, a1, PT_LBEG
	s32i	a3, a1, PT_LEND
#endif

	/* Save SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	rsr	a3, scompare1
	s32i	a3, a1, PT_SCOMPARE1
#endif

	/* Save optional registers. */

	save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT

#ifdef CONFIG_TRACE_IRQFLAGS
	rsr		abi_tmp0, ps
	extui		abi_tmp0, abi_tmp0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	beqz		abi_tmp0, 1f
	abi_call	trace_hardirqs_off
1:
#endif
#ifdef CONFIG_CONTEXT_TRACKING_USER
	l32i		abi_tmp0, a1, PT_PS
	bbci.l		abi_tmp0, PS_UM_BIT, 1f
	abi_call	user_exit_callable
1:
#endif
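/*
 * The PS.INTLEVEL fixup earlier in common_exception (the non-FAKE_NMI
 * variant), as a C sketch (illustrative): interrupts stay masked at
 * LOCKLEVEL only if we entered for a level-1 interrupt; any other
 * exception keeps the interrupted code's level.
 *
 *	unsigned level = (ps >> PS_INTLEVEL_SHIFT) &
 *			 ((1u << PS_INTLEVEL_WIDTH) - 1);
 *	if (exccause == EXCCAUSE_LEVEL1_INTERRUPT)
 *		level = LOCKLEVEL;		// the moveqz above
 *	ps = level | PS_WOE_MASK;		// KABI_W: windowed ABI only
 *	// PS.EXCM/UM/RING/OWB all end up 0, re-enabling exceptions
 */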
	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */

	l32i		abi_arg1, a1, PT_EXCCAUSE
	rsr		abi_tmp0, excsave1
	addx4		abi_tmp0, abi_arg1, abi_tmp0
	l32i		abi_tmp0, abi_tmp0, EXC_TABLE_DEFAULT
	mov		abi_arg0, a1

	/* Call the second-level handler */

	abi_callx	abi_tmp0

	/* Jump here for exception exit */
	.global common_exception_return
common_exception_return:

#if XTENSA_FAKE_NMI
	l32i		abi_tmp0, a1, PT_EXCCAUSE
	movi		abi_tmp1, EXCCAUSE_MAPPED_NMI
	l32i		abi_saved1, a1, PT_PS
	beq		abi_tmp0, abi_tmp1, .Lrestore_state
#endif
.Ltif_loop:
	irq_save	abi_tmp0, abi_tmp1
#ifdef CONFIG_TRACE_IRQFLAGS
	abi_call	trace_hardirqs_off
#endif

	/* Jump if we are returning from kernel exceptions. */

	l32i		abi_saved1, a1, PT_PS
	GET_THREAD_INFO(abi_tmp0, a1)
	l32i		abi_saved0, abi_tmp0, TI_FLAGS
	_bbci.l		abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 * Note that we don't disable interrupts here.
	 */

	_bbsi.l		abi_saved0, TIF_NEED_RESCHED, .Lresched
	movi		abi_tmp0, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
	bnone		abi_saved0, abi_tmp0, .Lexit_tif_loop_user

	l32i		abi_tmp0, a1, PT_DEPC
	bgeui		abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state

	/* Call do_signal() */

#ifdef CONFIG_TRACE_IRQFLAGS
	abi_call	trace_hardirqs_on
#endif
	rsil		abi_tmp0, 0
	mov		abi_arg0, a1
	abi_call	do_notify_resume	# int do_notify_resume (struct pt_regs*)
	j		.Ltif_loop

.Lresched:
#ifdef CONFIG_TRACE_IRQFLAGS
	abi_call	trace_hardirqs_on
#endif
	rsil		abi_tmp0, 0
	abi_call	schedule	# void schedule (void)
	j		.Ltif_loop

.Lexit_tif_loop_kernel:
#ifdef CONFIG_PREEMPTION
	_bbci.l		abi_saved0, TIF_NEED_RESCHED, .Lrestore_state

	/* Check current_thread_info->preempt_count */

	l32i		abi_tmp1, abi_tmp0, TI_PRE_COUNT
	bnez		abi_tmp1, .Lrestore_state
	abi_call	preempt_schedule_irq
#endif
	j		.Lrestore_state

.Lexit_tif_loop_user:
#ifdef CONFIG_CONTEXT_TRACKING_USER
	abi_call	user_enter_callable
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	_bbci.l		abi_saved0, TIF_DB_DISABLED, 1f
	abi_call	restore_dbreak
1:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
	l32i		abi_tmp0, a1, PT_DEPC
	bgeui		abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
	abi_call	check_tlb_sanity
1:
#endif
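/*
 * The loop above is the usual exit-work loop, approximately (C sketch,
 * illustrative; tracing hooks and the interrupt toggling around each
 * call are elided, and ti_flags is re-read on every pass):
 *
 *	for (;;) {
 *		local_irq_disable();
 *		if (!user_mode(regs))
 *			break;			// .Lexit_tif_loop_kernel
 *		if (ti_flags & _TIF_NEED_RESCHED)
 *			schedule();
 *		else if (ti_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME |
 *				     _TIF_NOTIFY_SIGNAL))
 *			do_notify_resume(regs);
 *		else
 *			break;			// .Lexit_tif_loop_user
 *	}
 */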
.Lrestore_state:
#ifdef CONFIG_TRACE_IRQFLAGS
	extui		abi_tmp0, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei		abi_tmp0, LOCKLEVEL, 1f
	abi_call	trace_hardirqs_on
1:
#endif
	/*
	 * Restore optional registers.
	 * abi_arg* are used as temporary registers here.
	 */

	load_xtregs_opt a1 abi_tmp0 abi_arg0 abi_arg1 abi_arg2 abi_arg3 PT_XTREGS_OPT

	/* Restore SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	l32i		abi_tmp0, a1, PT_SCOMPARE1
	wsr		abi_tmp0, scompare1
#endif
	wsr		abi_saved1, ps		/* disable interrupts */
	_bbci.l		abi_saved1, PS_UM_BIT, kernel_exception_exit

user_exception_exit:

	/* Restore the state of the task and return from the exception. */

#if defined(USER_SUPPORT_WINDOWED)
	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, depc		# use DEPC as temp storage
	wsr	a3, windowstart		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, windowbase		# switch to user's saved WB
	rsync
	rsr	a1, depc		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, .Lclear_regs	# only have to restore current window?

	/* The working registers are a0 and a3.  We are restoring to
	 * a4..a7.  Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
	 */

	mov	a2, a6
	mov	a3, a5

1:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 1b

	/* Clear unrestored registers (don't leak anything to user-space). */

.Lclear_regs:
	rsr	a0, windowbase
	rsr	a3, sar
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *	 frame where we had loaded a2), or at least the lower 4 bits
	 *	 (if we have restored WSBITS-1 frames).
	 */
2:
#else
	movi	a2, 1
#endif
#if XCHAL_HAVE_THREADPTR
	l32i	a3, a1, PT_THREADPTR
	wur	a3, threadptr
#endif

	j	common_exception_exit
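/*
 * kernel_exception_exit below decides whether a deferred movsp is needed
 * with the classic power-of-two test, sketched in C (illustrative):
 *
 *	static bool frame_was_spilled(unsigned windowstart)
 *	{
 *		// exactly one bit set <=> the caller's frame was spilled
 *		return (windowstart & (windowstart - 1)) == 0;
 *	}
 *
 * If only the current frame's bit survives in WINDOWSTART, the caller's
 * spill area now lives on the temporary exception stack, and the four
 * spill slots must be copied below the task stack pointer.
 */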
	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

#if defined(__XTENSA_WINDOWED_ABI__)
	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# spilled before exception, jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr	a3, windowstart
	addi	a0, a3, -1
	and	a3, a3, a0
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi	a0, a1, -16
	l32i	a3, a0, 0
	l32i	a4, a0, 4
	s32i	a3, a1, PT_KERNEL_SIZE + 0
	s32i	a4, a1, PT_KERNEL_SIZE + 4
	l32i	a3, a0, 8
	l32i	a4, a0, 12
	s32i	a3, a1, PT_KERNEL_SIZE + 8
	s32i	a4, a1, PT_KERNEL_SIZE + 12

	/* Common exception exit.
	 * We restore the special register and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */
#else
	movi	a2, 1
#endif

common_exception_exit:

	/* Restore address registers. */

	_bbsi.l	a2, 1, 1f
	l32i	a4, a1, PT_AREG4
	l32i	a5, a1, PT_AREG5
	l32i	a6, a1, PT_AREG6
	l32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8, a1, PT_AREG8
	l32i	a9, a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, epc1
	wsr	a3, sar

	/* Restore LBEG, LEND, LCOUNT */
#if XCHAL_HAVE_LOOPS
	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, lbeg
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, lend
	wsr	a2, lcount
#endif

	/* We control single stepping through the ICOUNTLEVEL register. */

	l32i	a2, a1, PT_ICOUNTLEVEL
	movi	a3, -2
	wsr	a2, icountlevel
	wsr	a3, icount

	/* Check if it was double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

1:	wsr	a0, depc
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

ENDPROC(kernel_exception)
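/*
 * The tail above picks the return instruction from PT_DEPC, roughly
 * (C sketch, illustrative):
 *
 *	if (regs->depc < VALID_DOUBLE_EXCEPTION_ADDRESS) {
 *		rfe();		// regular exception: return via EPC1/PS
 *	} else {
 *		write_depc(regs->depc);
 *		rfde();		// double exception: return via DEPC
 *	}
 */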
/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here,  a0 is trashed and saved to excsave[debuglevel]
 */

	.literal_position

ENTRY(debug_exception)

	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_BIT, .Ldebug_exception_in_exception	# exception mode

	/* Set EPC1 and EXCCAUSE */

	wsr	a2, depc		# save a2 temporarily
	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
	wsr	a2, epc1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, exccause

	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/

	movi	a2, 1 << PS_EXCM_BIT
	or	a2, a0, a2
	wsr	a2, ps

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_BIT, .Ldebug_exception_user	# jump if user mode
	addi	a2, a1, -16 - PT_KERNEL_SIZE		# assume kernel stack

.Ldebug_exception_continue:
	l32i	a0, a3, DT_DEBUG_SAVE
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	xsr	a0, depc
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2

	/* Debug exception is handled as an exception, so interrupts will
	 * likely be enabled in the common exception handler. Disable
	 * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
	 * meaning.
	 */
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
	GET_THREAD_INFO(a2, a1)
	l32i	a3, a2, TI_PRE_COUNT
	addi	a3, a3, 1
	s32i	a3, a2, TI_PRE_COUNT
#endif

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, _user_exception
	j	_kernel_exception

.Ldebug_exception_user:
	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	j	.Ldebug_exception_continue

.Ldebug_exception_in_exception:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Debug exception while in exception mode. This may happen when
	 * window overflow/underflow handler or fast exception handler hits
	 * data breakpoint, in which case save and disable all data
	 * breakpoints, single-step faulting instruction and restore data
	 * breakpoints.
	 */

	bbci.l	a0, PS_UM_BIT, .Ldebug_exception_in_exception	# jump if kernel mode

	rsr	a0, debugcause
	bbsi.l	a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak

	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	l32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	wsr	a0, SREG_DBREAKC + _index
	.set	_index, _index + 1
	.endr

	l32i	a0, a3, DT_ICOUNT_LEVEL_SAVE
	wsr	a0, icountlevel

	l32i	a0, a3, DT_ICOUNT_SAVE
	xsr	a0, icount

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL

.Ldebug_save_dbreak:
	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	movi	a0, 0
	xsr	a0, SREG_DBREAKC + _index
	s32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	.set	_index, _index + 1
	.endr

	movi	a0, XCHAL_EXCM_LEVEL + 1
	xsr	a0, icountlevel
	s32i	a0, a3, DT_ICOUNT_LEVEL_SAVE

	movi	a0, 0xfffffffe
	xsr	a0, icount
	s32i	a0, a3, DT_ICOUNT_SAVE

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL
#else
	/* Debug exception while in exception mode. Should not happen. */
	j	.Ldebug_exception_in_exception
#endif

ENDPROC(debug_exception)
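/*
 * What the in-exception path above saves and restores, as a C sketch
 * (illustrative; field names mirror the DT_* save slots):
 *
 *	struct dbreak_state {
 *		unsigned dbreakc[XCHAL_NUM_DBREAK];	// DT_DBREAKC_SAVE
 *		unsigned icountlevel;			// DT_ICOUNT_LEVEL_SAVE
 *		unsigned icount;			// DT_ICOUNT_SAVE
 *	};
 *
 * On a DBREAK hit it clears every DBREAKC (disabling data breakpoints),
 * arms ICOUNT = -2 at level XCHAL_EXCM_LEVEL + 1 so the faulting
 * instruction is single-stepped, and stashes the old values; the next
 * debug exception then writes everything back and returns with rfi.
 */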
/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so the backtrace stops here.
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stackpointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to setup a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

	.literal_position

ENTRY(unrecoverable_exception)

#if XCHAL_HAVE_WINDOWED
	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync
#endif

	movi	a1, KERNEL_PS_WOE_MASK | LOCKLEVEL
	wsr	a1, ps
	rsync

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	abi_arg0, unrecoverable_text
	abi_call	panic

1:	j	1b

ENDPROC(unrecoverable_exception)

/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

	__XTENSA_HANDLER
	.literal_position

#ifdef SUPPORT_WINDOWED
/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *
 * This algorithm was taken from the Ross Morley's RTOS Porting Layer:
 *
 *    /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
 *
 * It leverages the existing window spill/fill routines and their support for
 * double exceptions. The 'movsp' instruction will only cause an exception if
 * the next window needs to be loaded. In fact, this ALLOCA exception may be
 * replaced at some point by changing the hardware to do an underflow exception
 * of the proper size instead.
 *
 * This algorithm simply backs out the registers to those that would have
 * been saved in the dummy exception handler, makes it appear that we have
 * started a window underflow by rotating the window back and then setting
 * the old window base (OWB) in the 'ps' register with the rolled back window
 * base. The 'movsp' instruction will be re-executed and this time, since the
 * next window frames are in the active AR registers, it won't cause an
 * exception.
 *
 * If the WindowUnderflow code gets a TLB miss, the page will get mapped and
 * the partial WindowUnderflow will be handled in the double exception
 * handler.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_alloca)
	rsr	a0, windowbase
	rotw	-1
	rsr	a2, ps
	extui	a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
	xor	a3, a3, a4
	l32i	a4, a6, PT_AREG0
	l32i	a1, a6, PT_DEPC
	rsr	a6, depc
	wsr	a1, depc
	slli	a3, a3, PS_OWB_SHIFT
	xor	a2, a2, a3
	wsr	a2, ps
	rsync

	_bbci.l	a4, 31, 4f
	rotw	-1
	_bbci.l	a8, 30, 8f
	rotw	-1
	j	_WindowUnderflow12
8:	j	_WindowUnderflow8
4:	j	_WindowUnderflow4
ENDPROC(fast_alloca)
#endif
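/*
 * For context (a sketch of the ABI convention, not code from this file):
 * the windowed ABI keeps a 4-word base save area right below each stack
 * pointer, so anything that changes SP must keep that area reachable:
 *
 *	static void move_stack_pointer(unsigned *new_sp, unsigned *old_sp)
 *	{
 *		memcpy(new_sp - 4, old_sp - 4, 4 * sizeof(*new_sp));
 *		sp = new_sp;	// roughly what 'movsp' expresses
 *	}
 *
 * fast_alloca doesn't emulate this itself; it only rewinds the window so
 * the hardware underflow handlers reload the caller's frame, after which
 * the retried movsp completes without faulting.
 */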
#ifdef CONFIG_USER_ABI_CALL0_PROBE
/*
 * fast illegal instruction handler.
 *
 * This is used to fix up user PS.WOE on the exception caused
 * by the first opcode related to the register window. If PS.WOE is
 * already set it goes directly to the common user exception handler.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 */

ENTRY(fast_illegal_instruction_user)

	rsr	a0, ps
	bbsi.l	a0, PS_WOE_BIT, 1f
	s32i	a3, a2, PT_AREG3
	movi	a3, PS_WOE_MASK
	or	a0, a0, a3
	wsr	a0, ps
#ifdef CONFIG_USER_ABI_CALL0_PROBE
	GET_THREAD_INFO(a3, a2)
	rsr	a0, epc1
	s32i	a0, a3, TI_PS_WOE_FIX_ADDR
#endif
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	rsr	a2, depc
	rfe
1:
	call0	user_exception

ENDPROC(fast_illegal_instruction_user)
#endif

/*
 * fast system calls.
 *
 * WARNING:  The kernel doesn't save the entire user context before
 * handling a fast system call.  These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 */

ENTRY(fast_syscall_user)

	/* Skip syscall. */

	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	call0	user_exception

ENDPROC(fast_syscall_user)

ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i	a0, a2, PT_AREG0	# restore a0
	xsr	a2, depc		# restore a2, depc

	wsr	a0, excsave1
	call0	unrecoverable_exception

ENDPROC(fast_syscall_unrecoverable)
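/*
 * Dispatch in fast_syscall_user, as a C sketch (illustrative):
 *
 *	epc1 += 3;			// skip the syscall instruction
 *	if (regs->depc >= VALID_DOUBLE_EXCEPTION_ADDRESS)
 *		fast_syscall_unrecoverable();
 *	else if (syscall_nr == 0)
 *		fast_syscall_spill_registers();
 *	else if (syscall_nr == __NR_xtensa)
 *		fast_syscall_xtensa();
 *	else
 *		user_exception();	// regular, slow syscall path
 */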
/*
 * sysxtensa syscall handler
 *
 *     int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 *     int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 *     int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 *     int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6                       a3   a4      a5
 *
 * Entry condition:
 *
 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in a0 and DEPC
 *   a3:	a3
 *   a4..a15:	unchanged
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 */

	.literal_position

#ifdef CONFIG_FAST_SYSCALL_XTENSA

ENTRY(fast_syscall_xtensa)

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	_bgeui	a6, SYS_XTENSA_COUNT, .Lill
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

EX(.Leac) l32i	a0, a3, 0		# read old value
	bne	a0, a4, 1f		# same as old value? jump
EX(.Leac) s32i	a5, a3, 0		# different, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

EX(.Leac) l32i	a7, a3, 0		# orig
	addi	a6, a6, -SYS_XTENSA_ATOMIC_SET
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
	addi	a6, a6, SYS_XTENSA_ATOMIC_SET
EX(.Leac) s32i	a0, a3, 0		# write new value

	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	rfe

.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT
	rfe

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe

ENDPROC(fast_syscall_xtensa)

#else /* CONFIG_FAST_SYSCALL_XTENSA */

ENTRY(fast_syscall_xtensa)

	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -ENOSYS
	rfe

ENDPROC(fast_syscall_xtensa)

#endif /* CONFIG_FAST_SYSCALL_XTENSA */
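/*
 * The operations implemented above, as a C sketch (illustrative; the
 * *ptr accesses go through the EX() fixups, so a bad pointer yields
 * -EFAULT instead of faulting):
 *
 *	int sysxtensa(int op, unsigned *ptr, unsigned val, unsigned newval)
 *	{
 *		unsigned old = *ptr;
 *
 *		switch (op) {
 *		case SYS_XTENSA_ATOMIC_SET:	*ptr = val;	  return old;
 *		case SYS_XTENSA_ATOMIC_ADD:
 *		case SYS_XTENSA_ATOMIC_EXG_ADD:	*ptr = old + val; return old;
 *		case SYS_XTENSA_ATOMIC_CMP_SWP:
 *			if (old != val)
 *				return 0;
 *			*ptr = newval;
 *			return 1;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 *
 * Atomicity relies on the handler running with interrupts masked, not on
 * hardware atomics.
 */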
/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */

#if defined(CONFIG_FAST_SYSCALL_SPILL_REGISTERS) && \
		defined(USER_SUPPORT_WINDOWED)

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	xsr	a3, excsave1
	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, windowbase
	s32i	a0, a3, EXC_TABLE_PARAM
	xsr	a3, excsave1		# restore a3 and excsave_1

	/* Save a3, a4 and SAR on stack. */

	rsr	a0, sar
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_SAR

	/* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */

	s32i	a4, a2, PT_AREG4
	s32i	a7, a2, PT_AREG7
	s32i	a8, a2, PT_AREG8
	s32i	a11, a2, PT_AREG11
	s32i	a12, a2, PT_AREG12
	s32i	a15, a2, PT_AREG15

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a4 = yyxxxwww1.
	 */

	rsr	a0, windowbase
	rsr	a3, windowstart		# a3 = xxxwww1yy
	ssr	a0			# holds WB
	slli	a0, a3, WSBITS
	or	a3, a3, a0		# a3 = xxxwww1yyxxxwww1yy
	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1

	/* We are done if there are no more than the current register frame. */

	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
	movi	a0, (1 << (WSBITS-1))
	_beqz	a3, .Lnospill		# only one active frame? jump

	/* We want 1 at the top, so that we return to the current windowbase */

	or	a3, a3, a0		# 1yyxxxwww

	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

	wsr	a3, windowstart		# save shifted windowstart
	neg	a0, a3
	and	a3, a0, a3		# first bit set from right: 000010000

	ffs_ws	a0, a3			# a0: number of frames to the '1' from left
	movi	a3, WSBITS
	sub	a0, a3, a0		# WSBITS-a0:number of 0-bits from right
	ssr	a0			# save in SAR for later.

	rsr	a3, windowbase
	add	a3, a3, a0
	wsr	a3, windowbase
	rsync

	rsr	a3, windowstart
	srl	a3, a3			# shift windowstart

	/* WB is now just one frame below the oldest frame in the register
	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
	   and WS differ by one 4-register frame. */

	/* Save frames. Depending on what call was used (call4, call8,
	 * call12), we have to save 4, 8, or 12 registers.
	 */


.Lloop: _bbsi.l	a3, 1, .Lc4
	_bbci.l	a3, 2, .Lc12

.Lc8:	s32e	a4, a13, -16
	l32e	a4, a5, -12
	s32e	a8, a4, -32
	s32e	a5, a13, -12
	s32e	a6, a13, -8
	s32e	a7, a13, -4
	s32e	a9, a4, -28
	s32e	a10, a4, -24
	s32e	a11, a4, -20
	srli	a11, a3, 2		# shift windowstart by 2
	rotw	2
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc4:	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	srli	a7, a3, 1
	rotw	1
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 2 shouldn't be zero!

	/* 12-register frame (call12) */

	l32e	a0, a5, -12
	s32e	a8, a0, -48
	mov	a8, a0

	s32e	a9, a8, -44
	s32e	a10, a8, -40
	s32e	a11, a8, -36
	s32e	a12, a8, -32
	s32e	a13, a8, -28
	s32e	a14, a8, -24
	s32e	a15, a8, -20
	srli	a15, a3, 3

	/* The stack pointer for a4..a7 is out of reach, so we rotate the
	 * window, grab the stackpointer, and rotate back.
	 * Alternatively, we could also use the following approach, but that
	 * makes the fixup routine much more complicated:
	 * rotw	1
	 * s32e	a0, a13, -16
	 * ...
	 * rotw 2
	 */

	rotw	1
	mov	a4, a13
	rotw	-1

	s32e	a4, a8, -16
	s32e	a5, a8, -12
	s32e	a6, a8, -8
	s32e	a7, a8, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop
.Lexit:

	/* Done. Do the final rotation and set WS. */

	rotw	1
	rsr	a3, windowbase
	ssl	a3
	movi	a3, 1
	sll	a3, a3
	wsr	a3, windowstart
.Lnospill:

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_SAR
	l32i	a0, a2, PT_AREG0
	wsr	a3, sar
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a4, a2, PT_AREG4
	l32i	a7, a2, PT_AREG7
	l32i	a8, a2, PT_AREG8
	l32i	a11, a2, PT_AREG11
	l32i	a12, a2, PT_AREG12
	l32i	a15, a2, PT_AREG15

	movi	a2, 0
	rfe

.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers, so set up a dummy frame and kill the user application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a0, 0

	rsr	a3, excsave1
	l32i	a1, a3, EXC_TABLE_KSTK

	movi	a4, KERNEL_PS_WOE_MASK | LOCKLEVEL
	wsr	a4, ps
	rsync

	movi	abi_arg0, SIGSEGV
	abi_call	make_task_dead

	/* shouldn't return, so panic */

	wsr	a0, excsave1
	call0	unrecoverable_exception
1:	j	1b


ENDPROC(fast_syscall_spill_registers)
/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * Note that we only need to restore the bits in windowstart that have not
 * been spilled yet by the _spill_register routine. Luckily, a3 contains a
 * rotated windowstart with only those bits set for frames that haven't been
 * spilled yet. Because a3 is rotated such that bit 0 represents the register
 * frame for the current windowbase - 1, we need to rotate a3 left by the
 * value of the current windowbase + 1 and move it to windowstart.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

ENTRY(fast_syscall_spill_registers_fixup)

	rsr	a2, windowbase	# get current windowbase (a2 is saved)
	xsr	a0, depc	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 *
	 * Note: we use a3 to set the windowbase, so we take a special care
	 * of it, saving it in the original _spill_registers frame across
	 * the exception handler call.
	 */

	xsr	a3, excsave1	# get spill-mask
	slli	a3, a3, 1	# shift left by one
	addi	a3, a3, 1	# set the bit for the current window frame

	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, windowstart	# set corrected windowstart

	srli	a3, a3, 1
	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_DOUBLE_SAVE	# restore a2
	xsr	a2, excsave1
	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# save a3
	l32i	a3, a2, EXC_TABLE_PARAM	# original WB (in user task)
	xsr	a2, excsave1

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 * a0, a1, a2	same
	 * a3:		trashed (saved in EXC_TABLE_DOUBLE_SAVE)
	 * depc:	depc (we have to return to that address)
	 * excsave_1:	exctable
	 */

	wsr	a3, windowbase
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available
	 *  depc: exception address
	 *  excsave: exctable
	 * Note: This frame might be the same as above.
	 */

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	rsr	a3, excsave1
	rsr	a0, exccause
	addx4	a0, a0, a3			# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
	jx	a0

ENDPROC(fast_syscall_spill_registers_fixup)

ENTRY(fast_syscall_spill_registers_fixup_return)

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, depc		# exception address

	/* Restore fixup handler. */

	rsr	a2, excsave1
	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE
	movi	a3, fast_syscall_spill_registers_fixup
	s32i	a3, a2, EXC_TABLE_FIXUP
	rsr	a3, windowbase
	s32i	a3, a2, EXC_TABLE_PARAM
	l32i	a2, a2, EXC_TABLE_KSTK

	/* Load WB at the time the exception occurred. */

	rsr	a3, sar			# WB is still in SAR
	neg	a3, a3
	wsr	a3, windowbase
	rsync

	rsr	a3, excsave1
	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE

	rfde

ENDPROC(fast_syscall_spill_registers_fixup_return)

#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

ENTRY(fast_syscall_spill_registers)

	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -ENOSYS
	rfe

ENDPROC(fast_syscall_spill_registers)

#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

#ifdef CONFIG_MMU
/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:
	call0	unrecoverable_exception		# should not return
1:	j	1b

ENDPROC(fast_second_level_miss_double_kernel)
/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

	/* Save a1 and a3. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_AREG3

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, .Lfast_second_level_miss_no_mm

.Lfast_second_level_miss_continue:
	rsr	a3, excvaddr		# fault address
	_PGD_OFFSET(a0, a3, a1)
	l32i	a0, a0, 0		# read pmdval
	beqz	a0, .Lfast_second_level_miss_no_pmd

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 *	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 *	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
	 *                 | PAGE_DIRECTORY
	 */

	movi	a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1

	movi	a1, _PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	/*
	 * We utilize all three wired-ways (7-9) to hold the pmd page.
	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
	 * This allows to map the three most common regions to three different
	 * DTLBs:
	 *   0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
	 *   2   -> way 8	shared libaries (2000.0000)
	 *   3   -> way 0	stack (3000.0000)
	 */

	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
	rsr	a1, ptevaddr
	addx2	a3, a3, a3		# ->			0,3,6,9
	srli	a1, a1, PAGE_SHIFT
	extui	a3, a3, 2, 2		# ->			0,0,1,2
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a3, a3, DTLB_WAY_PGD
	add	a1, a1, a3		# ... + way_number

.Lfast_second_level_miss_wdtlb:
	wdtlb	a0, a1
	dsync

	/* Exit critical section. */
.Lfast_second_level_miss_skip_wdtlb:
	rsr	a3, excsave1
	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a3, a2, PT_AREG3
	l32i	a2, a2, PT_DEPC

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */

	rsr	a2, depc
	rfe

	/* Return from double exception. */

1:	xsr	a2, depc
	esync
	rfde
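/*
 * The way-selection arithmetic above, as a C sketch (illustrative):
 *
 *	unsigned region = (excvaddr >> 28) & 3;	// address bits 28..29
 *	unsigned way = ((3 * region) >> 2) + DTLB_WAY_PGD;
 *	// regions 0,1 -> +0,  region 2 -> +1,  region 3 -> +2
 *	unsigned vpn = ptevaddr & PAGE_MASK;
 *	write_dtlb_entry(pteval, vpn + way);
 */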
.Lfast_second_level_miss_no_mm:
	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	bnez	a0, .Lfast_second_level_miss_continue

	/* Even more unlikely case active_mm == 0.
	 * We can get here with NMI in the middle of context_switch that
	 * touches vmalloc area.
	 */
	movi	a0, init_mm
	j	.Lfast_second_level_miss_continue

.Lfast_second_level_miss_no_pmd:
#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	/* Special case for cache aliasing.
	 * We (should) only get here if a clear_user_page, copy_user_page
	 * or the aliased cache flush functions got preemptively interrupted
	 * by another task. Re-establish temporary mapping to the
	 * TLBTEMP_BASE areas.
	 */

	/* We shouldn't be in a double exception */

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lfast_second_level_miss_slow

	/* Make sure the exception originated in the special functions */

	movi	a0, __tlbtemp_mapping_start
	rsr	a3, epc1
	bltu	a3, a0, .Lfast_second_level_miss_slow
	movi	a0, __tlbtemp_mapping_end
	bgeu	a3, a0, .Lfast_second_level_miss_slow

	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */

	movi	a3, TLBTEMP_BASE_1
	rsr	a0, excvaddr
	bltu	a0, a3, .Lfast_second_level_miss_slow

	addi	a1, a0, -TLBTEMP_SIZE
	bgeu	a1, a3, .Lfast_second_level_miss_slow

	/* Check if we have to restore an ITLB mapping. */

	movi	a1, __tlbtemp_mapping_itlb
	rsr	a3, epc1
	sub	a3, a3, a1

	/* Calculate VPN */

	movi	a1, PAGE_MASK
	and	a1, a1, a0

	/* Jump for ITLB entry */

	bgez	a3, 1f

	/* We can use up to two TLBTEMP areas, one for src and one for dst. */

	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
	add	a1, a3, a1

	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */

	mov	a0, a6
	movnez	a0, a7, a3
	j	.Lfast_second_level_miss_wdtlb

	/* ITLB entry. We only use dst in a6. */

1:	witlb	a6, a1
	isync
	j	.Lfast_second_level_miss_skip_wdtlb


#endif	// DCACHE_WAY_SIZE > PAGE_SIZE

	/* Invalid PGD, default exception handling */
.Lfast_second_level_miss_slow:

	rsr	a1, depc
	s32i	a1, a2, PT_AREG2
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	call0	_kernel_exception
1:	call0	_user_exception

ENDPROC(fast_second_level_miss)
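/*
 * The handler below is the fast path of a user write fault; in C it is
 * approximately (illustrative sketch):
 *
 *	unsigned pte = *pte_ptr;
 *	if ((pte & _PAGE_CA_INVALID) == _PAGE_CA_INVALID ||
 *	    !(pte & (1 << _PAGE_WRITABLE_BIT)))
 *		goto slow;		// let do_page_fault sort it out
 *	pte |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
 *	*pte_ptr = pte;
 *	update_dtlb(excvaddr, pte);	// probe and rewrite the entry
 */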
/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the itlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

	/* Save a1 and a3. */

	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_AREG3

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, .Lfast_store_no_mm

.Lfast_store_continue:
	rsr	a1, excvaddr		# fault address
	_PGD_OFFSET(a0, a1, a3)
	l32i	a0, a0, 0
	beqz	a0, .Lfast_store_slow

	/*
	 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
	 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
	 */

	_PTE_OFFSET(a0, a1, a3)
	l32i	a3, a0, 0		# read pteval
	movi	a1, _PAGE_CA_INVALID
	ball	a3, a1, .Lfast_store_slow
	bbci.l	a3, _PAGE_WRITABLE_BIT, .Lfast_store_slow

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
	or	a3, a3, a1
	rsr	a1, excvaddr
	s32i	a3, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1
	wdtlb	a3, a0

	/* Exit critical section. */

	movi	a0, 0
	rsr	a3, excsave1
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a3, a2, PT_AREG3
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
	rsr	a2, depc
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, depc
	esync
	rfde

.Lfast_store_no_mm:
	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	.Lfast_store_continue

	/* If there was a problem, handle fault in C */
.Lfast_store_slow:
	rsr	a1, excvaddr
	pdtlb	a0, a1
	bbci.l	a0, DTLB_HIT_BIT, 1f
	idtlb	a0
1:
	rsr	a3, depc	# still holds a2
	s32i	a3, a2, PT_AREG2
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	call0	_kernel_exception
1:	call0	_user_exception

ENDPROC(fast_store_prohibited)

#endif /* CONFIG_MMU */

	.text
/*
 * System Calls.
 *
 * void system_call (struct pt_regs* regs, int exccause)
 *                            a2                 a3
 */
	.literal_position
ENTRY(system_call)

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_entry_default
#elif defined(__XTENSA_CALL0_ABI__)
	abi_entry(12)

	s32i	a0, sp, 0
	s32i	abi_saved0, sp, 4
	s32i	abi_saved1, sp, 8
	mov	abi_saved0, a2
#else
#error Unsupported Xtensa ABI
#endif

	/* regs->syscall = regs->areg[2] */

	l32i	a7, abi_saved0, PT_AREG2
	s32i	a7, abi_saved0, PT_SYSCALL

	GET_THREAD_INFO(a4, a1)
	l32i	abi_saved1, a4, TI_FLAGS
	movi	a4, _TIF_WORK_MASK
	and	abi_saved1, abi_saved1, a4
	beqz	abi_saved1, 1f

	mov	abi_arg0, abi_saved0
	abi_call	do_syscall_trace_enter
	beqz	abi_rv, .Lsyscall_exit
	l32i	a7, abi_saved0, PT_SYSCALL

1:
	/* syscall = sys_call_table[syscall_nr] */

	movi	a4, sys_call_table
	movi	a5, __NR_syscalls
	movi	abi_rv, -ENOSYS
	bgeu	a7, a5, 1f

	addx4	a4, a7, a4
	l32i	abi_tmp0, a4, 0

	/* Load args: arg0 - arg5 are passed via regs. */

	l32i	abi_arg0, abi_saved0, PT_AREG6
	l32i	abi_arg1, abi_saved0, PT_AREG3
	l32i	abi_arg2, abi_saved0, PT_AREG4
	l32i	abi_arg3, abi_saved0, PT_AREG5
	l32i	abi_arg4, abi_saved0, PT_AREG8
	l32i	abi_arg5, abi_saved0, PT_AREG9

	abi_callx	abi_tmp0

1:	/* regs->areg[2] = return_value */

	s32i	abi_rv, abi_saved0, PT_AREG2
	bnez	abi_saved1, 1f
.Lsyscall_exit:
#if defined(__XTENSA_WINDOWED_ABI__)
	abi_ret_default
#elif defined(__XTENSA_CALL0_ABI__)
	l32i	a0, sp, 0
	l32i	abi_saved0, sp, 4
	l32i	abi_saved1, sp, 8
	abi_ret(12)
#else
#error Unsupported Xtensa ABI
#endif

1:
	mov	abi_arg0, abi_saved0
	abi_call	do_syscall_trace_leave
	j	.Lsyscall_exit

ENDPROC(system_call)
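/*
 * In C, the dispatch above is approximately (illustrative sketch):
 *
 *	regs->syscall = regs->areg[2];
 *	if (work_pending && do_syscall_trace_enter(regs) == 0)
 *		return;				// tracer cancelled the call
 *	nr = regs->syscall;
 *	res = nr < __NR_syscalls
 *		? sys_call_table[nr](regs->areg[6], regs->areg[3],
 *				     regs->areg[4], regs->areg[5],
 *				     regs->areg[8], regs->areg[9])
 *		: -ENOSYS;
 *	regs->areg[2] = res;
 *	if (work_pending)
 *		do_syscall_trace_leave(regs);
 */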
/*
 * Spill live registers on the kernel stack macro.
 *
 * Entry condition: ps.woe is set, ps.excm is cleared
 * Exit condition: windowstart has single bit set
 * May clobber: a12, a13
 */
	.macro	spill_registers_kernel

#if XCHAL_NUM_AREGS > 16
	call12	1f
	_j	2f
	retw
	.align	4
1:
	_entry	a1, 48
	addi	a12, a0, 3
#if XCHAL_NUM_AREGS > 32
	.rept	(XCHAL_NUM_AREGS - 32) / 12
	_entry	a1, 48
	mov	a12, a0
	.endr
#endif
	_entry	a1, 16
#if XCHAL_NUM_AREGS % 12 == 0
	mov	a8, a8
#elif XCHAL_NUM_AREGS % 12 == 4
	mov	a12, a12
#elif XCHAL_NUM_AREGS % 12 == 8
	mov	a4, a4
#endif
	retw
2:
#else
	mov	a12, a12
#endif
	.endm

/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                          a2                    a3
 */

ENTRY(_switch_to)

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_entry(XTENSA_SPILL_STACK_RESERVE)
#elif defined(__XTENSA_CALL0_ABI__)
	abi_entry(16)

	s32i	a12, sp, 0
	s32i	a13, sp, 4
	s32i	a14, sp, 8
	s32i	a15, sp, 12
#else
#error Unsupported Xtensa ABI
#endif
	mov	a11, a3			# and 'next' (a3)

	l32i	a4, a2, TASK_THREAD_INFO
	l32i	a5, a3, TASK_THREAD_INFO

	save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

#if THREAD_RA > 1020 || THREAD_SP > 1020
	addi	a10, a2, TASK_THREAD
	s32i	a0, a10, THREAD_RA - TASK_THREAD	# save return address
	s32i	a1, a10, THREAD_SP - TASK_THREAD	# save stack pointer
#else
	s32i	a0, a2, THREAD_RA	# save return address
	s32i	a1, a2, THREAD_SP	# save stack pointer
#endif

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	movi	a6, __stack_chk_guard
	l32i	a8, a3, TASK_STACK_CANARY
	s32i	a8, a6, 0
#endif

	/* Disable ints while we manipulate the stack pointer. */

	irq_save a14, a3
	rsync

	/* Switch CPENABLE */

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	l32i	a3, a5, THREAD_CPENABLE
#ifdef CONFIG_SMP
	beqz	a3, 1f
	memw			# pairs with memw (2) in fast_coprocessor
	l32i	a6, a5, THREAD_CP_OWNER_CPU
	l32i	a7, a5, THREAD_CPU
	beq	a6, a7, 1f	# load 0 into CPENABLE if current CPU is not the owner
	movi	a3, 0
1:
#endif
/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                              a2                 a3
 */

ENTRY(_switch_to)

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_entry(XTENSA_SPILL_STACK_RESERVE)
#elif defined(__XTENSA_CALL0_ABI__)
	abi_entry(16)

	s32i	a12, sp, 0
	s32i	a13, sp, 4
	s32i	a14, sp, 8
	s32i	a15, sp, 12
#else
#error Unsupported Xtensa ABI
#endif
	mov	a11, a3			# and 'next' (a3)

	l32i	a4, a2, TASK_THREAD_INFO
	l32i	a5, a3, TASK_THREAD_INFO

	save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

#if THREAD_RA > 1020 || THREAD_SP > 1020
	addi	a10, a2, TASK_THREAD
	s32i	a0, a10, THREAD_RA - TASK_THREAD	# save return address
	s32i	a1, a10, THREAD_SP - TASK_THREAD	# save stack pointer
#else
	s32i	a0, a2, THREAD_RA	# save return address
	s32i	a1, a2, THREAD_SP	# save stack pointer
#endif

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	movi	a6, __stack_chk_guard
	l32i	a8, a3, TASK_STACK_CANARY
	s32i	a8, a6, 0
#endif

	/* Disable ints while we manipulate the stack pointer. */

	irq_save a14, a3
	rsync

	/* Switch CPENABLE */

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	l32i	a3, a5, THREAD_CPENABLE
#ifdef CONFIG_SMP
	beqz	a3, 1f
	memw			# pairs with memw (2) in fast_coprocessor
	l32i	a6, a5, THREAD_CP_OWNER_CPU
	l32i	a7, a5, THREAD_CPU
	beq	a6, a7, 1f	# load 0 into CPENABLE if current CPU is not the owner
	movi	a3, 0
1:
#endif
	wsr	a3, cpenable
#endif

#if XCHAL_HAVE_EXCLUSIVE
	l32i	a3, a5, THREAD_ATOMCTL8
	getex	a3
	s32i	a3, a4, THREAD_ATOMCTL8
#endif

	/* Flush register file. */

#if defined(__XTENSA_WINDOWED_ABI__)
	spill_registers_kernel
#endif

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *       because the kernel stack will only be used again after
	 *       we return from kernel space.
	 */

	rsr	a3, excsave1		# exc_table
	addi	a7, a5, PT_REGS_OFFSET
	s32i	a7, a3, EXC_TABLE_KSTK

	/* restore context of the task 'next' */

	l32i	a0, a11, THREAD_RA	# restore return address
	l32i	a1, a11, THREAD_SP	# restore stack pointer

	load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

	wsr	a14, ps
	rsync

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_ret(XTENSA_SPILL_STACK_RESERVE)
#elif defined(__XTENSA_CALL0_ABI__)
	l32i	a12, sp, 0
	l32i	a13, sp, 4
	l32i	a14, sp, 8
	l32i	a15, sp, 12
	abi_ret(16)
#else
#error Unsupported Xtensa ABI
#endif

ENDPROC(_switch_to)
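/* A rough C model of the bookkeeping _switch_to performs.  The structs
 * and names below are illustrative stand-ins (the real offsets come
 * from asm-offsets); the register-window spill and PS manipulation
 * have no C equivalent and are omitted.
 */
#if 0
#include <stdio.h>

struct thread_model {
	unsigned long ra;		/* THREAD_RA: saved return address (a0) */
	unsigned long sp;		/* THREAD_SP: saved stack pointer (a1) */
};

struct task_model {
	struct thread_model thread;
	unsigned long pt_regs_area;	/* what EXC_TABLE_KSTK is pointed at */
};

static unsigned long exc_table_kstk;	/* stands in for the exc_table slot */

static void switch_to_model(struct task_model *prev, struct task_model *next,
			    unsigned long ra, unsigned long sp)
{
	prev->thread.ra = ra;			/* save outgoing context */
	prev->thread.sp = sp;
	exc_table_kstk = next->pt_regs_area;	/* switch kernel exception stack */
	/* ...the assembly then reloads a0/a1 from next->thread and
	 * returns on the incoming task's stack. */
}

int main(void)
{
	struct task_model a = { { 0, 0 }, 0x1000 };
	struct task_model b = { { 0x40001234, 0x2000 }, 0x3000 };

	switch_to_model(&a, &b, 0x40005678, 0x1800);
	printf("prev: ra=%#lx sp=%#lx, kstk=%#lx\n",
	       a.thread.ra, a.thread.sp, exc_table_kstk);
	return 0;
}
#endif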
ENTRY(ret_from_fork)

	/* void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in abi_arg0 (return value from fake call frame)
	 */
	abi_call	schedule_tail

	mov	abi_arg0, a1
	abi_call	do_syscall_trace_leave
	j	common_exception_return

ENDPROC(ret_from_fork)

/*
 * Kernel thread creation helper
 * On entry, set up by copy_thread: abi_saved0 = thread_fn,
 * abi_saved1 = thread_fn arg. Left from _switch_to.
 */
ENTRY(ret_from_kernel_thread)

	abi_call	schedule_tail
	mov	abi_arg0, abi_saved1
	abi_callx	abi_saved0
	j	common_exception_return

ENDPROC(ret_from_kernel_thread)
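/* Hedged userspace model of the kernel-thread helper above: after the
 * scheduler tail work, a new kernel thread simply calls its payload and
 * then falls into the common exception-return path.  The *_stub names
 * are illustrative; abi_saved0/abi_saved1 play the fn/arg roles.
 */
#if 0
#include <stdio.h>

typedef int (*thread_fn_t)(void *);

static void schedule_tail_stub(void) { puts("schedule_tail"); }
static void common_exception_return_stub(void) { puts("exception return"); }

static int demo_thread_fn(void *arg)
{
	printf("thread_fn(%s)\n", (const char *)arg);
	return 0;
}

static void ret_from_kernel_thread_model(thread_fn_t fn, void *arg)
{
	schedule_tail_stub();		/* abi_call schedule_tail */
	fn(arg);			/* abi_callx abi_saved0, arg in abi_arg0 */
	common_exception_return_stub();	/* j common_exception_return */
}

int main(void)
{
	ret_from_kernel_thread_model(demo_thread_fn, "hello");
	return 0;
}
#endif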
#ifdef CONFIG_HIBERNATION

	.section	.bss, "aw"
	.align	4
.Lsaved_regs:
#if defined(__XTENSA_WINDOWED_ABI__)
	.fill	2, 4
#elif defined(__XTENSA_CALL0_ABI__)
	.fill	6, 4
#else
#error Unsupported Xtensa ABI
#endif
	.align	XCHAL_NCP_SA_ALIGN
.Lsaved_user_regs:
	.fill	XTREGS_USER_SIZE, 1

	.previous

ENTRY(swsusp_arch_suspend)

	abi_entry_default

	movi	a2, .Lsaved_regs
	movi	a3, .Lsaved_user_regs
	s32i	a0, a2, 0
	s32i	a1, a2, 4
	save_xtregs_user a3 a4 a5 a6 a7 a8 0
#if defined(__XTENSA_WINDOWED_ABI__)
	spill_registers_kernel
#elif defined(__XTENSA_CALL0_ABI__)
	s32i	a12, a2, 8
	s32i	a13, a2, 12
	s32i	a14, a2, 16
	s32i	a15, a2, 20
#else
#error Unsupported Xtensa ABI
#endif
	abi_call	swsusp_save
	mov	a2, abi_rv
	abi_ret_default

ENDPROC(swsusp_arch_suspend)

ENTRY(swsusp_arch_resume)

	abi_entry_default

#if defined(__XTENSA_WINDOWED_ABI__)
	spill_registers_kernel
#endif

	movi	a2, restore_pblist
	l32i	a2, a2, 0

.Lcopy_pbe:
	l32i	a3, a2, PBE_ADDRESS
	l32i	a4, a2, PBE_ORIG_ADDR

	__loopi	a3, a9, PAGE_SIZE, 16
	l32i	a5, a3, 0
	l32i	a6, a3, 4
	l32i	a7, a3, 8
	l32i	a8, a3, 12
	addi	a3, a3, 16
	s32i	a5, a4, 0
	s32i	a6, a4, 4
	s32i	a7, a4, 8
	s32i	a8, a4, 12
	addi	a4, a4, 16
	__endl	a3, a9

	l32i	a2, a2, PBE_NEXT
	bnez	a2, .Lcopy_pbe

	movi	a2, .Lsaved_regs
	movi	a3, .Lsaved_user_regs
	l32i	a0, a2, 0
	l32i	a1, a2, 4
	load_xtregs_user a3 a4 a5 a6 a7 a8 0
#if defined(__XTENSA_CALL0_ABI__)
	l32i	a12, a2, 8
	l32i	a13, a2, 12
	l32i	a14, a2, 16
	l32i	a15, a2, 20
#endif
	movi	a2, 0
	abi_ret_default

ENDPROC(swsusp_arch_resume)

#endif
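/* The .Lcopy_pbe loop above walks restore_pblist and copies each saved
 * page back over its original page, four words per __loopi iteration.
 * A C rendering of the same structure (struct pbe field order follows
 * include/linux/suspend.h; PAGE_SZ and the demo data are illustrative):
 */
#if 0
#include <stdio.h>
#include <string.h>

#define PAGE_SZ	16			/* stand-in for PAGE_SIZE */

struct pbe {
	void *address;			/* saved copy of the page */
	void *orig_address;		/* where the page originally lived */
	struct pbe *next;
};

static void copy_back_pages(struct pbe *restore_pblist)
{
	for (struct pbe *p = restore_pblist; p; p = p->next)
		memcpy(p->orig_address, p->address, PAGE_SZ);
}

int main(void)
{
	char saved[PAGE_SZ] = "snapshot";
	char live[PAGE_SZ] = "garbage";
	struct pbe one = { saved, live, NULL };

	copy_back_pages(&one);
	printf("%s\n", live);		/* prints "snapshot" */
	return 0;
}
#endif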