1 /* SPDX-License-Identifier: GPL-2.0-only */ !! 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 2 /* 3 * Low-level exception handling code !! 3 * arch/alpha/kernel/entry.S 4 * 4 * 5 * Copyright (C) 2012 ARM Ltd. !! 5 * Kernel entry-points. 6 * Authors: Catalin Marinas <catalin.marina << 7 * Will Deacon <will.deacon@arm.co << 8 */ 6 */ 9 7 10 #include <linux/arm-smccc.h> << 11 #include <linux/init.h> << 12 #include <linux/linkage.h> << 13 << 14 #include <asm/alternative.h> << 15 #include <asm/assembler.h> << 16 #include <asm/asm-offsets.h> 8 #include <asm/asm-offsets.h> 17 #include <asm/asm_pointer_auth.h> << 18 #include <asm/bug.h> << 19 #include <asm/cpufeature.h> << 20 #include <asm/errno.h> << 21 #include <asm/esr.h> << 22 #include <asm/irq.h> << 23 #include <asm/memory.h> << 24 #include <asm/mmu.h> << 25 #include <asm/processor.h> << 26 #include <asm/ptrace.h> << 27 #include <asm/scs.h> << 28 #include <asm/thread_info.h> 9 #include <asm/thread_info.h> 29 #include <asm/asm-uaccess.h> !! 10 #include <asm/pal.h> >> 11 #include <asm/errno.h> 30 #include <asm/unistd.h> 12 #include <asm/unistd.h> 31 13 32 .macro clear_gp_regs << 33 .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12 << 34 mov x\n, xzr << 35 .endr << 36 .endm << 37 << 38 .macro kernel_ventry, el:req, ht:req, << 39 .align 7 << 40 .Lventry_start\@: << 41 .if \el == 0 << 42 /* << 43 * This must be the first instruction << 44 * skipped by the trampoline vectors, << 45 */ << 46 b .Lskip_tramp_vectors_cleanup\@ << 47 .if \regsize == 64 << 48 mrs x30, tpidrro_el0 << 49 msr tpidrro_el0, xzr << 50 .else << 51 mov x30, xzr << 52 .endif << 53 .Lskip_tramp_vectors_cleanup\@: << 54 .endif << 55 << 56 sub sp, sp, #PT_REGS_SIZE << 57 #ifdef CONFIG_VMAP_STACK << 58 /* << 59 * Test whether the SP has overflowed, << 60 * Task and IRQ stacks are aligned so << 61 * should always be zero. << 62 */ << 63 add sp, sp, x0 << 64 sub x0, sp, x0 << 65 tbnz x0, #THREAD_SHIFT, 0f << 66 sub x0, sp, x0 << 67 sub sp, sp, x0 << 68 b el\el\ht\()_\regsize\()_\label << 69 << 70 0: << 71 /* << 72 * Either we've just detected an overf << 73 * while on the overflow stack. Either << 74 * userspace, and can clobber EL0 regi << 75 */ << 76 << 77 /* Stash the original SP (minus PT_REG << 78 msr tpidr_el0, x0 << 79 << 80 /* Recover the original x0 value and s << 81 sub x0, sp, x0 << 82 msr tpidrro_el0, x0 << 83 << 84 /* Switch to the overflow stack */ << 85 adr_this_cpu sp, overflow_stack + OVER << 86 << 87 /* << 88 * Check whether we were already on th << 89 * after panic() re-enables interrupts << 90 */ << 91 mrs x0, tpidr_el0 << 92 sub x0, sp, x0 << 93 tst x0, #~(OVERFLOW_STACK_SIZE - 1 << 94 b.ne __bad_stack << 95 << 96 /* We were already on the overflow sta << 97 sub sp, sp, x0 << 98 mrs x0, tpidrro_el0 << 99 #endif << 100 b el\el\ht\()_\regsize\()_\label << 101 .org .Lventry_start\@ + 128 // Did we over << 102 .endm << 103 << 104 .macro tramp_alias, dst, sym << 105 .set .Lalias\@, TRAMP_VALIAS + \sym << 106 movz \dst, :abs_g2_s:.Lalias\@ << 107 movk \dst, :abs_g1_nc:.Lalias\@ << 108 movk \dst, :abs_g0_nc:.Lalias\@ << 109 .endm << 110 << 111 /* << 112 * This macro corrupts x0-x3. It is th << 113 * them if required. 
<< 114 */ << 115 .macro apply_ssbd, state, tmp1, tmp2 << 116 alternative_cb ARM64_ALWAYS_SYSTEM, spectre_v << 117 b .L__asm_ssbd_skip\@ << 118 alternative_cb_end << 119 ldr_this_cpu \tmp2, arm64_ssbd_call << 120 cbz \tmp2, .L__asm_ssbd_skip\@ << 121 ldr \tmp2, [tsk, #TSK_TI_FLAGS] << 122 tbnz \tmp2, #TIF_SSBD, .L__asm_ssbd << 123 mov w0, #ARM_SMCCC_ARCH_WORKAROUND << 124 mov w1, #\state << 125 alternative_cb ARM64_ALWAYS_SYSTEM, smccc_pat << 126 nop << 127 alternative_cb_end << 128 .L__asm_ssbd_skip\@: << 129 .endm << 130 << 131 /* Check for MTE asynchronous tag chec << 132 .macro check_mte_async_tcf, tmp, ti_fl << 133 #ifdef CONFIG_ARM64_MTE << 134 .arch_extension lse << 135 alternative_if_not ARM64_MTE << 136 b 1f << 137 alternative_else_nop_endif << 138 /* << 139 * Asynchronous tag check faults are o << 140 * ASYM (3) modes. In each of these mo << 141 * set, so skip the check if it is uns << 142 */ << 143 tbz \thread_sctlr, #(SCTLR_EL1_TCF << 144 mrs_s \tmp, SYS_TFSRE0_EL1 << 145 tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, << 146 /* Asynchronous TCF occurred for TTBR0 << 147 mov \tmp, #_TIF_MTE_ASYNC_FAULT << 148 add \ti_flags, tsk, #TSK_TI_FLAGS << 149 stset \tmp, [\ti_flags] << 150 1: << 151 #endif << 152 .endm << 153 << 154 /* Clear the MTE asynchronous tag chec << 155 .macro clear_mte_async_tcf thread_sctl << 156 #ifdef CONFIG_ARM64_MTE << 157 alternative_if ARM64_MTE << 158 /* See comment in check_mte_async_tcf << 159 tbz \thread_sctlr, #(SCTLR_EL1_TCF << 160 dsb ish << 161 msr_s SYS_TFSRE0_EL1, xzr << 162 1: << 163 alternative_else_nop_endif << 164 #endif << 165 .endm << 166 << 167 .macro mte_set_gcr, mte_ctrl, tmp << 168 #ifdef CONFIG_ARM64_MTE << 169 ubfx \tmp, \mte_ctrl, #MTE_CTRL_GCR << 170 orr \tmp, \tmp, #SYS_GCR_EL1_RRND << 171 msr_s SYS_GCR_EL1, \tmp << 172 #endif << 173 .endm << 174 << 175 .macro mte_set_kernel_gcr, tmp, tmp2 << 176 #ifdef CONFIG_KASAN_HW_TAGS << 177 alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_ << 178 b 1f << 179 alternative_cb_end << 180 mov \tmp, KERNEL_GCR_EL1 << 181 msr_s SYS_GCR_EL1, \tmp << 182 1: << 183 #endif << 184 .endm << 185 << 186 .macro mte_set_user_gcr, tsk, tmp, tmp << 187 #ifdef CONFIG_KASAN_HW_TAGS << 188 alternative_cb ARM64_ALWAYS_SYSTEM, kasan_hw_ << 189 b 1f << 190 alternative_cb_end << 191 ldr \tmp, [\tsk, #THREAD_MTE_CTRL] << 192 << 193 mte_set_gcr \tmp, \tmp2 << 194 1: << 195 #endif << 196 .endm << 197 << 198 .macro kernel_entry, el, regsize = 64 << 199 .if \el == 0 << 200 alternative_insn nop, SET_PSTATE_DIT(1 << 201 .endif << 202 .if \regsize == 32 << 203 mov w0, w0 << 204 .endif << 205 stp x0, x1, [sp, #16 * 0] << 206 stp x2, x3, [sp, #16 * 1] << 207 stp x4, x5, [sp, #16 * 2] << 208 stp x6, x7, [sp, #16 * 3] << 209 stp x8, x9, [sp, #16 * 4] << 210 stp x10, x11, [sp, #16 * 5] << 211 stp x12, x13, [sp, #16 * 6] << 212 stp x14, x15, [sp, #16 * 7] << 213 stp x16, x17, [sp, #16 * 8] << 214 stp x18, x19, [sp, #16 * 9] << 215 stp x20, x21, [sp, #16 * 10] << 216 stp x22, x23, [sp, #16 * 11] << 217 stp x24, x25, [sp, #16 * 12] << 218 stp x26, x27, [sp, #16 * 13] << 219 stp x28, x29, [sp, #16 * 14] << 220 << 221 .if \el == 0 << 222 clear_gp_regs << 223 mrs x21, sp_el0 << 224 ldr_this_cpu tsk, __entry_task, x20 << 225 msr sp_el0, tsk << 226 << 227 /* << 228 * Ensure MDSCR_EL1.SS is clear, since << 229 * when scheduling. 
<< 230 */ << 231 ldr x19, [tsk, #TSK_TI_FLAGS] << 232 disable_step_tsk x19, x20 << 233 << 234 /* Check for asynchronous tag check fa << 235 ldr x0, [tsk, THREAD_SCTLR_USER] << 236 check_mte_async_tcf x22, x23, x0 << 237 << 238 #ifdef CONFIG_ARM64_PTR_AUTH << 239 alternative_if ARM64_HAS_ADDRESS_AUTH << 240 /* << 241 * Enable IA for in-kernel PAC if the << 242 * this could be implemented with an u << 243 * a load, this was measured to be slo << 244 * << 245 * Install the kernel IA key only if I << 246 * was disabled on kernel exit then we << 247 * installed so there is no need to in << 248 */ << 249 tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f << 250 __ptrauth_keys_install_kernel_nosync t << 251 b 2f << 252 1: << 253 mrs x0, sctlr_el1 << 254 orr x0, x0, SCTLR_ELx_ENIA << 255 msr sctlr_el1, x0 << 256 2: << 257 alternative_else_nop_endif << 258 #endif << 259 << 260 apply_ssbd 1, x22, x23 << 261 << 262 mte_set_kernel_gcr x22, x23 << 263 << 264 /* << 265 * Any non-self-synchronizing system r << 266 * kernel entry should be placed befor << 267 */ << 268 alternative_if ARM64_MTE << 269 isb << 270 b 1f << 271 alternative_else_nop_endif << 272 alternative_if ARM64_HAS_ADDRESS_AUTH << 273 isb << 274 alternative_else_nop_endif << 275 1: << 276 << 277 scs_load_current << 278 .else << 279 add x21, sp, #PT_REGS_SIZE << 280 get_current_task tsk << 281 .endif /* \el == 0 */ << 282 mrs x22, elr_el1 << 283 mrs x23, spsr_el1 << 284 stp lr, x21, [sp, #S_LR] << 285 << 286 /* << 287 * For exceptions from EL0, create a f << 288 * For exceptions from EL1, create a s << 289 * interrupted code shows up in the ba << 290 */ << 291 .if \el == 0 << 292 stp xzr, xzr, [sp, #S_STACKFRAME] << 293 .else << 294 stp x29, x22, [sp, #S_STACKFRAME] << 295 .endif << 296 add x29, sp, #S_STACKFRAME << 297 << 298 #ifdef CONFIG_ARM64_SW_TTBR0_PAN << 299 alternative_if_not ARM64_HAS_PAN << 300 bl __swpan_entry_el\el << 301 alternative_else_nop_endif << 302 #endif << 303 << 304 stp x22, x23, [sp, #S_PC] << 305 << 306 /* Not in a syscall by default (el0_sv << 307 .if \el == 0 << 308 mov w21, #NO_SYSCALL << 309 str w21, [sp, #S_SYSCALLNO] << 310 .endif << 311 << 312 #ifdef CONFIG_ARM64_PSEUDO_NMI << 313 alternative_if_not ARM64_HAS_GIC_PRIO_MASKING << 314 b .Lskip_pmr_save\@ << 315 alternative_else_nop_endif << 316 << 317 mrs_s x20, SYS_ICC_PMR_EL1 << 318 str x20, [sp, #S_PMR_SAVE] << 319 mov x20, #GIC_PRIO_IRQON | GIC_PRI << 320 msr_s SYS_ICC_PMR_EL1, x20 << 321 << 322 .Lskip_pmr_save\@: << 323 #endif << 324 << 325 /* << 326 * Registers that may be useful after << 327 * << 328 * x20 - ICC_PMR_EL1 << 329 * x21 - aborted SP << 330 * x22 - aborted PC << 331 * x23 - aborted PSTATE << 332 */ << 333 .endm << 334 << 335 .macro kernel_exit, el << 336 .if \el != 0 << 337 disable_daif << 338 .endif << 339 << 340 #ifdef CONFIG_ARM64_PSEUDO_NMI << 341 alternative_if_not ARM64_HAS_GIC_PRIO_MASKING << 342 b .Lskip_pmr_restore\@ << 343 alternative_else_nop_endif << 344 << 345 ldr x20, [sp, #S_PMR_SAVE] << 346 msr_s SYS_ICC_PMR_EL1, x20 << 347 << 348 /* Ensure priority change is seen by r << 349 alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_ << 350 dsb sy << 351 alternative_else_nop_endif << 352 << 353 .Lskip_pmr_restore\@: << 354 #endif << 355 << 356 ldp x21, x22, [sp, #S_PC] << 357 << 358 #ifdef CONFIG_ARM64_SW_TTBR0_PAN << 359 alternative_if_not ARM64_HAS_PAN << 360 bl __swpan_exit_el\el << 361 alternative_else_nop_endif << 362 #endif << 363 << 364 .if \el == 0 << 365 ldr x23, [sp, #S_SP] << 366 msr sp_el0, x23 << 367 tst x22, #PSR_MODE32_BIT << 368 b.eq 3f << 
369 << 370 #ifdef CONFIG_ARM64_ERRATUM_845719 << 371 alternative_if ARM64_WORKAROUND_845719 << 372 #ifdef CONFIG_PID_IN_CONTEXTIDR << 373 mrs x29, contextidr_el1 << 374 msr contextidr_el1, x29 << 375 #else << 376 msr contextidr_el1, xzr << 377 #endif << 378 alternative_else_nop_endif << 379 #endif << 380 3: << 381 scs_save tsk << 382 << 383 /* Ignore asynchronous tag check fault << 384 ldr x0, [tsk, THREAD_SCTLR_USER] << 385 clear_mte_async_tcf x0 << 386 << 387 #ifdef CONFIG_ARM64_PTR_AUTH << 388 alternative_if ARM64_HAS_ADDRESS_AUTH << 389 /* << 390 * IA was enabled for in-kernel PAC. D << 391 * alternatively install the user's IA << 392 * SCTLR bits were updated on task swi << 393 * << 394 * No kernel C function calls after th << 395 */ << 396 tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f << 397 __ptrauth_keys_install_user tsk, x0, x << 398 b 2f << 399 1: << 400 mrs x0, sctlr_el1 << 401 bic x0, x0, SCTLR_ELx_ENIA << 402 msr sctlr_el1, x0 << 403 2: << 404 alternative_else_nop_endif << 405 #endif << 406 << 407 mte_set_user_gcr tsk, x0, x1 << 408 << 409 apply_ssbd 0, x0, x1 << 410 .endif << 411 << 412 msr elr_el1, x21 << 413 msr spsr_el1, x22 << 414 ldp x0, x1, [sp, #16 * 0] << 415 ldp x2, x3, [sp, #16 * 1] << 416 ldp x4, x5, [sp, #16 * 2] << 417 ldp x6, x7, [sp, #16 * 3] << 418 ldp x8, x9, [sp, #16 * 4] << 419 ldp x10, x11, [sp, #16 * 5] << 420 ldp x12, x13, [sp, #16 * 6] << 421 ldp x14, x15, [sp, #16 * 7] << 422 ldp x16, x17, [sp, #16 * 8] << 423 ldp x18, x19, [sp, #16 * 9] << 424 ldp x20, x21, [sp, #16 * 10] << 425 ldp x22, x23, [sp, #16 * 11] << 426 ldp x24, x25, [sp, #16 * 12] << 427 ldp x26, x27, [sp, #16 * 13] << 428 ldp x28, x29, [sp, #16 * 14] << 429 << 430 .if \el == 0 << 431 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 << 432 alternative_insn "b .L_skip_tramp_exit << 433 << 434 msr far_el1, x29 << 435 << 436 ldr_this_cpu x30, this_cpu_vector, << 437 tramp_alias x29, tramp_exit << 438 msr vbar_el1, x30 << 439 ldr lr, [sp, #S_LR] << 440 add sp, sp, #PT_REGS_SIZE << 441 br x29 << 442 << 443 .L_skip_tramp_exit_\@: << 444 #endif << 445 .endif << 446 << 447 ldr lr, [sp, #S_LR] << 448 add sp, sp, #PT_REGS_SIZE << 449 << 450 .if \el == 0 << 451 /* This must be after the last explici << 452 alternative_if ARM64_WORKAROUND_SPECULATIVE_UN << 453 tlbi vale1, xzr << 454 dsb nsh << 455 alternative_else_nop_endif << 456 .else << 457 /* Ensure any device/NC reads complete << 458 alternative_insn nop, "dmb sy", ARM64_ << 459 .endif << 460 << 461 eret << 462 sb << 463 .endm << 464 << 465 #ifdef CONFIG_ARM64_SW_TTBR0_PAN << 466 /* << 467 * Set the TTBR0 PAN bit in SPSR. When << 468 * EL0, there is no need to check the << 469 * accesses are always enabled. << 470 * Note that the meaning of this bit d << 471 * feature as all TTBR0_EL1 accesses a << 472 * user mappings. << 473 */ << 474 SYM_CODE_START_LOCAL(__swpan_entry_el1) << 475 mrs x21, ttbr0_el1 << 476 tst x21, #TTBR_ASID_MASK << 477 orr x23, x23, #PSR_PAN_BIT << 478 b.eq 1f << 479 and x23, x23, #~PSR_PAN_BIT << 480 SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL << 481 __uaccess_ttbr0_disable x21 << 482 1: ret << 483 SYM_CODE_END(__swpan_entry_el1) << 484 << 485 /* << 486 * Restore access to TTBR0_EL1. If ret << 487 * PAN bit checking. 
<< 488 */ << 489 SYM_CODE_START_LOCAL(__swpan_exit_el1) << 490 tbnz x22, #22, 1f << 491 __uaccess_ttbr0_enable x0, x1 << 492 1: and x22, x22, #~PSR_PAN_BIT << 493 ret << 494 SYM_CODE_END(__swpan_exit_el1) << 495 << 496 SYM_CODE_START_LOCAL(__swpan_exit_el0) << 497 __uaccess_ttbr0_enable x0, x1 << 498 /* << 499 * Enable errata workarounds only if r << 500 * workaround currently required for T << 501 * Cavium erratum 27456 (broadcast TLB << 502 * corruption). << 503 */ << 504 b post_ttbr_update_workaround << 505 SYM_CODE_END(__swpan_exit_el0) << 506 #endif << 507 << 508 /* GPRs used by entry code */ << 509 tsk .req x28 // current thr << 510 << 511 .text 14 .text >> 15 .set noat >> 16 .cfi_sections .debug_frame 512 17 513 /* !! 18 /* Stack offsets. */ 514 * Exception vectors. !! 19 #define SP_OFF 184 515 */ !! 20 #define SWITCH_STACK_SIZE 64 516 .pushsection ".entry.text", "ax" !! 21 517 !! 22 .macro CFI_START_OSF_FRAME func 518 .align 11 !! 23 .align 4 519 SYM_CODE_START(vectors) !! 24 .globl \func 520 kernel_ventry 1, t, 64, sync !! 25 .type \func,@function 521 kernel_ventry 1, t, 64, irq !! 26 \func: 522 kernel_ventry 1, t, 64, fiq !! 27 .cfi_startproc simple 523 kernel_ventry 1, t, 64, error !! 28 .cfi_return_column 64 524 !! 29 .cfi_def_cfa $sp, 48 525 kernel_ventry 1, h, 64, sync !! 30 .cfi_rel_offset 64, 8 526 kernel_ventry 1, h, 64, irq !! 31 .cfi_rel_offset $gp, 16 527 kernel_ventry 1, h, 64, fiq !! 32 .cfi_rel_offset $16, 24 528 kernel_ventry 1, h, 64, error !! 33 .cfi_rel_offset $17, 32 529 !! 34 .cfi_rel_offset $18, 40 530 kernel_ventry 0, t, 64, sync !! 35 .endm 531 kernel_ventry 0, t, 64, irq << 532 kernel_ventry 0, t, 64, fiq << 533 kernel_ventry 0, t, 64, error << 534 << 535 kernel_ventry 0, t, 32, sync << 536 kernel_ventry 0, t, 32, irq << 537 kernel_ventry 0, t, 32, fiq << 538 kernel_ventry 0, t, 32, error << 539 SYM_CODE_END(vectors) << 540 << 541 #ifdef CONFIG_VMAP_STACK << 542 SYM_CODE_START_LOCAL(__bad_stack) << 543 /* << 544 * We detected an overflow in kernel_v << 545 * overflow stack. Stash the exception << 546 * handler. << 547 */ << 548 << 549 /* Restore the original x0 value */ << 550 mrs x0, tpidrro_el0 << 551 36 552 /* !! 37 .macro CFI_END_OSF_FRAME func 553 * Store the original GPRs to the new !! 38 .cfi_endproc 554 * PT_REGS_SIZE) was stashed in tpidr_ !! 39 .size \func, . - \func 555 */ !! 40 .endm 556 sub sp, sp, #PT_REGS_SIZE << 557 kernel_entry 1 << 558 mrs x0, tpidr_el0 << 559 add x0, x0, #PT_REGS_SIZE << 560 str x0, [sp, #S_SP] << 561 << 562 /* Stash the regs for handle_bad_stack << 563 mov x0, sp << 564 << 565 /* Time to die */ << 566 bl handle_bad_stack << 567 ASM_BUG() << 568 SYM_CODE_END(__bad_stack) << 569 #endif /* CONFIG_VMAP_STACK */ << 570 << 571 << 572 .macro entry_handler el:req, ht:req, r << 573 SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\ << 574 kernel_entry \el, \regsize << 575 mov x0, sp << 576 bl el\el\ht\()_\regsize\()_\label << 577 .if \el == 0 << 578 b ret_to_user << 579 .else << 580 b ret_to_kernel << 581 .endif << 582 SYM_CODE_END(el\el\ht\()_\regsize\()_\label) << 583 .endm << 584 41 585 /* 42 /* 586 * Early exception handlers !! 43 * This defines the normal kernel pt-regs layout. 587 */ !! 44 * 588 entry_handler 1, t, 64, sync !! 45 * regs 9-15 preserved by C code 589 entry_handler 1, t, 64, irq !! 46 * regs 16-18 saved by PAL-code 590 entry_handler 1, t, 64, fiq !! 47 * regs 29-30 saved and set up by PAL-code 591 entry_handler 1, t, 64, error !! 48 * JRP - Save regs 16-18 in a special area of the stack, so that 592 !! 
49 * the palcode-provided values are available to the signal handler. 593 entry_handler 1, h, 64, sync !! 50 */ 594 entry_handler 1, h, 64, irq !! 51 595 entry_handler 1, h, 64, fiq !! 52 .macro SAVE_ALL 596 entry_handler 1, h, 64, error !! 53 subq $sp, SP_OFF, $sp 597 !! 54 .cfi_adjust_cfa_offset SP_OFF 598 entry_handler 0, t, 64, sync !! 55 stq $0, 0($sp) 599 entry_handler 0, t, 64, irq !! 56 stq $1, 8($sp) 600 entry_handler 0, t, 64, fiq !! 57 stq $2, 16($sp) 601 entry_handler 0, t, 64, error !! 58 stq $3, 24($sp) 602 !! 59 stq $4, 32($sp) 603 entry_handler 0, t, 32, sync !! 60 stq $28, 144($sp) 604 entry_handler 0, t, 32, irq !! 61 .cfi_rel_offset $0, 0 605 entry_handler 0, t, 32, fiq !! 62 .cfi_rel_offset $1, 8 606 entry_handler 0, t, 32, error !! 63 .cfi_rel_offset $2, 16 607 !! 64 .cfi_rel_offset $3, 24 608 SYM_CODE_START_LOCAL(ret_to_kernel) !! 65 .cfi_rel_offset $4, 32 609 kernel_exit 1 !! 66 .cfi_rel_offset $28, 144 610 SYM_CODE_END(ret_to_kernel) !! 67 lda $2, alpha_mv 611 !! 68 stq $5, 40($sp) 612 SYM_CODE_START_LOCAL(ret_to_user) !! 69 stq $6, 48($sp) 613 ldr x19, [tsk, #TSK_TI_FLAGS] !! 70 stq $7, 56($sp) 614 enable_step_tsk x19, x2 !! 71 stq $8, 64($sp) 615 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK !! 72 stq $19, 72($sp) 616 bl stackleak_erase_on_task_stack !! 73 stq $20, 80($sp) 617 #endif !! 74 stq $21, 88($sp) 618 kernel_exit 0 !! 75 ldq $2, HAE_CACHE($2) 619 SYM_CODE_END(ret_to_user) !! 76 stq $22, 96($sp) 620 !! 77 stq $23, 104($sp) 621 .popsection !! 78 stq $24, 112($sp) 622 !! 79 stq $25, 120($sp) 623 // Move from tramp_pg_dir to swapper_p !! 80 stq $26, 128($sp) 624 .macro tramp_map_kernel, tmp !! 81 stq $27, 136($sp) 625 mrs \tmp, ttbr1_el1 !! 82 stq $2, 152($sp) 626 add \tmp, \tmp, #TRAMP_SWAPPER_OFF !! 83 stq $16, 160($sp) 627 bic \tmp, \tmp, #USER_ASID_FLAG !! 84 stq $17, 168($sp) 628 msr ttbr1_el1, \tmp !! 85 stq $18, 176($sp) 629 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 !! 86 .cfi_rel_offset $5, 40 630 alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1 !! 87 .cfi_rel_offset $6, 48 631 /* ASID already in \tmp[63:48] */ !! 88 .cfi_rel_offset $7, 56 632 movk \tmp, #:abs_g2_nc:(TRAMP_VALIA !! 89 .cfi_rel_offset $8, 64 633 movk \tmp, #:abs_g1_nc:(TRAMP_VALIA !! 90 .cfi_rel_offset $19, 72 634 /* 2MB boundary containing the vectors !! 91 .cfi_rel_offset $20, 80 635 movk \tmp, #:abs_g0_nc:((TRAMP_VALI !! 92 .cfi_rel_offset $21, 88 636 isb !! 93 .cfi_rel_offset $22, 96 637 tlbi vae1, \tmp !! 94 .cfi_rel_offset $23, 104 638 dsb nsh !! 95 .cfi_rel_offset $24, 112 639 alternative_else_nop_endif !! 96 .cfi_rel_offset $25, 120 640 #endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */ !! 97 .cfi_rel_offset $26, 128 641 .endm !! 98 .cfi_rel_offset $27, 136 642 !! 99 .endm 643 // Move from swapper_pg_dir to tramp_p << 644 .macro tramp_unmap_kernel, tmp << 645 mrs \tmp, ttbr1_el1 << 646 sub \tmp, \tmp, #TRAMP_SWAPPER_OFF << 647 orr \tmp, \tmp, #USER_ASID_FLAG << 648 msr ttbr1_el1, \tmp << 649 /* << 650 * We avoid running the post_ttbr_upda << 651 * it's only needed by Cavium ThunderX << 652 * disabled. 
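The pt-regs comment and the SAVE_ALL offsets above pin down the kernel stack frame on the Alpha side: SAVE_ALL drops $sp by SP_OFF (184) and fills the slots from 0 to 176, while the 48 bytes above that hold ps/pc/gp/a0-a2 written by PALcode (entSys is the exception and stores a0-a2 itself). The C sketch below is only a reading aid; the name osf_frame and the _Static_assert are illustrative and not taken from the kernel headers, but the offsets are the ones used by the stq/ldq instructions in SAVE_ALL and RESTORE_ALL.

    #include <stddef.h>

    /* Frame layout implied by SAVE_ALL; offsets are from the post-SAVE_ALL $sp. */
    struct osf_frame {
            unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8;   /*   0 ..  64 */
            unsigned long r19, r20, r21, r22, r23, r24, r25;    /*  72 .. 120 */
            unsigned long r26, r27, r28;                        /* 128 .. 144 */
            unsigned long hae;                                  /* 152 */
            unsigned long trap_a0, trap_a1, trap_a2;            /* 160 .. 176: a0-a2 copies kept for signal handlers */
            /* Written by PALcode before the handler runs (entSys fills
             * the last three slots itself at SP_OFF+24..40): */
            unsigned long ps;                                   /* 184 == SP_OFF */
            unsigned long pc;
            unsigned long gp;
            unsigned long r16, r17, r18;                        /* a0 .. a2 */
    };

    /* SP_OFF in the assembly is the distance from $sp to the PAL-saved area. */
    _Static_assert(offsetof(struct osf_frame, ps) == 184, "SP_OFF mismatch");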
<< 653 */ << 654 .endm << 655 << 656 .macro tramp_data_read_var << 657 #ifdef CONFIG_RELOCATABLE << 658 ldr \dst, .L__tramp_data_\ << 659 .ifndef .L__tramp_data_\var << 660 .pushsection ".entry.tramp.rodata", << 661 .align 3 << 662 .L__tramp_data_\var: << 663 .quad \var << 664 .popsection << 665 .endif << 666 #else << 667 /* << 668 * As !RELOCATABLE implies !RANDOMIZE_ << 669 * compile time constant (and hence no << 670 * << 671 * As statically allocated kernel code << 672 * 47 bits of the address space we can << 673 * instruction to load the upper 16 bi << 674 */ << 675 movz \dst, :abs_g2_s:\var << 676 movk \dst, :abs_g1_nc:\var << 677 movk \dst, :abs_g0_nc:\var << 678 #endif << 679 .endm << 680 << 681 #define BHB_MITIGATION_NONE 0 << 682 #define BHB_MITIGATION_LOOP 1 << 683 #define BHB_MITIGATION_FW 2 << 684 #define BHB_MITIGATION_INSN 3 << 685 << 686 .macro tramp_ventry, vector_start, reg << 687 .align 7 << 688 1: << 689 .if \regsize == 64 << 690 msr tpidrro_el0, x30 // Res << 691 .endif << 692 << 693 .if \bhb == BHB_MITIGATION_LOOP << 694 /* << 695 * This sequence must appear before th << 696 * ret out of tramp_ventry. It appears << 697 */ << 698 __mitigate_spectre_bhb_loop x30 << 699 .endif // \bhb == BHB_MITIGATION_LOOP << 700 << 701 .if \bhb == BHB_MITIGATION_INSN << 702 clearbhb << 703 isb << 704 .endif // \bhb == BHB_MITIGATION_INSN << 705 << 706 .if \kpti == 1 << 707 /* << 708 * Defend against branch aliasing atta << 709 * entry onto the return stack and usi << 710 * enter the full-fat kernel vectors. << 711 */ << 712 bl 2f << 713 b . << 714 2: << 715 tramp_map_kernel x30 << 716 alternative_insn isb, nop, ARM64_WORKAROUND_QC << 717 tramp_data_read_var x30, vectors << 718 alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2 << 719 prfm plil1strm, [x30, #(1b - \vecto << 720 alternative_else_nop_endif << 721 << 722 msr vbar_el1, x30 << 723 isb << 724 .else << 725 adr_l x30, vectors << 726 .endif // \kpti == 1 << 727 100 728 .if \bhb == BHB_MITIGATION_FW !! 101 .macro RESTORE_ALL 729 /* !! 102 lda $19, alpha_mv 730 * The firmware sequence must appear b !! 103 ldq $0, 0($sp) 731 * i.e. the ret out of tramp_ventry. B !! 104 ldq $1, 8($sp) 732 * mapped to save/restore the register !! 105 ldq $2, 16($sp) 733 */ !! 106 ldq $3, 24($sp) 734 __mitigate_spectre_bhb_fw !! 107 ldq $21, 152($sp) 735 .endif // \bhb == BHB_MITIGATION_FW !! 108 ldq $20, HAE_CACHE($19) >> 109 ldq $4, 32($sp) >> 110 ldq $5, 40($sp) >> 111 ldq $6, 48($sp) >> 112 ldq $7, 56($sp) >> 113 subq $20, $21, $20 >> 114 ldq $8, 64($sp) >> 115 beq $20, 99f >> 116 ldq $20, HAE_REG($19) >> 117 stq $21, HAE_CACHE($19) >> 118 stq $21, 0($20) >> 119 99: ldq $19, 72($sp) >> 120 ldq $20, 80($sp) >> 121 ldq $21, 88($sp) >> 122 ldq $22, 96($sp) >> 123 ldq $23, 104($sp) >> 124 ldq $24, 112($sp) >> 125 ldq $25, 120($sp) >> 126 ldq $26, 128($sp) >> 127 ldq $27, 136($sp) >> 128 ldq $28, 144($sp) >> 129 addq $sp, SP_OFF, $sp >> 130 .cfi_restore $0 >> 131 .cfi_restore $1 >> 132 .cfi_restore $2 >> 133 .cfi_restore $3 >> 134 .cfi_restore $4 >> 135 .cfi_restore $5 >> 136 .cfi_restore $6 >> 137 .cfi_restore $7 >> 138 .cfi_restore $8 >> 139 .cfi_restore $19 >> 140 .cfi_restore $20 >> 141 .cfi_restore $21 >> 142 .cfi_restore $22 >> 143 .cfi_restore $23 >> 144 .cfi_restore $24 >> 145 .cfi_restore $25 >> 146 .cfi_restore $26 >> 147 .cfi_restore $27 >> 148 .cfi_restore $28 >> 149 .cfi_adjust_cfa_offset -SP_OFF >> 150 .endm 736 151 737 add x30, x30, #(1b - \vector_start !! 152 .macro DO_SWITCH_STACK 738 ret !! 
153 bsr $1, do_switch_stack 739 .org 1b + 128 // Did we overflow the ventry !! 154 .cfi_adjust_cfa_offset SWITCH_STACK_SIZE 740 .endm !! 155 .cfi_rel_offset $9, 0 >> 156 .cfi_rel_offset $10, 8 >> 157 .cfi_rel_offset $11, 16 >> 158 .cfi_rel_offset $12, 24 >> 159 .cfi_rel_offset $13, 32 >> 160 .cfi_rel_offset $14, 40 >> 161 .cfi_rel_offset $15, 48 >> 162 .endm 741 163 742 .macro generate_tramp_vector, kpti, !! 164 .macro UNDO_SWITCH_STACK 743 .Lvector_start\@: !! 165 bsr $1, undo_switch_stack 744 .space 0x400 !! 166 .cfi_restore $9 745 !! 167 .cfi_restore $10 746 .rept 4 !! 168 .cfi_restore $11 747 tramp_ventry .Lvector_start\@, 64, !! 169 .cfi_restore $12 748 .endr !! 170 .cfi_restore $13 749 .rept 4 !! 171 .cfi_restore $14 750 tramp_ventry .Lvector_start\@, 32, !! 172 .cfi_restore $15 751 .endr !! 173 .cfi_adjust_cfa_offset -SWITCH_STACK_SIZE 752 .endm !! 174 .endm 753 175 754 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 << 755 /* 176 /* 756 * Exception vectors trampoline. !! 177 * Non-syscall kernel entry points. 757 * The order must match __bp_harden_el1_vector << 758 * arm64_bp_harden_el1_vectors enum. << 759 */ 178 */ 760 .pushsection ".entry.tramp.text", "ax" << 761 .align 11 << 762 SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors) << 763 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY << 764 generate_tramp_vector kpti=1, bhb=BH << 765 generate_tramp_vector kpti=1, bhb=BH << 766 generate_tramp_vector kpti=1, bhb=BH << 767 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTO << 768 generate_tramp_vector kpti=1, bhb=BH << 769 SYM_CODE_END(tramp_vectors) << 770 << 771 SYM_CODE_START_LOCAL(tramp_exit) << 772 tramp_unmap_kernel x29 << 773 mrs x29, far_el1 << 774 eret << 775 sb << 776 SYM_CODE_END(tramp_exit) << 777 .popsection << 778 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ << 779 179 780 /* !! 180 CFI_START_OSF_FRAME entInt 781 * Exception vectors for spectre mitigations o !! 181 SAVE_ALL 782 * kpti is not in use. !! 182 lda $8, 0x3fff >> 183 lda $26, ret_from_sys_call >> 184 bic $sp, $8, $8 >> 185 mov $sp, $19 >> 186 jsr $31, do_entInt >> 187 CFI_END_OSF_FRAME entInt >> 188 >> 189 CFI_START_OSF_FRAME entArith >> 190 SAVE_ALL >> 191 lda $8, 0x3fff >> 192 lda $26, ret_from_sys_call >> 193 bic $sp, $8, $8 >> 194 mov $sp, $18 >> 195 jsr $31, do_entArith >> 196 CFI_END_OSF_FRAME entArith >> 197 >> 198 CFI_START_OSF_FRAME entMM >> 199 SAVE_ALL >> 200 /* save $9 - $15 so the inline exception code can manipulate them. */ >> 201 subq $sp, 56, $sp >> 202 .cfi_adjust_cfa_offset 56 >> 203 stq $9, 0($sp) >> 204 stq $10, 8($sp) >> 205 stq $11, 16($sp) >> 206 stq $12, 24($sp) >> 207 stq $13, 32($sp) >> 208 stq $14, 40($sp) >> 209 stq $15, 48($sp) >> 210 .cfi_rel_offset $9, 0 >> 211 .cfi_rel_offset $10, 8 >> 212 .cfi_rel_offset $11, 16 >> 213 .cfi_rel_offset $12, 24 >> 214 .cfi_rel_offset $13, 32 >> 215 .cfi_rel_offset $14, 40 >> 216 .cfi_rel_offset $15, 48 >> 217 addq $sp, 56, $19 >> 218 /* handle the fault */ >> 219 lda $8, 0x3fff >> 220 bic $sp, $8, $8 >> 221 jsr $26, do_page_fault >> 222 /* reload the registers after the exception code played. */ >> 223 ldq $9, 0($sp) >> 224 ldq $10, 8($sp) >> 225 ldq $11, 16($sp) >> 226 ldq $12, 24($sp) >> 227 ldq $13, 32($sp) >> 228 ldq $14, 40($sp) >> 229 ldq $15, 48($sp) >> 230 addq $sp, 56, $sp >> 231 .cfi_restore $9 >> 232 .cfi_restore $10 >> 233 .cfi_restore $11 >> 234 .cfi_restore $12 >> 235 .cfi_restore $13 >> 236 .cfi_restore $14 >> 237 .cfi_restore $15 >> 238 .cfi_adjust_cfa_offset -56 >> 239 /* finish up the syscall as normal. 
*/ >> 240 br ret_from_sys_call >> 241 CFI_END_OSF_FRAME entMM >> 242 >> 243 CFI_START_OSF_FRAME entIF >> 244 SAVE_ALL >> 245 lda $8, 0x3fff >> 246 lda $26, ret_from_sys_call >> 247 bic $sp, $8, $8 >> 248 mov $sp, $17 >> 249 jsr $31, do_entIF >> 250 CFI_END_OSF_FRAME entIF >> 251 >> 252 CFI_START_OSF_FRAME entUna >> 253 lda $sp, -256($sp) >> 254 .cfi_adjust_cfa_offset 256 >> 255 stq $0, 0($sp) >> 256 .cfi_rel_offset $0, 0 >> 257 .cfi_remember_state >> 258 ldq $0, 256($sp) /* get PS */ >> 259 stq $1, 8($sp) >> 260 stq $2, 16($sp) >> 261 stq $3, 24($sp) >> 262 and $0, 8, $0 /* user mode? */ >> 263 stq $4, 32($sp) >> 264 bne $0, entUnaUser /* yup -> do user-level unaligned fault */ >> 265 stq $5, 40($sp) >> 266 stq $6, 48($sp) >> 267 stq $7, 56($sp) >> 268 stq $8, 64($sp) >> 269 stq $9, 72($sp) >> 270 stq $10, 80($sp) >> 271 stq $11, 88($sp) >> 272 stq $12, 96($sp) >> 273 stq $13, 104($sp) >> 274 stq $14, 112($sp) >> 275 stq $15, 120($sp) >> 276 /* 16-18 PAL-saved */ >> 277 stq $19, 152($sp) >> 278 stq $20, 160($sp) >> 279 stq $21, 168($sp) >> 280 stq $22, 176($sp) >> 281 stq $23, 184($sp) >> 282 stq $24, 192($sp) >> 283 stq $25, 200($sp) >> 284 stq $26, 208($sp) >> 285 stq $27, 216($sp) >> 286 stq $28, 224($sp) >> 287 mov $sp, $19 >> 288 stq $gp, 232($sp) >> 289 .cfi_rel_offset $1, 1*8 >> 290 .cfi_rel_offset $2, 2*8 >> 291 .cfi_rel_offset $3, 3*8 >> 292 .cfi_rel_offset $4, 4*8 >> 293 .cfi_rel_offset $5, 5*8 >> 294 .cfi_rel_offset $6, 6*8 >> 295 .cfi_rel_offset $7, 7*8 >> 296 .cfi_rel_offset $8, 8*8 >> 297 .cfi_rel_offset $9, 9*8 >> 298 .cfi_rel_offset $10, 10*8 >> 299 .cfi_rel_offset $11, 11*8 >> 300 .cfi_rel_offset $12, 12*8 >> 301 .cfi_rel_offset $13, 13*8 >> 302 .cfi_rel_offset $14, 14*8 >> 303 .cfi_rel_offset $15, 15*8 >> 304 .cfi_rel_offset $19, 19*8 >> 305 .cfi_rel_offset $20, 20*8 >> 306 .cfi_rel_offset $21, 21*8 >> 307 .cfi_rel_offset $22, 22*8 >> 308 .cfi_rel_offset $23, 23*8 >> 309 .cfi_rel_offset $24, 24*8 >> 310 .cfi_rel_offset $25, 25*8 >> 311 .cfi_rel_offset $26, 26*8 >> 312 .cfi_rel_offset $27, 27*8 >> 313 .cfi_rel_offset $28, 28*8 >> 314 .cfi_rel_offset $29, 29*8 >> 315 lda $8, 0x3fff >> 316 stq $31, 248($sp) >> 317 bic $sp, $8, $8 >> 318 jsr $26, do_entUna >> 319 ldq $0, 0($sp) >> 320 ldq $1, 8($sp) >> 321 ldq $2, 16($sp) >> 322 ldq $3, 24($sp) >> 323 ldq $4, 32($sp) >> 324 ldq $5, 40($sp) >> 325 ldq $6, 48($sp) >> 326 ldq $7, 56($sp) >> 327 ldq $8, 64($sp) >> 328 ldq $9, 72($sp) >> 329 ldq $10, 80($sp) >> 330 ldq $11, 88($sp) >> 331 ldq $12, 96($sp) >> 332 ldq $13, 104($sp) >> 333 ldq $14, 112($sp) >> 334 ldq $15, 120($sp) >> 335 /* 16-18 PAL-saved */ >> 336 ldq $19, 152($sp) >> 337 ldq $20, 160($sp) >> 338 ldq $21, 168($sp) >> 339 ldq $22, 176($sp) >> 340 ldq $23, 184($sp) >> 341 ldq $24, 192($sp) >> 342 ldq $25, 200($sp) >> 343 ldq $26, 208($sp) >> 344 ldq $27, 216($sp) >> 345 ldq $28, 224($sp) >> 346 ldq $gp, 232($sp) >> 347 lda $sp, 256($sp) >> 348 .cfi_restore $1 >> 349 .cfi_restore $2 >> 350 .cfi_restore $3 >> 351 .cfi_restore $4 >> 352 .cfi_restore $5 >> 353 .cfi_restore $6 >> 354 .cfi_restore $7 >> 355 .cfi_restore $8 >> 356 .cfi_restore $9 >> 357 .cfi_restore $10 >> 358 .cfi_restore $11 >> 359 .cfi_restore $12 >> 360 .cfi_restore $13 >> 361 .cfi_restore $14 >> 362 .cfi_restore $15 >> 363 .cfi_restore $19 >> 364 .cfi_restore $20 >> 365 .cfi_restore $21 >> 366 .cfi_restore $22 >> 367 .cfi_restore $23 >> 368 .cfi_restore $24 >> 369 .cfi_restore $25 >> 370 .cfi_restore $26 >> 371 .cfi_restore $27 >> 372 .cfi_restore $28 >> 373 .cfi_restore $29 >> 374 
.cfi_adjust_cfa_offset -256 >> 375 call_pal PAL_rti >> 376 >> 377 .align 4 >> 378 entUnaUser: >> 379 .cfi_restore_state >> 380 ldq $0, 0($sp) /* restore original $0 */ >> 381 lda $sp, 256($sp) /* pop entUna's stack frame */ >> 382 .cfi_restore $0 >> 383 .cfi_adjust_cfa_offset -256 >> 384 SAVE_ALL /* setup normal kernel stack */ >> 385 lda $sp, -56($sp) >> 386 .cfi_adjust_cfa_offset 56 >> 387 stq $9, 0($sp) >> 388 stq $10, 8($sp) >> 389 stq $11, 16($sp) >> 390 stq $12, 24($sp) >> 391 stq $13, 32($sp) >> 392 stq $14, 40($sp) >> 393 stq $15, 48($sp) >> 394 .cfi_rel_offset $9, 0 >> 395 .cfi_rel_offset $10, 8 >> 396 .cfi_rel_offset $11, 16 >> 397 .cfi_rel_offset $12, 24 >> 398 .cfi_rel_offset $13, 32 >> 399 .cfi_rel_offset $14, 40 >> 400 .cfi_rel_offset $15, 48 >> 401 lda $8, 0x3fff >> 402 addq $sp, 56, $19 >> 403 bic $sp, $8, $8 >> 404 jsr $26, do_entUnaUser >> 405 ldq $9, 0($sp) >> 406 ldq $10, 8($sp) >> 407 ldq $11, 16($sp) >> 408 ldq $12, 24($sp) >> 409 ldq $13, 32($sp) >> 410 ldq $14, 40($sp) >> 411 ldq $15, 48($sp) >> 412 lda $sp, 56($sp) >> 413 .cfi_restore $9 >> 414 .cfi_restore $10 >> 415 .cfi_restore $11 >> 416 .cfi_restore $12 >> 417 .cfi_restore $13 >> 418 .cfi_restore $14 >> 419 .cfi_restore $15 >> 420 .cfi_adjust_cfa_offset -56 >> 421 br ret_from_sys_call >> 422 CFI_END_OSF_FRAME entUna >> 423 >> 424 CFI_START_OSF_FRAME entDbg >> 425 SAVE_ALL >> 426 lda $8, 0x3fff >> 427 lda $26, ret_from_sys_call >> 428 bic $sp, $8, $8 >> 429 mov $sp, $16 >> 430 jsr $31, do_entDbg >> 431 CFI_END_OSF_FRAME entDbg >> 432 >> 433 /* >> 434 * The system call entry point is special. Most importantly, it looks >> 435 * like a function call to userspace as far as clobbered registers. We >> 436 * do preserve the argument registers (for syscall restarts) and $26 >> 437 * (for leaf syscall functions). >> 438 * >> 439 * So much for theory. We don't take advantage of this yet. >> 440 * >> 441 * Note that a0-a2 are not saved by PALcode as with the other entry points. 
783 */ 442 */ 784 .macro generate_el1_vector, bhb << 785 .Lvector_start\@: << 786 kernel_ventry 1, t, 64, sync << 787 kernel_ventry 1, t, 64, irq << 788 kernel_ventry 1, t, 64, fiq << 789 kernel_ventry 1, t, 64, error << 790 << 791 kernel_ventry 1, h, 64, sync << 792 kernel_ventry 1, h, 64, irq << 793 kernel_ventry 1, h, 64, fiq << 794 kernel_ventry 1, h, 64, error << 795 << 796 .rept 4 << 797 tramp_ventry .Lvector_start\@, 64, << 798 .endr << 799 .rept 4 << 800 tramp_ventry .Lvector_start\@, 32, << 801 .endr << 802 .endm << 803 << 804 /* The order must match tramp_vecs and the arm << 805 .pushsection ".entry.text", "ax" << 806 .align 11 << 807 SYM_CODE_START(__bp_harden_el1_vectors) << 808 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY << 809 generate_el1_vector bhb=BHB_MITIGA << 810 generate_el1_vector bhb=BHB_MITIGA << 811 generate_el1_vector bhb=BHB_MITIGA << 812 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTO << 813 SYM_CODE_END(__bp_harden_el1_vectors) << 814 .popsection << 815 443 >> 444 .align 4 >> 445 .globl entSys >> 446 .type entSys, @function >> 447 .cfi_startproc simple >> 448 .cfi_return_column 64 >> 449 .cfi_def_cfa $sp, 48 >> 450 .cfi_rel_offset 64, 8 >> 451 .cfi_rel_offset $gp, 16 >> 452 entSys: >> 453 SAVE_ALL >> 454 lda $8, 0x3fff >> 455 bic $sp, $8, $8 >> 456 lda $4, NR_syscalls($31) >> 457 stq $16, SP_OFF+24($sp) >> 458 lda $5, sys_call_table >> 459 lda $27, sys_ni_syscall >> 460 cmpult $0, $4, $4 >> 461 ldl $3, TI_FLAGS($8) >> 462 stq $17, SP_OFF+32($sp) >> 463 s8addq $0, $5, $5 >> 464 stq $18, SP_OFF+40($sp) >> 465 .cfi_rel_offset $16, SP_OFF+24 >> 466 .cfi_rel_offset $17, SP_OFF+32 >> 467 .cfi_rel_offset $18, SP_OFF+40 >> 468 #ifdef CONFIG_AUDITSYSCALL >> 469 lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT >> 470 and $3, $6, $3 >> 471 bne $3, strace >> 472 #else >> 473 blbs $3, strace /* check for SYSCALL_TRACE in disguise */ >> 474 #endif >> 475 beq $4, 1f >> 476 ldq $27, 0($5) >> 477 1: jsr $26, ($27), sys_ni_syscall >> 478 ldgp $gp, 0($26) >> 479 blt $0, $syscall_error /* the call failed */ >> 480 $ret_success: >> 481 stq $0, 0($sp) >> 482 stq $31, 72($sp) /* a3=0 => no error */ >> 483 >> 484 .align 4 >> 485 .globl ret_from_sys_call >> 486 ret_from_sys_call: >> 487 cmovne $26, 0, $18 /* $18 = 0 => non-restartable */ >> 488 ldq $0, SP_OFF($sp) >> 489 and $0, 8, $0 >> 490 beq $0, ret_to_kernel >> 491 ret_to_user: >> 492 /* Make sure need_resched and sigpending don't change between >> 493 sampling and the rti. */ >> 494 lda $16, 7 >> 495 call_pal PAL_swpipl >> 496 ldl $17, TI_FLAGS($8) >> 497 and $17, _TIF_WORK_MASK, $2 >> 498 bne $2, work_pending >> 499 restore_all: >> 500 ldl $2, TI_STATUS($8) >> 501 and $2, TS_SAVED_FP | TS_RESTORE_FP, $3 >> 502 bne $3, restore_fpu >> 503 restore_other: >> 504 .cfi_remember_state >> 505 RESTORE_ALL >> 506 call_pal PAL_rti >> 507 >> 508 ret_to_kernel: >> 509 .cfi_restore_state >> 510 lda $16, 7 >> 511 call_pal PAL_swpipl >> 512 br restore_other >> 513 >> 514 .align 3 >> 515 $syscall_error: >> 516 /* >> 517 * Some system calls (e.g., ptrace) can return arbitrary >> 518 * values which might normally be mistaken as error numbers. >> 519 * Those functions must zero $0 (v0) directly in the stack >> 520 * frame to indicate that a negative return value wasn't an >> 521 * error number.. >> 522 */ >> 523 ldq $18, 0($sp) /* old syscall nr (zero if success) */ >> 524 beq $18, $ret_success >> 525 >> 526 ldq $19, 72($sp) /* .. 
and this a3 */ >> 527 subq $31, $0, $0 /* with error in v0 */ >> 528 addq $31, 1, $1 /* set a3 for errno return */ >> 529 stq $0, 0($sp) >> 530 mov $31, $26 /* tell "ret_from_sys_call" we can restart */ >> 531 stq $1, 72($sp) /* a3 for return */ >> 532 br ret_from_sys_call 816 533 817 /* 534 /* 818 * Register switch for AArch64. The callee-sav !! 535 * Do all cleanup when returning from all interrupts and system calls. 819 * and restored. On entry: << 820 * x0 = previous task_struct (must be preser << 821 * x1 = next task_struct << 822 * Previous and next are guaranteed not to be << 823 * 536 * 824 */ !! 537 * Arguments: 825 SYM_FUNC_START(cpu_switch_to) !! 538 * $8: current. 826 mov x10, #THREAD_CPU_CONTEXT !! 539 * $17: TI_FLAGS. 827 add x8, x0, x10 !! 540 * $18: The old syscall number, or zero if this is not a return 828 mov x9, sp !! 541 * from a syscall that errored and is possibly restartable. 829 stp x19, x20, [x8], #16 !! 542 * $19: The old a3 value 830 stp x21, x22, [x8], #16 !! 543 */ 831 stp x23, x24, [x8], #16 !! 544 832 stp x25, x26, [x8], #16 !! 545 .align 4 833 stp x27, x28, [x8], #16 !! 546 .type work_pending, @function 834 stp x29, x9, [x8], #16 !! 547 work_pending: 835 str lr, [x8] !! 548 and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL, $2 836 add x8, x1, x10 !! 549 bne $2, $work_notifysig 837 ldp x19, x20, [x8], #16 !! 550 838 ldp x21, x22, [x8], #16 !! 551 $work_resched: 839 ldp x23, x24, [x8], #16 !! 552 /* 840 ldp x25, x26, [x8], #16 !! 553 * We can get here only if we returned from syscall without SIGPENDING 841 ldp x27, x28, [x8], #16 !! 554 * or got through work_notifysig already. Either case means no syscall 842 ldp x29, x9, [x8], #16 !! 555 * restarts for us, so let $18 and $19 burn. 843 ldr lr, [x8] !! 556 */ 844 mov sp, x9 !! 557 jsr $26, schedule 845 msr sp_el0, x1 !! 558 mov 0, $18 846 ptrauth_keys_install_kernel x1, x8, x9 !! 559 br ret_to_user 847 scs_save x0 !! 560 848 scs_load_current !! 561 $work_notifysig: >> 562 mov $sp, $16 >> 563 DO_SWITCH_STACK >> 564 jsr $26, do_work_pending >> 565 UNDO_SWITCH_STACK >> 566 br restore_all >> 567 >> 568 /* >> 569 * PTRACE syscall handler >> 570 */ >> 571 >> 572 .align 4 >> 573 .type strace, @function >> 574 strace: >> 575 /* set up signal stack, call syscall_trace */ >> 576 // NB: if anyone adds preemption, this block will need to be protected >> 577 ldl $1, TI_STATUS($8) >> 578 and $1, TS_SAVED_FP, $3 >> 579 or $1, TS_SAVED_FP, $2 >> 580 bne $3, 1f >> 581 stl $2, TI_STATUS($8) >> 582 bsr $26, __save_fpu >> 583 1: >> 584 DO_SWITCH_STACK >> 585 jsr $26, syscall_trace_enter /* returns the syscall number */ >> 586 UNDO_SWITCH_STACK >> 587 >> 588 /* get the arguments back.. */ >> 589 ldq $16, SP_OFF+24($sp) >> 590 ldq $17, SP_OFF+32($sp) >> 591 ldq $18, SP_OFF+40($sp) >> 592 ldq $19, 72($sp) >> 593 ldq $20, 80($sp) >> 594 ldq $21, 88($sp) >> 595 >> 596 /* get the system call pointer.. */ >> 597 lda $1, NR_syscalls($31) >> 598 lda $2, sys_call_table >> 599 lda $27, sys_ni_syscall >> 600 cmpult $0, $1, $1 >> 601 s8addq $0, $2, $2 >> 602 beq $1, 1f >> 603 ldq $27, 0($2) >> 604 1: jsr $26, ($27), sys_gettimeofday >> 605 ret_from_straced: >> 606 ldgp $gp, 0($26) >> 607 >> 608 /* check return.. 
*/ >> 609 blt $0, $strace_error /* the call failed */ >> 610 $strace_success: >> 611 stq $31, 72($sp) /* a3=0 => no error */ >> 612 stq $0, 0($sp) /* save return value */ >> 613 >> 614 DO_SWITCH_STACK >> 615 jsr $26, syscall_trace_leave >> 616 UNDO_SWITCH_STACK >> 617 br $31, ret_from_sys_call >> 618 >> 619 .align 3 >> 620 $strace_error: >> 621 ldq $18, 0($sp) /* old syscall nr (zero if success) */ >> 622 beq $18, $strace_success >> 623 ldq $19, 72($sp) /* .. and this a3 */ >> 624 >> 625 subq $31, $0, $0 /* with error in v0 */ >> 626 addq $31, 1, $1 /* set a3 for errno return */ >> 627 stq $0, 0($sp) >> 628 stq $1, 72($sp) /* a3 for return */ >> 629 >> 630 DO_SWITCH_STACK >> 631 mov $18, $9 /* save old syscall number */ >> 632 mov $19, $10 /* save old a3 */ >> 633 jsr $26, syscall_trace_leave >> 634 mov $9, $18 >> 635 mov $10, $19 >> 636 UNDO_SWITCH_STACK >> 637 >> 638 mov $31, $26 /* tell "ret_from_sys_call" we can restart */ >> 639 br ret_from_sys_call >> 640 CFI_END_OSF_FRAME entSys >> 641 >> 642 /* >> 643 * Save and restore the switch stack -- aka the balance of the user context. >> 644 */ >> 645 >> 646 .align 4 >> 647 .type do_switch_stack, @function >> 648 .cfi_startproc simple >> 649 .cfi_return_column 64 >> 650 .cfi_def_cfa $sp, 0 >> 651 .cfi_register 64, $1 >> 652 do_switch_stack: >> 653 lda $sp, -SWITCH_STACK_SIZE($sp) >> 654 .cfi_adjust_cfa_offset SWITCH_STACK_SIZE >> 655 stq $9, 0($sp) >> 656 stq $10, 8($sp) >> 657 stq $11, 16($sp) >> 658 stq $12, 24($sp) >> 659 stq $13, 32($sp) >> 660 stq $14, 40($sp) >> 661 stq $15, 48($sp) >> 662 stq $26, 56($sp) >> 663 ret $31, ($1), 1 >> 664 .cfi_endproc >> 665 .size do_switch_stack, .-do_switch_stack >> 666 >> 667 .align 4 >> 668 .type undo_switch_stack, @function >> 669 .cfi_startproc simple >> 670 .cfi_def_cfa $sp, 0 >> 671 .cfi_register 64, $1 >> 672 undo_switch_stack: >> 673 ldq $9, 0($sp) >> 674 ldq $10, 8($sp) >> 675 ldq $11, 16($sp) >> 676 ldq $12, 24($sp) >> 677 ldq $13, 32($sp) >> 678 ldq $14, 40($sp) >> 679 ldq $15, 48($sp) >> 680 ldq $26, 56($sp) >> 681 lda $sp, SWITCH_STACK_SIZE($sp) >> 682 ret $31, ($1), 1 >> 683 .cfi_endproc >> 684 .size undo_switch_stack, .-undo_switch_stack >> 685 >> 686 #define FR(n) n * 8 + TI_FP($8) >> 687 .align 4 >> 688 .globl __save_fpu >> 689 .type __save_fpu, @function >> 690 __save_fpu: >> 691 #define V(n) stt $f##n, FR(n) >> 692 V( 0); V( 1); V( 2); V( 3) >> 693 V( 4); V( 5); V( 6); V( 7) >> 694 V( 8); V( 9); V(10); V(11) >> 695 V(12); V(13); V(14); V(15) >> 696 V(16); V(17); V(18); V(19) >> 697 V(20); V(21); V(22); V(23) >> 698 V(24); V(25); V(26); V(27) >> 699 mf_fpcr $f0 # get fpcr >> 700 V(28); V(29); V(30) >> 701 stt $f0, FR(31) # save fpcr in slot of $f31 >> 702 ldt $f0, FR(0) # don't let "__save_fpu" change fp state. 849 ret 703 ret 850 SYM_FUNC_END(cpu_switch_to) !! 704 #undef V 851 NOKPROBE(cpu_switch_to) !! 705 .size __save_fpu, .-__save_fpu 852 706 853 /* !! 707 .align 4 854 * This is how we return from a fork. !! 708 restore_fpu: 855 */ !! 709 and $3, TS_RESTORE_FP, $3 856 SYM_CODE_START(ret_from_fork) !! 710 bic $2, TS_SAVED_FP | TS_RESTORE_FP, $2 857 bl schedule_tail !! 711 beq $3, 1f 858 cbz x19, 1f !! 712 #define V(n) ldt $f##n, FR(n) 859 mov x0, x20 !! 713 ldt $f30, FR(31) # get saved fpcr 860 blr x19 !! 714 V( 0); V( 1); V( 2); V( 3) 861 1: get_current_task tsk !! 715 mt_fpcr $f30 # install saved fpcr 862 mov x0, sp !! 716 V( 4); V( 5); V( 6); V( 7) 863 bl asm_exit_to_user_mode !! 717 V( 8); V( 9); V(10); V(11) 864 b ret_to_user !! 
718 V(12); V(13); V(14); V(15) 865 SYM_CODE_END(ret_from_fork) !! 719 V(16); V(17); V(18); V(19) 866 NOKPROBE(ret_from_fork) !! 720 V(20); V(21); V(22); V(23) >> 721 V(24); V(25); V(26); V(27) >> 722 V(28); V(29); V(30) >> 723 1: stl $2, TI_STATUS($8) >> 724 br restore_other >> 725 #undef V >> 726 >> 727 >> 728 /* >> 729 * The meat of the context switch code. >> 730 */ >> 731 .align 4 >> 732 .globl alpha_switch_to >> 733 .type alpha_switch_to, @function >> 734 .cfi_startproc >> 735 alpha_switch_to: >> 736 DO_SWITCH_STACK >> 737 ldl $1, TI_STATUS($8) >> 738 and $1, TS_RESTORE_FP, $3 >> 739 bne $3, 1f >> 740 or $1, TS_RESTORE_FP | TS_SAVED_FP, $2 >> 741 and $1, TS_SAVED_FP, $3 >> 742 stl $2, TI_STATUS($8) >> 743 bne $3, 1f >> 744 bsr $26, __save_fpu >> 745 1: >> 746 call_pal PAL_swpctx >> 747 lda $8, 0x3fff >> 748 UNDO_SWITCH_STACK >> 749 bic $sp, $8, $8 >> 750 mov $17, $0 >> 751 ret >> 752 .cfi_endproc >> 753 .size alpha_switch_to, .-alpha_switch_to 867 754 868 /* 755 /* 869 * void call_on_irq_stack(struct pt_regs *regs !! 756 * New processes begin life here. 870 * void (*func)(struct << 871 * << 872 * Calls func(regs) using this CPU's irq stack << 873 */ 757 */ 874 SYM_FUNC_START(call_on_irq_stack) << 875 #ifdef CONFIG_SHADOW_CALL_STACK << 876 get_current_task x16 << 877 scs_save x16 << 878 ldr_this_cpu scs_sp, irq_shadow_call_s << 879 #endif << 880 758 881 /* Create a frame record to save our L !! 759 .globl ret_from_fork 882 stp x29, x30, [sp, #-16]! !! 760 .align 4 883 mov x29, sp !! 761 .ent ret_from_fork 884 !! 762 ret_from_fork: 885 ldr_this_cpu x16, irq_stack_ptr, x17 !! 763 lda $26, ret_to_user 886 !! 764 mov $17, $16 887 /* Move to the new stack and call the !! 765 jmp $31, schedule_tail 888 add sp, x16, #IRQ_STACK_SIZE !! 766 .end ret_from_fork 889 blr x1 !! 767 890 !! 768 /* 891 /* !! 769 * ... and new kernel threads - here 892 * Restore the SP from the FP, and res !! 770 */ 893 * record. !! 771 .align 4 894 */ !! 772 .globl ret_from_kernel_thread 895 mov sp, x29 !! 773 .ent ret_from_kernel_thread 896 ldp x29, x30, [sp], #16 !! 774 ret_from_kernel_thread: 897 scs_load_current !! 775 mov $17, $16 >> 776 jsr $26, schedule_tail >> 777 mov $9, $27 >> 778 mov $10, $16 >> 779 jsr $26, ($9) >> 780 br $31, ret_to_user >> 781 .end ret_from_kernel_thread >> 782 >> 783 >> 784 /* >> 785 * Special system calls. Most of these are special in that they either >> 786 * have to play switch_stack games. >> 787 */ >> 788 >> 789 .macro fork_like name >> 790 .align 4 >> 791 .globl alpha_\name >> 792 .ent alpha_\name >> 793 alpha_\name: >> 794 .prologue 0 >> 795 bsr $1, do_switch_stack >> 796 // NB: if anyone adds preemption, this block will need to be protected >> 797 ldl $1, TI_STATUS($8) >> 798 and $1, TS_SAVED_FP, $3 >> 799 or $1, TS_SAVED_FP, $2 >> 800 bne $3, 1f >> 801 stl $2, TI_STATUS($8) >> 802 bsr $26, __save_fpu >> 803 1: >> 804 jsr $26, sys_\name >> 805 ldq $26, 56($sp) >> 806 lda $sp, SWITCH_STACK_SIZE($sp) 898 ret 807 ret 899 SYM_FUNC_END(call_on_irq_stack) !! 808 .end alpha_\name 900 NOKPROBE(call_on_irq_stack) << 901 << 902 #ifdef CONFIG_ARM_SDE_INTERFACE << 903 << 904 #include <asm/sdei.h> << 905 #include <uapi/linux/arm_sdei.h> << 906 << 907 .macro sdei_handler_exit exit_mode << 908 /* On success, this call never returns << 909 cmp \exit_mode, #SDEI_EXIT_SMC << 910 b.ne 99f << 911 smc #0 << 912 b . << 913 99: hvc #0 << 914 b . << 915 .endm 809 .endm 916 810 917 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 !! 811 fork_like fork 918 /* !! 
812 fork_like vfork 919 * The regular SDEI entry point may have been !! 813 fork_like clone 920 * the kernel. This trampoline restores the ke !! 814 fork_like clone3 921 * argument accessible. !! 815 922 * !! 816 .macro sigreturn_like name 923 * This clobbers x4, __sdei_handler() will res !! 817 .align 4 924 * copy. !! 818 .globl sys_\name 925 */ !! 819 .ent sys_\name 926 .pushsection ".entry.tramp.text", "ax" !! 820 sys_\name: 927 SYM_CODE_START(__sdei_asm_entry_trampoline) !! 821 .prologue 0 928 mrs x4, ttbr1_el1 !! 822 lda $9, ret_from_straced 929 tbz x4, #USER_ASID_BIT, 1f !! 823 cmpult $26, $9, $9 930 !! 824 lda $sp, -SWITCH_STACK_SIZE($sp) 931 tramp_map_kernel tmp=x4 !! 825 jsr $26, do_\name 932 isb !! 826 bne $9, 1f 933 mov x4, xzr !! 827 jsr $26, syscall_trace_leave 934 !! 828 1: br $1, undo_switch_stack 935 /* !! 829 br ret_from_sys_call 936 * Remember whether to unmap the kerne !! 830 .end sys_\name 937 */ !! 831 .endm 938 1: str x4, [x1, #(SDEI_EVENT_INTREGS << 939 tramp_data_read_var x4, __sdei_asm << 940 br x4 << 941 SYM_CODE_END(__sdei_asm_entry_trampoline) << 942 NOKPROBE(__sdei_asm_entry_trampoline) << 943 << 944 /* << 945 * Make the exit call and restore the original << 946 * << 947 * x0 & x1: setup for the exit API call << 948 * x2: exit_mode << 949 * x4: struct sdei_registered_event argument f << 950 */ << 951 SYM_CODE_START(__sdei_asm_exit_trampoline) << 952 ldr x4, [x4, #(SDEI_EVENT_INTREGS << 953 cbnz x4, 1f << 954 << 955 tramp_unmap_kernel tmp=x4 << 956 << 957 1: sdei_handler_exit exit_mode=x2 << 958 SYM_CODE_END(__sdei_asm_exit_trampoline) << 959 NOKPROBE(__sdei_asm_exit_trampoline) << 960 .popsection // .entry.tramp.text << 961 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ << 962 << 963 /* << 964 * Software Delegated Exception entry point. << 965 * << 966 * x0: Event number << 967 * x1: struct sdei_registered_event argument f << 968 * x2: interrupted PC << 969 * x3: interrupted PSTATE << 970 * x4: maybe clobbered by the trampoline << 971 * << 972 * Firmware has preserved x0->x17 for us, we m << 973 * follow SMC-CC. We save (or retrieve) all th << 974 * want them. << 975 */ << 976 SYM_CODE_START(__sdei_asm_handler) << 977 stp x2, x3, [x1, #SDEI_EVENT_INTRE << 978 stp x4, x5, [x1, #SDEI_EVENT_INTRE << 979 stp x6, x7, [x1, #SDEI_EVENT_INTRE << 980 stp x8, x9, [x1, #SDEI_EVENT_INTRE << 981 stp x10, x11, [x1, #SDEI_EVENT_INT << 982 stp x12, x13, [x1, #SDEI_EVENT_INT << 983 stp x14, x15, [x1, #SDEI_EVENT_INT << 984 stp x16, x17, [x1, #SDEI_EVENT_INT << 985 stp x18, x19, [x1, #SDEI_EVENT_INT << 986 stp x20, x21, [x1, #SDEI_EVENT_INT << 987 stp x22, x23, [x1, #SDEI_EVENT_INT << 988 stp x24, x25, [x1, #SDEI_EVENT_INT << 989 stp x26, x27, [x1, #SDEI_EVENT_INT << 990 stp x28, x29, [x1, #SDEI_EVENT_INT << 991 mov x4, sp << 992 stp lr, x4, [x1, #SDEI_EVENT_INTRE << 993 << 994 mov x19, x1 << 995 << 996 /* Store the registered-event for cras << 997 ldrb w4, [x19, #SDEI_EVENT_PRIORITY << 998 cbnz w4, 1f << 999 adr_this_cpu dst=x5, sym=sdei_active_n << 1000 b 2f << 1001 1: adr_this_cpu dst=x5, sym=sdei_active_ << 1002 2: str x19, [x5] << 1003 << 1004 #ifdef CONFIG_VMAP_STACK << 1005 /* << 1006 * entry.S may have been using sp as << 1007 * this is a normal or critical event << 1008 * stack for this CPU. << 1009 */ << 1010 cbnz w4, 1f << 1011 ldr_this_cpu dst=x5, sym=sdei_stack_n << 1012 b 2f << 1013 1: ldr_this_cpu dst=x5, sym=sdei_stack_c << 1014 2: mov x6, #SDEI_STACK_SIZE << 1015 add x5, x5, x6 << 1016 mov sp, x5 << 1017 #endif << 1018 832 1019 #ifdef CONFIG_SHADOW_CALL_STACK !! 
833 sigreturn_like sigreturn 1020 /* Use a separate shadow call stack f !! 834 sigreturn_like rt_sigreturn 1021 cbnz w4, 3f << 1022 ldr_this_cpu dst=scs_sp, sym=sdei_sha << 1023 b 4f << 1024 3: ldr_this_cpu dst=scs_sp, sym=sdei_sha << 1025 4: << 1026 #endif << 1027 835 1028 /* !! 836 .align 4 1029 * We may have interrupted userspace, !! 837 .globl alpha_syscall_zero 1030 * return-to either of these. We can' !! 838 .ent alpha_syscall_zero 1031 */ !! 839 alpha_syscall_zero: 1032 mrs x28, sp_el0 !! 840 .prologue 0 1033 ldr_this_cpu dst=x0, sym=__entry_t !! 841 /* Special because it needs to do something opposite to 1034 msr sp_el0, x0 !! 842 force_successful_syscall_return(). We use the saved 1035 !! 843 syscall number for that, zero meaning "not an error". 1036 /* If we interrupted the kernel point !! 844 That works nicely, but for real syscall 0 we need to 1037 and x0, x3, #0xc !! 845 make sure that this logics doesn't get confused. 1038 mrs x1, CurrentEL !! 846 Store a non-zero there - -ENOSYS we need in register 1039 cmp x0, x1 !! 847 for our return value will do just fine. 1040 csel x29, x29, xzr, eq // fp !! 848 */ 1041 csel x4, x2, xzr, eq // el !! 849 lda $0, -ENOSYS 1042 !! 850 unop 1043 stp x29, x4, [sp, #-16]! !! 851 stq $0, 0($sp) 1044 mov x29, sp !! 852 ret 1045 !! 853 .end alpha_syscall_zero 1046 add x0, x19, #SDEI_EVENT_INTREGS << 1047 mov x1, x19 << 1048 bl __sdei_handler << 1049 << 1050 msr sp_el0, x28 << 1051 /* restore regs >x17 that we clobbere << 1052 mov x4, x19 // keep x4 fo << 1053 ldp x28, x29, [x4, #SDEI_EVENT_IN << 1054 ldp x18, x19, [x4, #SDEI_EVENT_IN << 1055 ldp lr, x1, [x4, #SDEI_EVENT_INTR << 1056 mov sp, x1 << 1057 << 1058 mov x1, x0 // ad << 1059 /* x0 = (x0 <= SDEI_EV_FAILED) ? << 1060 * EVENT_COMPLETE:EVENT_COMPLETE_AND_ << 1061 */ << 1062 cmp x0, #SDEI_EV_FAILED << 1063 mov_q x2, SDEI_1_0_FN_SDEI_EVENT_CO << 1064 mov_q x3, SDEI_1_0_FN_SDEI_EVENT_CO << 1065 csel x0, x2, x3, ls << 1066 << 1067 ldr_l x2, sdei_exit_mode << 1068 << 1069 /* Clear the registered-event seen by << 1070 ldrb w3, [x4, #SDEI_EVENT_PRIORITY << 1071 cbnz w3, 1f << 1072 adr_this_cpu dst=x5, sym=sdei_active_ << 1073 b 2f << 1074 1: adr_this_cpu dst=x5, sym=sdei_active_ << 1075 2: str xzr, [x5] << 1076 << 1077 alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0 << 1078 sdei_handler_exit exit_mode=x2 << 1079 alternative_else_nop_endif << 1080 << 1081 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 << 1082 tramp_alias dst=x5, sym=__sdei_as << 1083 br x5 << 1084 #endif << 1085 SYM_CODE_END(__sdei_asm_handler) << 1086 NOKPROBE(__sdei_asm_handler) << 1087 << 1088 SYM_CODE_START(__sdei_handler_abort) << 1089 mov_q x0, SDEI_1_0_FN_SDEI_EVENT_CO << 1090 adr x1, 1f << 1091 ldr_l x2, sdei_exit_mode << 1092 sdei_handler_exit exit_mode=x2 << 1093 // exit the handler and jump to the n << 1094 // Exit will stomp x0-x17, PSTATE, EL << 1095 1: ret << 1096 SYM_CODE_END(__sdei_handler_abort) << 1097 NOKPROBE(__sdei_handler_abort) << 1098 #endif /* CONFIG_ARM_SDE_INTERFACE */ <<
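Worth spelling out, since $ret_success, $syscall_error and alpha_syscall_zero all revolve around it: on this port a system call does not return a negative errno in v0. Instead v0 carries either the result or a positive errno, and the a3 slot (offset 72, register $19) is written as 0 on success and 1 on failure; alpha_syscall_zero stores a non-zero value (-ENOSYS, which is also its return value) in the saved-v0 slot so that a real syscall 0 is not mistaken for a forced-success return. Below is a minimal user-side sketch of folding that pair back into the familiar form, assuming a made-up wrapper name; this is not libc's actual stub.

    /* Illustrative only: combine the (v0, a3) pair produced by
     * $ret_success / $syscall_error into a "negative errno" result. */
    static inline long osf_syscall_result(long v0, long a3)
    {
            if (a3 == 0)            /* $ret_success: a3 = 0 => no error */
                    return v0;
            return -v0;             /* $syscall_error: v0 is a positive errno */
    }

This also explains the ptrace remark at $syscall_error: some calls legitimately return negative values, so the error/no-error distinction is carried out of band in a3 (and gated on the saved syscall-number slot) rather than in the sign of v0.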