
TOMOYO Linux Cross Reference
Linux/arch/arm64/kernel/entry.S


Diff markup

Differences between /arch/arm64/kernel/entry.S (Version linux-6.12-rc7) and /arch/mips/kernel/entry.S (Version linux-5.1.21)


  1 /* SPDX-License-Identifier: GPL-2.0-only */    << 
  2 /*                                                  1 /*
  3  * Low-level exception handling code           !!   2  * This file is subject to the terms and conditions of the GNU General Public
  4  *                                             !!   3  * License.  See the file "COPYING" in the main directory of this archive
  5  * Copyright (C) 2012 ARM Ltd.                 !!   4  * for more details.
  6  * Authors:     Catalin Marinas <catalin.marinas@arm.com> !!   5  *
  7  *              Will Deacon <will.deacon@arm.com> !!   6  * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
  8  */                                            !!   7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  9                                                !!   8  * Copyright (C) 2001 MIPS Technologies, Inc.
 10 #include <linux/arm-smccc.h>                   !!   9  */
 11 #include <linux/init.h>                        !!  10 
 12 #include <linux/linkage.h>                     !!  11 #include <asm/asm.h>
 13                                                !!  12 #include <asm/asmmacro.h>
 14 #include <asm/alternative.h>                   !!  13 #include <asm/compiler.h>
 15 #include <asm/assembler.h>                     !!  14 #include <asm/irqflags.h>
 16 #include <asm/asm-offsets.h>                   !!  15 #include <asm/regdef.h>
 17 #include <asm/asm_pointer_auth.h>              !!  16 #include <asm/mipsregs.h>
 18 #include <asm/bug.h>                           !!  17 #include <asm/stackframe.h>
 19 #include <asm/cpufeature.h>                    !!  18 #include <asm/isadep.h>
 20 #include <asm/errno.h>                         << 
 21 #include <asm/esr.h>                           << 
 22 #include <asm/irq.h>                           << 
 23 #include <asm/memory.h>                        << 
 24 #include <asm/mmu.h>                           << 
 25 #include <asm/processor.h>                     << 
 26 #include <asm/ptrace.h>                        << 
 27 #include <asm/scs.h>                           << 
 28 #include <asm/thread_info.h>                       19 #include <asm/thread_info.h>
 29 #include <asm/asm-uaccess.h>                   !!  20 #include <asm/war.h>
 30 #include <asm/unistd.h>                        << 
 31                                                << 
 32         .macro  clear_gp_regs                  << 
 33         .irp    n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29 <<
 34         mov     x\n, xzr                       << 
 35         .endr                                  << 
 36         .endm                                  << 
 37                                                << 
 38         .macro kernel_ventry, el:req, ht:req, regsize:req, label:req <<
 39         .align 7                               << 
 40 .Lventry_start\@:                              << 
 41         .if     \el == 0                       << 
 42         /*                                     << 
 43          * This must be the first instruction of the EL0 vector entries. It is <<
 44          * skipped by the trampoline vectors, to trigger the cleanup. <<
 45          */                                    << 
 46         b       .Lskip_tramp_vectors_cleanup\@ << 
 47         .if     \regsize == 64                 << 
 48         mrs     x30, tpidrro_el0               << 
 49         msr     tpidrro_el0, xzr               << 
 50         .else                                  << 
 51         mov     x30, xzr                       << 
 52         .endif                                 << 
 53 .Lskip_tramp_vectors_cleanup\@:                << 
 54         .endif                                 << 
 55                                                << 
 56         sub     sp, sp, #PT_REGS_SIZE          << 
 57 #ifdef CONFIG_VMAP_STACK                       << 
 58         /*                                     << 
 59          * Test whether the SP has overflowed, without corrupting a GPR. <<
 60          * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT) <<
 61          * should always be zero.              << 
 62          */                                    << 
 63         add     sp, sp, x0                     << 
 64         sub     x0, sp, x0                     << 
 65         tbnz    x0, #THREAD_SHIFT, 0f          << 
 66         sub     x0, sp, x0                     << 
 67         sub     sp, sp, x0                     << 
 68         b       el\el\ht\()_\regsize\()_\label << 
 69                                                << 
 70 0:                                             << 
 71         /*                                     << 
 72          * Either we've just detected an overflow, or we've taken an exception <<
 73          * while on the overflow stack. Either way, we won't return to <<
 74          * userspace, and can clobber EL0 registers to free up GPRs. <<
 75          */                                    << 
 76                                                << 
 77         /* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0 */ <<
 78         msr     tpidr_el0, x0                  << 
 79                                                << 
 80         /* Recover the original x0 value and stash it in tpidrro_el0 */ <<
 81         sub     x0, sp, x0                     << 
 82         msr     tpidrro_el0, x0                << 
 83                                                << 
 84         /* Switch to the overflow stack */     << 
 85         adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0 <<
 86                                                << 
 87         /*                                     << 
 88          * Check whether we were already on the overflow stack. This may happen <<
 89          * after panic() re-enables interrupts. <<
 90          */                                    << 
 91         mrs     x0, tpidr_el0                   // sp of interrupted context <<
 92         sub     x0, sp, x0                      // delta with top of overflow stack <<
 93         tst     x0, #~(OVERFLOW_STACK_SIZE - 1) // within range? <<
 94         b.ne    __bad_stack                     // no? -> bad stack pointer <<
 95                                                << 
 96         /* We were already on the overflow stack. Restore sp/x0 and carry on. */ <<
 97         sub     sp, sp, x0                     << 
 98         mrs     x0, tpidrro_el0                << 
 99 #endif                                         << 
100         b       el\el\ht\()_\regsize\()_\label << 
101 .org .Lventry_start\@ + 128     // Did we overflow the ventry slot? <<
102         .endm                                  << 
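
A minimal C sketch of the VMAP_STACK overflow test used in kernel_ventry above, with an assumed THREAD_SHIFT for illustration (the real value is configuration dependent): task, IRQ and overflow stacks are sized and aligned so that bit THREAD_SHIFT of any valid stack pointer is zero, so testing that single bit (the tbnz above) detects an overflow without needing a scratch register.

#include <stdbool.h>
#include <stdint.h>

#define THREAD_SHIFT 14 /* assumed for illustration only */

/* Mirrors "tbnz x0, #THREAD_SHIFT, 0f": a set bit means sp has left
 * its THREAD_SIZE-aligned stack, i.e. it has overflowed. */
static bool stack_overflowed(uint64_t candidate_sp)
{
	return candidate_sp & (1UL << THREAD_SHIFT);
}
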
103                                                << 
104         .macro  tramp_alias, dst, sym          << 
105         .set    .Lalias\@, TRAMP_VALIAS + \sym - vectors <<
106         movz    \dst, :abs_g2_s:.Lalias\@      << 
107         movk    \dst, :abs_g1_nc:.Lalias\@     << 
108         movk    \dst, :abs_g0_nc:.Lalias\@     << 
109         .endm                                  << 
110                                                << 
111         /*                                     << 
112          * This macro corrupts x0-x3. It is the caller's duty to save/restore <<
113          * them if required.                   << 
114          */                                    << 
115         .macro  apply_ssbd, state, tmp1, tmp2  << 
116 alternative_cb  ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable <<
117         b       .L__asm_ssbd_skip\@            << 
118 alternative_cb_end                             << 
119         ldr_this_cpu    \tmp2, arm64_ssbd_callback_required, \tmp1 <<
120         cbz     \tmp2,  .L__asm_ssbd_skip\@    <<
121         ldr     \tmp2, [tsk, #TSK_TI_FLAGS]    <<
122         tbnz    \tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@ <<
123         mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2 <<
124         mov     w1, #\state                    <<
125 alternative_cb  ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit <<
126         nop                                    << 
127 alternative_cb_end                             << 
128 .L__asm_ssbd_skip\@:                           << 
129         .endm                                  << 
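
A hedged C rendering of the apply_ssbd decision chain above; names mirror the assembly but smccc_call() and the two flags are hypothetical stand-ins, not the kernel's API:

#include <stdbool.h>

#define ARM_SMCCC_ARCH_WORKAROUND_2 0x80007fffU /* assumed SMCCC function ID */

extern bool ssbd_callback_required;  /* per-CPU flag in the real code */
extern bool task_has_tif_ssbd;       /* TIF_SSBD in thread_info flags */
extern void smccc_call(unsigned int fn, unsigned int state); /* conduit */

static void apply_ssbd_sketch(unsigned int state)
{
	if (!ssbd_callback_required)
		return;  /* cbz \tmp2, .L__asm_ssbd_skip\@ */
	if (task_has_tif_ssbd)
		return;  /* mitigation forced off for this task */
	smccc_call(ARM_SMCCC_ARCH_WORKAROUND_2, state);
}
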
130                                                << 
131         /* Check for MTE asynchronous tag check faults */ <<
132         .macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr <<
133 #ifdef CONFIG_ARM64_MTE                        << 
134         .arch_extension lse                    << 
135 alternative_if_not ARM64_MTE                   << 
136         b       1f                             << 
137 alternative_else_nop_endif                     << 
138         /*                                     << 
139          * Asynchronous tag check faults are only possible in ASYNC (2) or <<
140          * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is <<
141          * set, so skip the check if it is unset. <<
142          */                                    <<
143         tbz     \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f <<
144         mrs_s   \tmp, SYS_TFSRE0_EL1           <<
145         tbz     \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f <<
146         /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */ <<
147         mov     \tmp, #_TIF_MTE_ASYNC_FAULT    << 
148         add     \ti_flags, tsk, #TSK_TI_FLAGS  << 
149         stset   \tmp, [\ti_flags]              << 
150 1:                                             << 
151 #endif                                         << 
152         .endm                                  << 
153                                                << 
154         /* Clear the MTE asynchronous tag check faults */ <<
155         .macro clear_mte_async_tcf thread_sctlr <<
156 #ifdef CONFIG_ARM64_MTE                        << 
157 alternative_if ARM64_MTE                       << 
158         /* See comment in check_mte_async_tcf above. */ <<
159         tbz     \thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f <<
160         dsb     ish                            << 
161         msr_s   SYS_TFSRE0_EL1, xzr            << 
162 1:                                             << 
163 alternative_else_nop_endif                     << 
164 #endif                                         << 
165         .endm                                  << 
166                                                << 
167         .macro mte_set_gcr, mte_ctrl, tmp      << 
168 #ifdef CONFIG_ARM64_MTE                        << 
169         ubfx    \tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16 <<
170         orr     \tmp, \tmp, #SYS_GCR_EL1_RRND  << 
171         msr_s   SYS_GCR_EL1, \tmp              << 
172 #endif                                         << 
173         .endm                                  << 
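
What mte_set_gcr computes, as a C sketch: the 16-bit tag-exclude mask is extracted from the task's mte_ctrl (the ubfx) and SYS_GCR_EL1_RRND is OR'd in. The shift and bit positions here are assumptions for illustration:

#include <stdint.h>

#define MTE_CTRL_GCR_USER_EXCL_SHIFT 0       /* assumed */
#define SYS_GCR_EL1_RRND (1UL << 16)         /* assumed bit position */

static uint64_t mte_gcr_value(uint64_t mte_ctrl)
{
	uint64_t excl = (mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) & 0xffff;

	return excl | SYS_GCR_EL1_RRND;      /* ubfx + orr in the assembly */
}
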
174                                                << 
175         .macro mte_set_kernel_gcr, tmp, tmp2   << 
176 #ifdef CONFIG_KASAN_HW_TAGS                    << 
177 alternative_cb  ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable <<
178         b       1f                             << 
179 alternative_cb_end                             << 
180         mov     \tmp, KERNEL_GCR_EL1           << 
181         msr_s   SYS_GCR_EL1, \tmp              << 
182 1:                                             << 
183 #endif                                         << 
184         .endm                                  << 
185                                                    21 
186         .macro  mte_set_user_gcr, tsk, tmp, tmp2 !!  22 #ifndef CONFIG_PREEMPT
187 #ifdef CONFIG_KASAN_HW_TAGS                    !!  23 #define resume_kernel   restore_all
188 alternative_cb  ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable <<
189         b       1f                             << 
190 alternative_cb_end                             << 
191         ldr     \tmp, [\tsk, #THREAD_MTE_CTRL] << 
192                                                << 
193         mte_set_gcr \tmp, \tmp2                << 
194 1:                                             << 
195 #endif                                         << 
196         .endm                                  << 
197                                                << 
198         .macro  kernel_entry, el, regsize = 64 << 
199         .if     \el == 0                       << 
200         alternative_insn nop, SET_PSTATE_DIT(1), ARM64_HAS_DIT <<
201         .endif                                 << 
202         .if     \regsize == 32                 << 
203         mov     w0, w0                          // zero upper 32 bits of x0 <<
204         .endif                                 << 
205         stp     x0, x1, [sp, #16 * 0]          << 
206         stp     x2, x3, [sp, #16 * 1]          << 
207         stp     x4, x5, [sp, #16 * 2]          << 
208         stp     x6, x7, [sp, #16 * 3]          << 
209         stp     x8, x9, [sp, #16 * 4]          << 
210         stp     x10, x11, [sp, #16 * 5]        << 
211         stp     x12, x13, [sp, #16 * 6]        << 
212         stp     x14, x15, [sp, #16 * 7]        << 
213         stp     x16, x17, [sp, #16 * 8]        << 
214         stp     x18, x19, [sp, #16 * 9]        << 
215         stp     x20, x21, [sp, #16 * 10]       << 
216         stp     x22, x23, [sp, #16 * 11]       << 
217         stp     x24, x25, [sp, #16 * 12]       << 
218         stp     x26, x27, [sp, #16 * 13]       << 
219         stp     x28, x29, [sp, #16 * 14]       << 
220                                                << 
221         .if     \el == 0                       << 
222         clear_gp_regs                          << 
223         mrs     x21, sp_el0                    << 
224         ldr_this_cpu    tsk, __entry_task, x20 << 
225         msr     sp_el0, tsk                    << 
226                                                << 
227         /*                                     << 
228          * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions <<
229          * when scheduling.                    << 
230          */                                    << 
231         ldr     x19, [tsk, #TSK_TI_FLAGS]      << 
232         disable_step_tsk x19, x20              << 
233                                                << 
234         /* Check for asynchronous tag check faults in user space */ <<
235         ldr     x0, [tsk, THREAD_SCTLR_USER]   << 
236         check_mte_async_tcf x22, x23, x0       << 
237                                                << 
238 #ifdef CONFIG_ARM64_PTR_AUTH                   << 
239 alternative_if ARM64_HAS_ADDRESS_AUTH          << 
240         /*                                     << 
241          * Enable IA for in-kernel PAC if the task had it disabled. Although <<
242          * this could be implemented with an unconditional MRS which would avoid <<
243          * a load, this was measured to be slower on Cortex-A75 and Cortex-A76. <<
244          *                                     <<
245          * Install the kernel IA key only if IA was enabled in the task. If IA <<
246          * was disabled on kernel exit then we would have left the kernel IA <<
247          * installed so there is no need to install it again. <<
248          */                                    << 
249         tbz     x0, SCTLR_ELx_ENIA_SHIFT, 1f   << 
250         __ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23 <<
251         b       2f                             << 
252 1:                                             << 
253         mrs     x0, sctlr_el1                  << 
254         orr     x0, x0, SCTLR_ELx_ENIA         << 
255         msr     sctlr_el1, x0                  << 
256 2:                                             << 
257 alternative_else_nop_endif                     << 
258 #endif                                         << 
259                                                << 
260         apply_ssbd 1, x22, x23                 << 
261                                                << 
262         mte_set_kernel_gcr x22, x23            << 
263                                                << 
264         /*                                     << 
265          * Any non-self-synchronizing system register updates required for <<
266          * kernel entry should be placed before this point. <<
267          */                                    << 
268 alternative_if ARM64_MTE                       << 
269         isb                                    << 
270         b       1f                             << 
271 alternative_else_nop_endif                     << 
272 alternative_if ARM64_HAS_ADDRESS_AUTH          << 
273         isb                                    << 
274 alternative_else_nop_endif                     << 
275 1:                                             << 
276                                                << 
277         scs_load_current                       << 
278         .else                                  << 
279         add     x21, sp, #PT_REGS_SIZE         << 
280         get_current_task tsk                   << 
281         .endif /* \el == 0 */                  << 
282         mrs     x22, elr_el1                   << 
283         mrs     x23, spsr_el1                  << 
284         stp     lr, x21, [sp, #S_LR]           << 
285                                                << 
286         /*                                     << 
287          * For exceptions from EL0, create a final frame record. <<
288          * For exceptions from EL1, create a synthetic frame record so the <<
289          * interrupted code shows up in the backtrace. <<
290          */                                    << 
291         .if \el == 0                           << 
292         stp     xzr, xzr, [sp, #S_STACKFRAME]  << 
293         .else                                  << 
294         stp     x29, x22, [sp, #S_STACKFRAME]  << 
295         .endif                                 << 
296         add     x29, sp, #S_STACKFRAME         << 
297                                                << 
298 #ifdef CONFIG_ARM64_SW_TTBR0_PAN               << 
299 alternative_if_not ARM64_HAS_PAN               << 
300         bl      __swpan_entry_el\el            << 
301 alternative_else_nop_endif                     << 
302 #endif                                         << 
303                                                << 
304         stp     x22, x23, [sp, #S_PC]          << 
305                                                << 
306         /* Not in a syscall by default (el0_svc overwrites for real syscall) */ <<
307         .if     \el == 0                       << 
308         mov     w21, #NO_SYSCALL               << 
309         str     w21, [sp, #S_SYSCALLNO]        << 
310         .endif                                 << 
311                                                << 
312 #ifdef CONFIG_ARM64_PSEUDO_NMI                 << 
313 alternative_if_not ARM64_HAS_GIC_PRIO_MASKING  << 
314         b       .Lskip_pmr_save\@              << 
315 alternative_else_nop_endif                     << 
316                                                << 
317         mrs_s   x20, SYS_ICC_PMR_EL1           << 
318         str     x20, [sp, #S_PMR_SAVE]         << 
319         mov     x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET <<
320         msr_s   SYS_ICC_PMR_EL1, x20           << 
321                                                << 
322 .Lskip_pmr_save\@:                             << 
323 #endif                                         << 
324                                                << 
325         /*                                     << 
326          * Registers that may be useful after this macro is invoked: <<
327          *                                     << 
328          * x20 - ICC_PMR_EL1                   << 
329          * x21 - aborted SP                    << 
330          * x22 - aborted PC                    << 
331          * x23 - aborted PSTATE                << 
332         */                                     << 
333         .endm                                  << 
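
A rough C view of the exception frame kernel_entry builds on the stack; the field order is illustrative only, and the authoritative layout is struct pt_regs in arch/arm64/include/asm/ptrace.h (the S_* offsets used above are generated from it):

#include <stdint.h>

struct entry_frame_sketch {
	uint64_t regs[31];   /* x0-x30, stored by the stp block          */
	uint64_t sp;         /* aborted SP (x21)                         */
	uint64_t pc;         /* ELR_EL1 (x22)                            */
	uint64_t pstate;     /* SPSR_EL1 (x23)                           */
	uint32_t syscallno;  /* NO_SYSCALL unless el0_svc overwrites it  */
	/* ... PMR save slot, synthetic stackframe record, etc. ...     */
};
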
334                                                << 
335         .macro  kernel_exit, el                << 
336         .if     \el != 0                       << 
337         disable_daif                           << 
338         .endif                                 << 
339                                                << 
340 #ifdef CONFIG_ARM64_PSEUDO_NMI                 << 
341 alternative_if_not ARM64_HAS_GIC_PRIO_MASKING  << 
342         b       .Lskip_pmr_restore\@           << 
343 alternative_else_nop_endif                     << 
344                                                << 
345         ldr     x20, [sp, #S_PMR_SAVE]         << 
346         msr_s   SYS_ICC_PMR_EL1, x20           << 
347                                                << 
348         /* Ensure priority change is seen by redistributor */ <<
349 alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC <<
350         dsb     sy                             << 
351 alternative_else_nop_endif                     << 
352                                                << 
353 .Lskip_pmr_restore\@:                          << 
354 #endif                                         << 
355                                                << 
356         ldp     x21, x22, [sp, #S_PC]          << 
357                                                << 
358 #ifdef CONFIG_ARM64_SW_TTBR0_PAN               << 
359 alternative_if_not ARM64_HAS_PAN               << 
360         bl      __swpan_exit_el\el             << 
361 alternative_else_nop_endif                     << 
362 #endif                                         << 
363                                                << 
364         .if     \el == 0                       << 
365         ldr     x23, [sp, #S_SP]                // load return stack pointer <<
366         msr     sp_el0, x23                    <<
367         tst     x22, #PSR_MODE32_BIT            // native task? <<
368         b.eq    3f                             << 
369                                                << 
370 #ifdef CONFIG_ARM64_ERRATUM_845719             << 
371 alternative_if ARM64_WORKAROUND_845719         << 
372 #ifdef CONFIG_PID_IN_CONTEXTIDR                << 
373         mrs     x29, contextidr_el1            << 
374         msr     contextidr_el1, x29            << 
375 #else                                              24 #else
376         msr     contextidr_el1, xzr            !!  25 #define __ret_from_irq  ret_from_exception
377 #endif                                         << 
378 alternative_else_nop_endif                     << 
379 #endif                                         << 
380 3:                                             << 
381         scs_save tsk                           << 
382                                                << 
383         /* Ignore asynchronous tag check faults in the uaccess routines */ <<
384         ldr     x0, [tsk, THREAD_SCTLR_USER]   << 
385         clear_mte_async_tcf x0                 << 
386                                                << 
387 #ifdef CONFIG_ARM64_PTR_AUTH                   << 
388 alternative_if ARM64_HAS_ADDRESS_AUTH          << 
389         /*                                     << 
390          * IA was enabled for in-kernel PAC. Disable it now if needed, or <<
391          * alternatively install the user's IA. All other per-task keys and <<
392          * SCTLR bits were updated on task switch. <<
393          *                                     <<
394          * No kernel C function calls after this. <<
395          */                                    << 
396         tbz     x0, SCTLR_ELx_ENIA_SHIFT, 1f   << 
397         __ptrauth_keys_install_user tsk, x0, x1, x2 <<
398         b       2f                             << 
399 1:                                             << 
400         mrs     x0, sctlr_el1                  << 
401         bic     x0, x0, SCTLR_ELx_ENIA         << 
402         msr     sctlr_el1, x0                  << 
403 2:                                             << 
404 alternative_else_nop_endif                     << 
405 #endif                                         << 
406                                                << 
407         mte_set_user_gcr tsk, x0, x1           << 
408                                                << 
409         apply_ssbd 0, x0, x1                   << 
410         .endif                                 << 
411                                                << 
412         msr     elr_el1, x21                   << 
413         msr     spsr_el1, x22                  << 
414         ldp     x0, x1, [sp, #16 * 0]          << 
415         ldp     x2, x3, [sp, #16 * 1]          << 
416         ldp     x4, x5, [sp, #16 * 2]          << 
417         ldp     x6, x7, [sp, #16 * 3]          << 
418         ldp     x8, x9, [sp, #16 * 4]          << 
419         ldp     x10, x11, [sp, #16 * 5]        << 
420         ldp     x12, x13, [sp, #16 * 6]        << 
421         ldp     x14, x15, [sp, #16 * 7]        << 
422         ldp     x16, x17, [sp, #16 * 8]        << 
423         ldp     x18, x19, [sp, #16 * 9]        << 
424         ldp     x20, x21, [sp, #16 * 10]       << 
425         ldp     x22, x23, [sp, #16 * 11]       << 
426         ldp     x24, x25, [sp, #16 * 12]       << 
427         ldp     x26, x27, [sp, #16 * 13]       << 
428         ldp     x28, x29, [sp, #16 * 14]       << 
429                                                << 
430         .if     \el == 0                       << 
431 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0              << 
432         alternative_insn "b .L_skip_tramp_exit_\@", nop, ARM64_UNMAP_KERNEL_AT_EL0 <<
433                                                << 
434         msr     far_el1, x29                   << 
435                                                << 
436         ldr_this_cpu    x30, this_cpu_vector, x29 <<
437         tramp_alias     x29, tramp_exit        << 
438         msr             vbar_el1, x30          << 
439         ldr             lr, [sp, #S_LR]        << 
440         add             sp, sp, #PT_REGS_SIZE  << 
441         br              x29                    << 
442                                                << 
443 .L_skip_tramp_exit_\@:                         << 
444 #endif                                         << 
445         .endif                                 << 
446                                                << 
447         ldr     lr, [sp, #S_LR]                << 
448         add     sp, sp, #PT_REGS_SIZE          << 
449                                                << 
450         .if \el == 0                           << 
451         /* This must be after the last explicit memory access */ <<
452 alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD <<
453         tlbi    vale1, xzr                     << 
454         dsb     nsh                            << 
455 alternative_else_nop_endif                     << 
456         .else                                  << 
457         /* Ensure any device/NC reads complete */ <<
458         alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412 <<
459         .endif                                 << 
460                                                << 
461         eret                                   << 
462         sb                                     << 
463         .endm                                  << 
464                                                << 
465 #ifdef CONFIG_ARM64_SW_TTBR0_PAN               << 
466         /*                                     << 
467          * Set the TTBR0 PAN bit in SPSR. When the exception is taken from <<
468          * EL0, there is no need to check the state of TTBR0_EL1 since <<
469          * accesses are always enabled.        <<
470          * Note that the meaning of this bit differs from the ARMv8.1 PAN <<
471          * feature as all TTBR0_EL1 accesses are disabled, not just those to <<
472          * user mappings.                      << 
473          */                                    << 
474 SYM_CODE_START_LOCAL(__swpan_entry_el1)        << 
475         mrs     x21, ttbr0_el1                 << 
476         tst     x21, #TTBR_ASID_MASK           << 
477         orr     x23, x23, #PSR_PAN_BIT         << 
478         b.eq    1f                             << 
479         and     x23, x23, #~PSR_PAN_BIT        << 
480 SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL) <<
481         __uaccess_ttbr0_disable x21            << 
482 1:      ret                                    << 
483 SYM_CODE_END(__swpan_entry_el1)                << 
484                                                << 
485         /*                                     << 
486          * Restore access to TTBR0_EL1. If returning to privileged mode, avoid <<
487          * PAN bit checking.                   << 
488          */                                    << 
489 SYM_CODE_START_LOCAL(__swpan_exit_el1)         << 
490         tbnz    x22, #22, 1f                   << 
491         __uaccess_ttbr0_enable x0, x1          << 
492 1:      and     x22, x22, #~PSR_PAN_BIT        << 
493         ret                                    << 
494 SYM_CODE_END(__swpan_exit_el1)                 << 
495                                                << 
496 SYM_CODE_START_LOCAL(__swpan_exit_el0)         << 
497         __uaccess_ttbr0_enable x0, x1          << 
498         /*                                     << 
499          * Enable errata workarounds only if returning to user. The only <<
500          * workaround currently required for TTBR0_EL1 changes are for the <<
501          * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache <<
502          * corruption).                        << 
503          */                                    << 
504         b       post_ttbr_update_workaround    << 
505 SYM_CODE_END(__swpan_exit_el0)                 << 
506 #endif                                             26 #endif
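
The software PAN trick above can be summarised in C: while in the kernel, TTBR0_EL1 points at an all-zero page table so any user access faults, and it is switched back to the task's tables around uaccess. A hedged sketch; write_ttbr0() and reserved_pg_dir_pa are hypothetical stand-ins, not kernel helpers:

extern void write_ttbr0(unsigned long ttbr);  /* hypothetical helper   */
extern unsigned long reserved_pg_dir_pa;      /* empty PGD, no user VAs */

static void swpan_disable_uaccess(void)
{
	/* __uaccess_ttbr0_disable: user VAs now translate to nothing */
	write_ttbr0(reserved_pg_dir_pa);
}

static void swpan_enable_uaccess(unsigned long task_ttbr0)
{
	/* __uaccess_ttbr0_enable: restore the task's user page table */
	write_ttbr0(task_ttbr0);
}
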
507                                                    27 
508 /* GPRs used by entry code */                  << 
509 tsk     .req    x28             // current thread_info <<
510                                                << 
511         .text                                      28         .text
512                                                !!  29         .align  5
513 /*                                             !!  30 #ifndef CONFIG_PREEMPT
514  * Exception vectors.                          !!  31 FEXPORT(ret_from_exception)
515  */                                            !!  32         local_irq_disable                       # preempt stop
516         .pushsection ".entry.text", "ax"       !!  33         b       __ret_from_irq
517                                                !!  34 #endif
518         .align  11                             !!  35 FEXPORT(ret_from_irq)
519 SYM_CODE_START(vectors)                        !!  36         LONG_S  s0, TI_REGS($28)
520         kernel_ventry   1, t, 64, sync          // Synchronous EL1t !!  37 FEXPORT(__ret_from_irq)
521         kernel_ventry   1, t, 64, irq           // IRQ EL1t !!  38 /*
522         kernel_ventry   1, t, 64, fiq           // FIQ EL1t !!  39  * We can be coming here from a syscall done in the kernel space,
523         kernel_ventry   1, t, 64, error         // Error EL1t !!  40  * e.g. a failed kernel_execve().
524                                                !!  41  */
525         kernel_ventry   1, h, 64, sync          // Synchronous EL1h !!  42 resume_userspace_check:
526         kernel_ventry   1, h, 64, irq           // IRQ EL1h !!  43         LONG_L  t0, PT_STATUS(sp)               # returning to kernel mode?
527         kernel_ventry   1, h, 64, fiq           // FIQ EL1h !!  44         andi    t0, t0, KU_USER
528         kernel_ventry   1, h, 64, error         // Error EL1h !!  45         beqz    t0, resume_kernel
529                                                !!  46 
530         kernel_ventry   0, t, 64, sync          // Synchronous 64-bit EL0 !!  47 resume_userspace:
531         kernel_ventry   0, t, 64, irq           // IRQ 64-bit EL0 !!  48         local_irq_disable               # make sure we dont miss an
532         kernel_ventry   0, t, 64, fiq           // FIQ 64-bit EL0 !!  49                                         # interrupt setting need_resched
533         kernel_ventry   0, t, 64, error         // Error 64-bit EL0 !!  50                                         # between sampling and return
534                                                !!  51         LONG_L  a2, TI_FLAGS($28)       # current->work
535         kernel_ventry   0, t, 32, sync          // Synchronous 32-bit EL0 !!  52         andi    t0, a2, _TIF_WORK_MASK  # (ignoring syscall_trace)
536         kernel_ventry   0, t, 32, irq           // IRQ 32-bit EL0 !!  53         bnez    t0, work_pending
537         kernel_ventry   0, t, 32, fiq           // FIQ 32-bit EL0 !!  54         j       restore_all
538         kernel_ventry   0, t, 32, error         // Error 32-bit EL0 !!  55 
539 SYM_CODE_END(vectors)                          !!  56 #ifdef CONFIG_PREEMPT
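
The geometry behind the table above: an AArch64 vector base is 2 KiB aligned (.align 11) and holds 16 slots of 128 bytes each (.align 7 in kernel_ventry), 4 exception types for each of 4 source groups. A C sketch of the slot address arithmetic:

#include <stdint.h>

enum vec_group { EL1T, EL1H, EL0_64, EL0_32 };  /* groups 0x200 apart */
enum vec_type  { SYNC, IRQ, FIQ, SERROR };      /* slots  0x80 apart  */

static uint64_t vector_slot(uint64_t vbar, enum vec_group g, enum vec_type t)
{
	return vbar + 0x200 * g + 0x80 * t;  /* vbar must be 2 KiB aligned */
}
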
540                                                !!  57 resume_kernel:
541 #ifdef CONFIG_VMAP_STACK                       !!  58         local_irq_disable
542 SYM_CODE_START_LOCAL(__bad_stack)              !!  59         lw      t0, TI_PRE_COUNT($28)
543         /*                                     !!  60         bnez    t0, restore_all
544          * We detected an overflow in kernel_ventry, which switched to the !!  61 need_resched:
545          * overflow stack. Stash the exception regs, and head to our overflow !!  62         LONG_L  t0, TI_FLAGS($28)
546          * handler.                            !!  63         andi    t1, t0, _TIF_NEED_RESCHED
547          */                                    !!  64         beqz    t1, restore_all
548                                                !!  65         LONG_L  t0, PT_STATUS(sp)               # Interrupts off?
549         /* Restore the original x0 value */    !!  66         andi    t0, 1
550         mrs     x0, tpidrro_el0                !!  67         beqz    t0, restore_all
551                                                !!  68         jal     preempt_schedule_irq
552         /*                                     !!  69         b       need_resched
553          * Store the original GPRs to the new stack. The orig_sp (minus !!  70 #endif
554          * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry. !!  71 
555          */                                    !!  72 FEXPORT(ret_from_kernel_thread)
556         sub     sp, sp, #PT_REGS_SIZE          !!  73         jal     schedule_tail           # a0 = struct task_struct *prev
557         kernel_entry 1                         !!  74         move    a0, s1
558         mrs     x0, tpidr_el0                  !!  75         jal     s0
559         add     x0, x0, #PT_REGS_SIZE          !!  76         j       syscall_exit
560         str     x0, [sp, #S_SP]                !!  77 
561                                                !!  78 FEXPORT(ret_from_fork)
562         /* Stash the regs for handle_bad_stack */ !!  79         jal     schedule_tail           # a0 = struct task_struct *prev
563         mov     x0, sp                         !!  80 
564                                                !!  81 FEXPORT(syscall_exit)
565         /* Time to die */                      !!  82 #ifdef CONFIG_DEBUG_RSEQ
566         bl      handle_bad_stack               !!  83         move    a0, sp
567         ASM_BUG()                              !!  84         jal     rseq_syscall
568 SYM_CODE_END(__bad_stack)                      !!  85 #endif
569 #endif /* CONFIG_VMAP_STACK */                 !!  86         local_irq_disable               # make sure need_resched and
570                                                !!  87                                         # signals dont change between
571                                                !!  88                                         # sampling and return
572         .macro entry_handler el:req, ht:req, regsize:req, label:req !!  89         LONG_L  a2, TI_FLAGS($28)       # current->work
573 SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label) !!  90         li      t0, _TIF_ALLWORK_MASK
574         kernel_entry \el, \regsize             !!  91         and     t0, a2, t0
575         mov     x0, sp                         !!  92         bnez    t0, syscall_exit_work
576         bl      el\el\ht\()_\regsize\()_\label\()_handler !!  93 
577         .if \el == 0                           !!  94 restore_all:                            # restore full frame
578         b       ret_to_user                    !!  95         .set    noat
579         .else                                  !!  96         RESTORE_TEMP
580         b       ret_to_kernel                  !!  97         RESTORE_AT
581         .endif                                 !!  98         RESTORE_STATIC
582 SYM_CODE_END(el\el\ht\()_\regsize\()_\label) !!  99 restore_partial:                # restore partial frame
583         .endm                                  !! 100 #ifdef CONFIG_TRACE_IRQFLAGS
584                                                !! 101         SAVE_STATIC
585 /*                                             !! 102         SAVE_AT
586  * Early exception handlers                    !! 103         SAVE_TEMP
587  */                                            !! 104         LONG_L  v0, PT_STATUS(sp)
588         entry_handler   1, t, 64, sync         !! 105 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
589         entry_handler   1, t, 64, irq          !! 106         and     v0, ST0_IEP
590         entry_handler   1, t, 64, fiq          << 
591         entry_handler   1, t, 64, error        << 
592                                                << 
593         entry_handler   1, h, 64, sync         << 
594         entry_handler   1, h, 64, irq          << 
595         entry_handler   1, h, 64, fiq          << 
596         entry_handler   1, h, 64, error        << 
597                                                << 
598         entry_handler   0, t, 64, sync         << 
599         entry_handler   0, t, 64, irq          << 
600         entry_handler   0, t, 64, fiq          << 
601         entry_handler   0, t, 64, error        << 
602                                                << 
603         entry_handler   0, t, 32, sync         << 
604         entry_handler   0, t, 32, irq          << 
605         entry_handler   0, t, 32, fiq          << 
606         entry_handler   0, t, 32, error        << 
607                                                << 
608 SYM_CODE_START_LOCAL(ret_to_kernel)            << 
609         kernel_exit 1                          << 
610 SYM_CODE_END(ret_to_kernel)                    << 
611                                                << 
612 SYM_CODE_START_LOCAL(ret_to_user)              << 
613         ldr     x19, [tsk, #TSK_TI_FLAGS]      << 
614         enable_step_tsk x19, x2                << 
615 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK             << 
616         bl      stackleak_erase_on_task_stack  << 
617 #endif                                         << 
618         kernel_exit 0                          << 
619 SYM_CODE_END(ret_to_user)                      << 
620                                                << 
621         .popsection                            << 
622                                                << 
623         // Move from tramp_pg_dir to swapper_pg_dir <<
624         .macro tramp_map_kernel, tmp           << 
625         mrs     \tmp, ttbr1_el1                << 
626         add     \tmp, \tmp, #TRAMP_SWAPPER_OFFSET <<
627         bic     \tmp, \tmp, #USER_ASID_FLAG    << 
628         msr     ttbr1_el1, \tmp                << 
629 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003         << 
630 alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003 <<
631         /* ASID already in \tmp[63:48] */      << 
632         movk    \tmp, #:abs_g2_nc:(TRAMP_VALIA << 
633         movk    \tmp, #:abs_g1_nc:(TRAMP_VALIA << 
634         /* 2MB boundary containing the vectors << 
635         movk    \tmp, #:abs_g0_nc:((TRAMP_VALI << 
636         isb                                    << 
637         tlbi    vae1, \tmp                     << 
638         dsb     nsh                            << 
639 alternative_else_nop_endif                     << 
640 #endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */   << 
641         .endm                                  << 
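
The two trampoline macros above just retarget TTBR1_EL1 between two page tables that sit a fixed distance apart, tagging the user direction with the user ASID bit. A C sketch of the arithmetic; both constants are assumed placeholder values:

#include <stdint.h>

#define TRAMP_SWAPPER_OFFSET 0x2000UL   /* assumed PGD spacing          */
#define USER_ASID_FLAG (1UL << 48)      /* assumed ASID tag bit in TTBR */

static uint64_t tramp_to_swapper(uint64_t ttbr1)  /* tramp_map_kernel   */
{
	return (ttbr1 + TRAMP_SWAPPER_OFFSET) & ~USER_ASID_FLAG;
}

static uint64_t swapper_to_tramp(uint64_t ttbr1)  /* tramp_unmap_kernel */
{
	return (ttbr1 - TRAMP_SWAPPER_OFFSET) | USER_ASID_FLAG;
}
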
642                                                << 
643         // Move from swapper_pg_dir to tramp_pg_dir <<
644         .macro tramp_unmap_kernel, tmp         << 
645         mrs     \tmp, ttbr1_el1                << 
646         sub     \tmp, \tmp, #TRAMP_SWAPPER_OFFSET <<
647         orr     \tmp, \tmp, #USER_ASID_FLAG    << 
648         msr     ttbr1_el1, \tmp                << 
649         /*                                     << 
650          * We avoid running the post_ttbr_update_workaround here because <<
651          * it's only needed by Cavium ThunderX, which requires KPTI to be <<
652          * disabled.                           << 
653          */                                    << 
654         .endm                                  << 
655                                                << 
656         .macro          tramp_data_read_var     dst, var <<
657 #ifdef CONFIG_RELOCATABLE                      << 
658         ldr             \dst, .L__tramp_data_\var <<
659         .ifndef         .L__tramp_data_\var    << 
660         .pushsection    ".entry.tramp.rodata", "a" <<
661         .align          3                      << 
662 .L__tramp_data_\var:                           << 
663         .quad           \var                   << 
664         .popsection                            << 
665         .endif                                 << 
666 #else                                             107 #else
667         /*                                     !! 108         and     v0, ST0_IE
668          * As !RELOCATABLE implies !RANDOMIZE_BASE the address is always a <<
669          * compile time constant (and hence not secret and not worth hiding). <<
670          *                                     <<
671          * As statically allocated kernel code and data always live in the top <<
672          * 47 bits of the address space we can sign-extend bit 47 and avoid an <<
673          * instruction to load the upper 16 bits (which must be 0xFFFF). <<
674          */                                    << 
675         movz            \dst, :abs_g2_s:\var   << 
676         movk            \dst, :abs_g1_nc:\var  << 
677         movk            \dst, :abs_g0_nc:\var  << 
678 #endif                                            109 #endif
679         .endm                                  !! 110         beqz    v0, 1f
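
Why three movz/movk instructions suffice in the !RELOCATABLE branch: kernel symbols live at the top of the address space, so bits [63:48] are all ones and can be produced by sign-extending bit 47 (which :abs_g2_s: does). A C model of materializing such an address from 16-bit chunks, as a sketch only:

#include <stdint.h>

static uint64_t materialize_kernel_va(uint16_t g2, uint16_t g1, uint16_t g0)
{
	uint64_t va = ((uint64_t)g2 << 32) | ((uint64_t)g1 << 16) | g0;

	/* movz :abs_g2_s: sign-extends from bit 47, so the top 16 bits
	 * become copies of bit 47 (0xFFFF for kernel addresses). */
	if (va & (1UL << 47))
		va |= 0xffffUL << 48;
	return va;
}
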
680                                                !! 111         jal     trace_hardirqs_on
681 #define BHB_MITIGATION_NONE     0              << 
682 #define BHB_MITIGATION_LOOP     1              << 
683 #define BHB_MITIGATION_FW       2              << 
684 #define BHB_MITIGATION_INSN     3              << 
685                                                << 
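
The four BHB_MITIGATION_* values above select which Spectre-BHB sequence tramp_ventry emits. A hedged C rendering of that mapping, for orientation only (the strings paraphrase the macros used below):

enum bhb_mitigation {
	BHB_MITIGATION_NONE = 0,
	BHB_MITIGATION_LOOP = 1,
	BHB_MITIGATION_FW   = 2,
	BHB_MITIGATION_INSN = 3,
};

static const char *bhb_mitigation_desc(enum bhb_mitigation bhb)
{
	switch (bhb) {
	case BHB_MITIGATION_LOOP: return "__mitigate_spectre_bhb_loop";
	case BHB_MITIGATION_FW:   return "__mitigate_spectre_bhb_fw (firmware call)";
	case BHB_MITIGATION_INSN: return "clearbhb instruction + isb";
	default:                  return "no mitigation";
	}
}
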
686         .macro tramp_ventry, vector_start, regsize, kpti, bhb <<
687         .align  7                              << 
688 1:                                             << 
689         .if     \regsize == 64                 << 
690         msr     tpidrro_el0, x30        // Restored in kernel_ventry <<
691         .endif                                 << 
692                                                << 
693         .if     \bhb == BHB_MITIGATION_LOOP    << 
694         /*                                     << 
695          * This sequence must appear before the first indirect branch. i.e. the <<
696          * ret out of tramp_ventry. It appears here because x30 is free. <<
697          */                                    << 
698         __mitigate_spectre_bhb_loop     x30    << 
699         .endif // \bhb == BHB_MITIGATION_LOOP  << 
700                                                << 
701         .if     \bhb == BHB_MITIGATION_INSN    << 
702         clearbhb                               << 
703         isb                                    << 
704         .endif // \bhb == BHB_MITIGATION_INSN  << 
705                                                << 
706         .if     \kpti == 1                     << 
707         /*                                     << 
708          * Defend against branch aliasing attacks by pushing a dummy <<
709          * entry onto the return stack and using a RET instruction to <<
710          * enter the full-fat kernel vectors.  << 
711          */                                    << 
712         bl      2f                             << 
713         b       .                              << 
714 2:                                             << 
715         tramp_map_kernel        x30            << 
716 alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003 <<
717         tramp_data_read_var     x30, vectors   << 
718 alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM <<
719         prfm    plil1strm, [x30, #(1b - \vector_start)] <<
720 alternative_else_nop_endif                     << 
721                                                << 
722         msr     vbar_el1, x30                  << 
723         isb                                    << 
724         .else                                  << 
725         adr_l   x30, vectors                   << 
726         .endif // \kpti == 1                   << 
727                                                << 
728         .if     \bhb == BHB_MITIGATION_FW      << 
729         /*                                     << 
730          * The firmware sequence must appear before the first indirect branch. <<
731          * i.e. the ret out of tramp_ventry. But it also needs the stack to be <<
732          * mapped to save/restore the registers the SMC clobbers. <<
733          */                                    << 
734         __mitigate_spectre_bhb_fw              << 
735         .endif // \bhb == BHB_MITIGATION_FW    << 
736                                                << 
737         add     x30, x30, #(1b - \vector_start + 4) <<
738         ret                                    << 
739 .org 1b + 128   // Did we overflow the ventry slot? <<
740         .endm                                  << 
741                                                << 
742         .macro  generate_tramp_vector,  kpti, bhb <<
743 .Lvector_start\@:                              << 
744         .space  0x400                          << 
745                                                << 
746         .rept   4                              << 
747         tramp_ventry    .Lvector_start\@, 64, \kpti, \bhb <<
748         .endr                                  << 
749         .rept   4                              << 
750         tramp_ventry    .Lvector_start\@, 32, \kpti, \bhb <<
751         .endr                                  << 
752         .endm                                  << 
753                                                << 
754 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0              << 
755 /*                                             << 
756  * Exception vectors trampoline.               << 
757  * The order must match __bp_harden_el1_vectors and the <<
758  * arm64_bp_harden_el1_vectors enum.           << 
759  */                                            << 
760         .pushsection ".entry.tramp.text", "ax" << 
761         .align  11                             << 
762 SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors)    << 
763 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY  << 
764         generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_LOOP <<
765         generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_FW <<
766         generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_INSN <<
767 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ <<
768         generate_tramp_vector   kpti=1, bhb=BHB_MITIGATION_NONE <<
769 SYM_CODE_END(tramp_vectors)                    << 
770                                                << 
771 SYM_CODE_START_LOCAL(tramp_exit)               << 
772         tramp_unmap_kernel      x29            << 
773         mrs             x29, far_el1           << 
774         eret                                   << 
775         sb                                     << 
776 SYM_CODE_END(tramp_exit)                       << 
777         .popsection                            << 
778 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */        << 
779                                                << 
780 /*                                             << 
781  * Exception vectors for spectre mitigations on entry from EL1 when <<
782  * kpti is not in use.                         << 
783  */                                            << 
784         .macro generate_el1_vector, bhb        << 
785 .Lvector_start\@:                              << 
786         kernel_ventry   1, t, 64, sync          // Synchronous EL1t <<
787         kernel_ventry   1, t, 64, irq           // IRQ EL1t <<
788         kernel_ventry   1, t, 64, fiq           // FIQ EL1t <<
789         kernel_ventry   1, t, 64, error         // Error EL1t <<
790                                                 <<
791         kernel_ventry   1, h, 64, sync          // Synchronous EL1h <<
792         kernel_ventry   1, h, 64, irq           // IRQ EL1h <<
793         kernel_ventry   1, h, 64, fiq           // FIQ EL1h <<
794         kernel_ventry   1, h, 64, error         // Error EL1h <<
795                                                << 
796         .rept   4                              << 
797         tramp_ventry    .Lvector_start\@, 64, 0, \bhb <<
798         .endr                                  << 
799         .rept 4                                << 
800         tramp_ventry    .Lvector_start\@, 32, 0, \bhb <<
801         .endr                                  << 
802         .endm                                  << 
803                                                << 
804 /* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */ <<
805         .pushsection ".entry.text", "ax"       << 
806         .align  11                             << 
807 SYM_CODE_START(__bp_harden_el1_vectors)        << 
808 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY  << 
809         generate_el1_vector     bhb=BHB_MITIGATION_LOOP <<
810         generate_el1_vector     bhb=BHB_MITIGATION_FW <<
811         generate_el1_vector     bhb=BHB_MITIGATION_INSN <<
812 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */ <<
813 SYM_CODE_END(__bp_harden_el1_vectors)          << 
814         .popsection                            << 
815                                                << 
816                                                << 
817 /*                                             << 
818  * Register switch for AArch64. The callee-saved registers need to be saved <<
819  * and restored. On entry:                     <<
820  *   x0 = previous task_struct (must be preserved across the switch) <<
821  *   x1 = next task_struct                     <<
822  * Previous and next are guaranteed not to be the same. <<
823  *                                             << 
824  */                                            << 
825 SYM_FUNC_START(cpu_switch_to)                  << 
826         mov     x10, #THREAD_CPU_CONTEXT       << 
827         add     x8, x0, x10                    << 
828         mov     x9, sp                         << 
829         stp     x19, x20, [x8], #16            << 
830         stp     x21, x22, [x8], #16            << 
831         stp     x23, x24, [x8], #16            << 
832         stp     x25, x26, [x8], #16            << 
833         stp     x27, x28, [x8], #16            << 
834         stp     x29, x9, [x8], #16             << 
835         str     lr, [x8]                       << 
836         add     x8, x1, x10                    << 
837         ldp     x19, x20, [x8], #16            << 
838         ldp     x21, x22, [x8], #16            << 
839         ldp     x23, x24, [x8], #16            << 
840         ldp     x25, x26, [x8], #16            << 
841         ldp     x27, x28, [x8], #16            << 
842         ldp     x29, x9, [x8], #16             << 
843         ldr     lr, [x8]                       << 
844         mov     sp, x9                         << 
845         msr     sp_el0, x1                     << 
846         ptrauth_keys_install_kernel x1, x8, x9, x10 <<
847         scs_save x0                            << 
848         scs_load_current                       << 
849         ret                                    << 
850 SYM_FUNC_END(cpu_switch_to)                    << 
851 NOKPROBE(cpu_switch_to)                        << 
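
What cpu_switch_to saves and restores, as a C-level sketch; the real struct cpu_context lives in arch/arm64/include/asm/processor.h, and this merely mirrors the stp/ldp pairs above:

#include <stdint.h>

struct cpu_context_sketch {
	uint64_t x19, x20, x21, x22, x23, x24, x25, x26, x27, x28;
	uint64_t fp;  /* x29 */
	uint64_t sp;
	uint64_t pc;  /* the lr slot: where the next task resumes */
};
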
852                                                << 
853 /*                                             << 
854  * This is how we return from a fork.          << 
855  */                                            << 
856 SYM_CODE_START(ret_from_fork)                  << 
857         bl      schedule_tail                  << 
858         cbz     x19, 1f                         // not a kernel thread <<
859         mov     x0, x20                        << 
860         blr     x19                            << 
861 1:      get_current_task tsk                   << 
862         mov     x0, sp                         << 
863         bl      asm_exit_to_user_mode          << 
864         b       ret_to_user                    << 
865 SYM_CODE_END(ret_from_fork)                    << 
866 NOKPROBE(ret_from_fork)                        << 
867                                                << 
868 /*                                             << 
869  * void call_on_irq_stack(struct pt_regs *regs, <<
870  *                        void (*func)(struct pt_regs *)); <<
871  *                                             <<
872  * Calls func(regs) using this CPU's irq stack and shadow irq stack. <<
873  */                                            << 
874 SYM_FUNC_START(call_on_irq_stack)              << 
875 #ifdef CONFIG_SHADOW_CALL_STACK                << 
876         get_current_task x16                   << 
877         scs_save x16                           << 
878         ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17 <<
879 #endif                                         << 
880                                                << 
881         /* Create a frame record to save our LR and SP (implicit in FP) */ <<
882         stp     x29, x30, [sp, #-16]!          << 
883         mov     x29, sp                        << 
884                                                << 
885         ldr_this_cpu x16, irq_stack_ptr, x17   << 
886                                                << 
887         /* Move to the new stack and call the function */ <<
888         add     sp, x16, #IRQ_STACK_SIZE       << 
889         blr     x1                             << 
890                                                << 
891         /*                                     << 
892          * Restore the SP from the FP, and restore the FP and LR from the frame <<
893          * record.                             << 
894          */                                    << 
895         mov     sp, x29                        << 
896         ldp     x29, x30, [sp], #16            << 
897         scs_load_current                       << 
898         ret                                    << 
899 SYM_FUNC_END(call_on_irq_stack)                << 
900 NOKPROBE(call_on_irq_stack)                    << 
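
A hypothetical C-side use of call_on_irq_stack(), matching the prototype in the comment above; handle_irq_sketch is a stand-in, not a kernel function:

struct pt_regs;

extern void call_on_irq_stack(struct pt_regs *regs,
			      void (*func)(struct pt_regs *));

static void handle_irq_sketch(struct pt_regs *regs) { /* ... */ }

static void do_irq_sketch(struct pt_regs *regs)
{
	/* func runs on this CPU's IRQ stack; SP is restored on return */
	call_on_irq_stack(regs, handle_irq_sketch);
}
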
901                                                << 
902 #ifdef CONFIG_ARM_SDE_INTERFACE                << 
903                                                << 
904 #include <asm/sdei.h>                          << 
905 #include <uapi/linux/arm_sdei.h>               << 
906                                                << 
907 .macro sdei_handler_exit exit_mode             << 
908         /* On success, this call never returns... */ <<
909         cmp     \exit_mode, #SDEI_EXIT_SMC     << 
910         b.ne    99f                            << 
911         smc     #0                             << 
912         b       .                              << 
913 99:     hvc     #0                             << 
914         b       .                              << 
915 .endm                                          << 
916                                                << 
917 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0              << 
918 /*                                             << 
919  * The regular SDEI entry point may have been unmapped along with the rest of <<
920  * the kernel. This trampoline restores the kernel mapping to make the x1 memory <<
921  * argument accessible.                        << 
922  *                                             << 
923  * This clobbers x4, __sdei_handler() will restore this from firmware's <<
924  * copy.                                       << 
925  */                                            << 
926 .pushsection ".entry.tramp.text", "ax"         << 
927 SYM_CODE_START(__sdei_asm_entry_trampoline)    << 
928         mrs     x4, ttbr1_el1                  << 
929         tbz     x4, #USER_ASID_BIT, 1f         << 
930                                                << 
931         tramp_map_kernel tmp=x4                << 
932         isb                                    << 
933         mov     x4, xzr                        << 
934                                                << 
935         /*                                     << 
936          * Remember whether to unmap the kernel on exit. <<
937          */                                    << 
938 1:      str     x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)] <<
939         tramp_data_read_var     x4, __sdei_asm_handler <<
940         br      x4                             << 
941 SYM_CODE_END(__sdei_asm_entry_trampoline)      << 
942 NOKPROBE(__sdei_asm_entry_trampoline)          << 
943                                                << 
944 /*                                             << 
945  * Make the exit call and restore the original ttbr1_el1   << 
946  *                                             << 
947  * x0 & x1: setup for the exit API call        << 
948  * x2: exit_mode                               << 
949  * x4: struct sdei_registered_event argument from registration time.   << 
950  */                                            << 
951 SYM_CODE_START(__sdei_asm_exit_trampoline)     << 
952         ldr     x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]   << 
953         cbnz    x4, 1f                         << 
954                                                << 
955         tramp_unmap_kernel      tmp=x4         << 
956                                                << 
957 1:      sdei_handler_exit exit_mode=x2         << 
958 SYM_CODE_END(__sdei_asm_exit_trampoline)       << 
959 NOKPROBE(__sdei_asm_exit_trampoline)           << 
960 .popsection             // .entry.tramp.text   << 
961 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */        << 
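
Read together, the two trampolines form a remember-and-test pair: on entry,
either the kernel was already mapped (the live, non-zero TTBR1 value is
stored) or the kernel has just been mapped for this event (zero is stored);
on exit, a stored zero is the cue to unmap again before the firmware call.
A hedged C restatement of that handshake, with invented struct and function
names:

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        struct sdei_saved_state {
                uint64_t saved_ttbr1;   /* zero => unmap the kernel on exit */
        };

        static void entry_trampoline(struct sdei_saved_state *s, uint64_t ttbr1,
                                     bool user_asid_active)
        {
                if (user_asid_active) {
                        /* tramp_map_kernel would switch TTBR1 to kernel tables */
                        s->saved_ttbr1 = 0;
                } else {
                        s->saved_ttbr1 = ttbr1; /* kernel was mapped all along */
                }
        }

        static void exit_trampoline(const struct sdei_saved_state *s)
        {
                if (s->saved_ttbr1 == 0) {
                        /* tramp_unmap_kernel would restore the user tables */
                        puts("unmapping kernel before the exit call");
                }
                puts("sdei_handler_exit: smc/hvc");
        }

        int main(void)
        {
                struct sdei_saved_state s;

                entry_trampoline(&s, 0xffff000012345678ull, true);
                exit_trampoline(&s);
                return 0;
        }
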
962                                                << 
963 /*                                             << 
964  * Software Delegated Exception entry point.   << 
965  *                                             << 
966  * x0: Event number                            << 
967  * x1: struct sdei_registered_event argument from registration time.   << 
968  * x2: interrupted PC                          << 
969  * x3: interrupted PSTATE                      << 
970  * x4: maybe clobbered by the trampoline       << 
971  *                                             << 
972  * Firmware has preserved x0->x17 for us, we must save/restore the rest to   << 
973  * follow SMC-CC. We save (or retrieve) all the registers as the handler may   << 
974  * want them.                                  << 
975  */                                            << 
976 SYM_CODE_START(__sdei_asm_handler)             << 
977         stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]   << 
978         stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]   << 
979         stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]   << 
980         stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]   << 
981         stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]   << 
982         stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]   << 
983         stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]   << 
984         stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]   << 
985         stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]   << 
986         stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]   << 
987         stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]   << 
988         stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]   << 
989         stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]   << 
990         stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]   << 
991         mov     x4, sp                         << 
992         stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]   << 
993                                                << 
994         mov     x19, x1                        << 
995                                                << 
996         /* Store the registered-event for crash_smp_send_stop() */   << 
997         ldrb    w4, [x19, #SDEI_EVENT_PRIORITY]   << 
998         cbnz    w4, 1f                         << 
999         adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6   << 
1000         b       2f                            << 
1001 1:      adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6   << 
1002 2:      str     x19, [x5]                     << 
1003                                               << 
1004 #ifdef CONFIG_VMAP_STACK                      << 
1005         /*                                    << 
1006          * entry.S may have been using sp as a scratch register, find whether   << 
1007          * this is a normal or critical event and switch to the appropriate   << 
1008          * stack for this CPU.                << 
1009          */                                   << 
1010         cbnz    w4, 1f                        << 
1011         ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6   << 
1012         b       2f                               112         b       2f
1013 1:      ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6   !! 113 1:      jal     trace_hardirqs_off
1014 2:      mov     x6, #SDEI_STACK_SIZE          !! 114 2:
1015         add     x5, x5, x6                    !! 115         RESTORE_TEMP
1016         mov     sp, x5                        !! 116         RESTORE_AT
1017 #endif                                        !! 117         RESTORE_STATIC
1018                                               !! 118 #endif
1019 #ifdef CONFIG_SHADOW_CALL_STACK               !! 119         RESTORE_SOME
1020         /* Use a separate shadow call stack for normal and critical events */   !! 120         RESTORE_SP_AND_RET
1021         cbnz    w4, 3f                        !! 121         .set    at
1022         ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6   !! 122 
1023         b       4f                            !! 123 work_pending:
1024 3:      ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6   !! 124         andi    t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
1025 4:                                            !! 125         beqz    t0, work_notifysig
1026 #endif                                        !! 126 work_resched:
1027                                               !! 127         TRACE_IRQS_OFF
1028         /*                                    !! 128         jal     schedule
1029          * We may have interrupted userspace, or a guest, or exit-from or   !! 129 
1030          * return-to either of these. We can't trust sp_el0, restore it.   !! 130         local_irq_disable               # make sure need_resched and
1031          */                                   !! 131                                         # signals dont change between
1032         mrs     x28, sp_el0                   !! 132                                         # sampling and return
1033         ldr_this_cpu    dst=x0, sym=__entry_task, tmp=x1   !! 133         LONG_L  a2, TI_FLAGS($28)
1034         msr     sp_el0, x0                    !! 134         andi    t0, a2, _TIF_WORK_MASK  # is there any work to be done
1035                                               !! 135                                         # other than syscall tracing?
1036         /* If we interrupted the kernel point to the previous stack/frame. */   !! 136         beqz    t0, restore_all
1037         and     x0, x3, #0xc                  !! 137         andi    t0, a2, _TIF_NEED_RESCHED
1038         mrs     x1, CurrentEL                 !! 138         bnez    t0, work_resched
1039         cmp     x0, x1                        !! 139 
1040         csel    x29, x29, xzr, eq       // fp, or zero   !! 140 work_notifysig:                         # deal with pending signals and
1041         csel    x4, x2, xzr, eq         // elr, or zero   !! 141                                         # notify-resume requests
1042                                               !! 142         move    a0, sp
1043         stp     x29, x4, [sp, #-16]!          !! 143         li      a1, 0
1044         mov     x29, sp                       !! 144         jal     do_notify_resume        # a2 already loaded
1045                                               !! 145         j       resume_userspace_check
1046         add     x0, x19, #SDEI_EVENT_INTREGS  !! 146 
1047         mov     x1, x19                       !! 147 FEXPORT(syscall_exit_partial)
1048         bl      __sdei_handler                !! 148 #ifdef CONFIG_DEBUG_RSEQ
1049                                               !! 149         move    a0, sp
1050         msr     sp_el0, x28                   !! 150         jal     rseq_syscall
1051         /* restore regs >x17 that we clobbered */   !! 151 #endif
1052         mov     x4, x19         // keep x4 for __sdei_asm_exit_trampoline   !! 152         local_irq_disable               # make sure need_resched doesn't
1053         ldp     x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]   !! 153                                         # change between and return
1054         ldp     x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]   !! 154         LONG_L  a2, TI_FLAGS($28)       # current->work
1055         ldp     lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]   !! 155         li      t0, _TIF_ALLWORK_MASK
1056         mov     sp, x1                        !! 156         and     t0, a2
1057                                               !! 157         beqz    t0, restore_partial
1058         mov     x1, x0                  // address to complete_and_resume   !! 158         SAVE_STATIC
1059         /* x0 = (x0 <= SDEI_EV_FAILED) ?      !! 159 syscall_exit_work:
1060          * EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME   !! 160         LONG_L  t0, PT_STATUS(sp)               # returning to kernel mode?
1061          */                                   !! 161         andi    t0, t0, KU_USER
1062         cmp     x0, #SDEI_EV_FAILED           !! 162         beqz    t0, resume_kernel
1063         mov_q   x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE   !! 163         li      t0, _TIF_WORK_SYSCALL_EXIT
1064         mov_q   x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME   !! 164         and     t0, a2                  # a2 is preloaded with TI_FLAGS
1065         csel    x0, x2, x3, ls                !! 165         beqz    t0, work_pending        # trace bit set?
1066                                               !! 166         local_irq_enable                # could let syscall_trace_leave()
1067         ldr_l   x2, sdei_exit_mode            !! 167                                         # call schedule() instead
1068                                               !! 168         TRACE_IRQS_ON
1069         /* Clear the registered-event seen by crash_smp_send_stop() */   !! 169         move    a0, sp
1070         ldrb    w3, [x4, #SDEI_EVENT_PRIORITY]   !! 170         jal     syscall_trace_leave
1071         cbnz    w3, 1f                        !! 171         b       resume_userspace
1072         adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6   !! 172 
1073         b       2f                            !! 173 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \
1074 1:      adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6   !! 174     defined(CONFIG_MIPS_MT)
1075 2:      str     xzr, [x5]                     !! 175 
1076                                               !! 176 /*
1077 alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0  !! 177  * MIPS32R2 Instruction Hazard Barrier - must be called
1078         sdei_handler_exit exit_mode=x2        !! 178  *
1079 alternative_else_nop_endif                    !! 179  * For C code use the inline version named instruction_hazard().
1080                                               !! 180  */
1081 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0             !! 181 LEAF(mips_ihb)
1082         tramp_alias     dst=x5, sym=__sdei_asm_exit_trampoline   !! 182         .set    MIPS_ISA_LEVEL_RAW
1083         br      x5                            !! 183         jr.hb   ra
1084 #endif                                        !! 184         nop
1085 SYM_CODE_END(__sdei_asm_handler)              !! 185         END(mips_ihb)
1086 NOKPROBE(__sdei_asm_handler)                  << 
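
The tail of __sdei_asm_handler turns the C handler's return value into the
right completion call: values up to SDEI_EV_FAILED complete the event in
place, while anything larger is treated as a new PC and completed with resume
(the cmp/csel pair above). A small C model of that selection; the function IDs
follow the published SDEI 1.0 numbering but are restated here from memory, so
treat them as illustrative:

        #include <stdint.h>
        #include <stdio.h>

        #define SDEI_EV_HANDLED  0u
        #define SDEI_EV_FAILED   1u

        /* illustrative SDEI 1.0 IDs for COMPLETE / COMPLETE_AND_RESUME */
        #define FN_EVENT_COMPLETE             0xC4000025u
        #define FN_EVENT_COMPLETE_AND_RESUME  0xC4000026u

        /* mirrors: cmp x0, #SDEI_EV_FAILED; csel x0, x2, x3, ls */
        static uint32_t pick_exit_fn(uint64_t handler_ret)
        {
                return handler_ret <= SDEI_EV_FAILED ? FN_EVENT_COMPLETE
                                                     : FN_EVENT_COMPLETE_AND_RESUME;
        }

        int main(void)
        {
                printf("handled: 0x%x\n", pick_exit_fn(SDEI_EV_HANDLED));
                printf("resume:  0x%x\n", pick_exit_fn(0xffff000010080000ull));
                return 0;
        }
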
1087                                                  186 
1088 SYM_CODE_START(__sdei_handler_abort)          !! 187 #endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
1089         mov_q   x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME   << 
1090         adr     x1, 1f                        << 
1091         ldr_l   x2, sdei_exit_mode            << 
1092         sdei_handler_exit exit_mode=x2        << 
1093         // exit the handler and jump to the next instruction.   << 
1094         // Exit will stomp x0-x17, PSTATE, ELR_EL3 and SPSR_EL3.   << 
1095 1:      ret                                   << 
1096 SYM_CODE_END(__sdei_handler_abort)            << 
1097 NOKPROBE(__sdei_handler_abort)                << 
1098 #endif /* CONFIG_ARM_SDE_INTERFACE */         << 
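
On the MIPS side of this diff, mips_ihb is the subroutine form of the
instruction hazard barrier; as its comment notes, C code reaches the same
barrier through instruction_hazard() from <asm/hazards.h>. A hedged,
kernel-context sketch of the usual pairing when instructions are rewritten at
runtime; patch_and_sync() is invented for illustration and compiles only
inside a MIPS kernel tree:

        #include <asm/cacheflush.h>
        #include <asm/hazards.h>

        /* illustrative helper: publish one rewritten instruction */
        static void patch_and_sync(unsigned int *slot, unsigned int new_insn)
        {
                *slot = new_insn;               /* write the new opcode */
                flush_icache_range((unsigned long)slot,
                                   (unsigned long)(slot + 1));  /* sync I-cache */
                instruction_hazard();           /* clear the pipeline hazard */
        }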
                                                      
