~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/arm64/kernel/entry.S

Version: ~ [ linux-6.12-rc7 ] ~ [ linux-6.11.7 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.60 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.116 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.171 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.229 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.285 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.323 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.12 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /arch/arm64/kernel/entry.S (Version linux-6.12-rc7) and /arch/sparc/kernel/entry.S (Version linux-4.11.12)


  1 /* SPDX-License-Identifier: GPL-2.0-only */    !!   1 /* arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
  2 /*                                             << 
  3  * Low-level exception handling code           << 
  4  *                                                  2  *
  5  * Copyright (C) 2012 ARM Ltd.                 !!   3  * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
  6  * Authors:     Catalin Marinas <catalin.marina !!   4  * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
  7  *              Will Deacon <will.deacon@arm.co !!   5  * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
                                                   >>   6  * Copyright (C) 1996-1999 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
                                                   >>   7  * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
  8  */                                                 8  */
  9                                                     9 
 10 #include <linux/arm-smccc.h>                   << 
 11 #include <linux/init.h>                        << 
 12 #include <linux/linkage.h>                         10 #include <linux/linkage.h>
                                                   >>  11 #include <linux/errno.h>
 13                                                    12 
 14 #include <asm/alternative.h>                   !!  13 #include <asm/head.h>
 15 #include <asm/assembler.h>                     !!  14 #include <asm/asi.h>
 16 #include <asm/asm-offsets.h>                   !!  15 #include <asm/smp.h>
 17 #include <asm/asm_pointer_auth.h>              !!  16 #include <asm/contregs.h>
 18 #include <asm/bug.h>                           << 
 19 #include <asm/cpufeature.h>                    << 
 20 #include <asm/errno.h>                         << 
 21 #include <asm/esr.h>                           << 
 22 #include <asm/irq.h>                           << 
 23 #include <asm/memory.h>                        << 
 24 #include <asm/mmu.h>                           << 
 25 #include <asm/processor.h>                     << 
 26 #include <asm/ptrace.h>                            17 #include <asm/ptrace.h>
 27 #include <asm/scs.h>                           !!  18 #include <asm/asm-offsets.h>
                                                   >>  19 #include <asm/psr.h>
                                                   >>  20 #include <asm/vaddrs.h>
                                                   >>  21 #include <asm/page.h>
                                                   >>  22 #include <asm/pgtable.h>
                                                   >>  23 #include <asm/winmacro.h>
                                                   >>  24 #include <asm/signal.h>
                                                   >>  25 #include <asm/obio.h>
                                                   >>  26 #include <asm/mxcc.h>
 28 #include <asm/thread_info.h>                       27 #include <asm/thread_info.h>
 29 #include <asm/asm-uaccess.h>                   !!  28 #include <asm/param.h>
 30 #include <asm/unistd.h>                            29 #include <asm/unistd.h>
 31                                                    30 
 32         .macro  clear_gp_regs                  !!  31 #include <asm/asmmacro.h>
 33         .irp    n,0,1,2,3,4,5,6,7,8,9,10,11,12 !!  32 #include <asm/export.h>
 34         mov     x\n, xzr                       << 
 35         .endr                                  << 
 36         .endm                                  << 
 37                                                << 
 38         .macro kernel_ventry, el:req, ht:req,  << 
 39         .align 7                               << 
 40 .Lventry_start\@:                              << 
 41         .if     \el == 0                       << 
 42         /*                                     << 
 43          * This must be the first instruction  << 
 44          * skipped by the trampoline vectors,  << 
 45          */                                    << 
 46         b       .Lskip_tramp_vectors_cleanup\@ << 
 47         .if     \regsize == 64                 << 
 48         mrs     x30, tpidrro_el0               << 
 49         msr     tpidrro_el0, xzr               << 
 50         .else                                  << 
 51         mov     x30, xzr                       << 
 52         .endif                                 << 
 53 .Lskip_tramp_vectors_cleanup\@:                << 
 54         .endif                                 << 
 55                                                    33 
 56         sub     sp, sp, #PT_REGS_SIZE          !!  34 #define curptr      g6
 57 #ifdef CONFIG_VMAP_STACK                       << 
 58         /*                                     << 
 59          * Test whether the SP has overflowed, << 
 60          * Task and IRQ stacks are aligned so  << 
 61          * should always be zero.              << 
 62          */                                    << 
 63         add     sp, sp, x0                     << 
 64         sub     x0, sp, x0                     << 
 65         tbnz    x0, #THREAD_SHIFT, 0f          << 
 66         sub     x0, sp, x0                     << 
 67         sub     sp, sp, x0                     << 
 68         b       el\el\ht\()_\regsize\()_\label << 
 69                                                    35 
 70 0:                                             !!  36 /* These are just handy. */
 71         /*                                     !!  37 #define _SV     save    %sp, -STACKFRAME_SZ, %sp
 72          * Either we've just detected an overf !!  38 #define _RS     restore 
 73          * while on the overflow stack. Either !!  39 
 74          * userspace, and can clobber EL0 regi !!  40 #define FLUSH_ALL_KERNEL_WINDOWS \
 75          */                                    !!  41         _SV; _SV; _SV; _SV; _SV; _SV; _SV; \
 76                                                !!  42         _RS; _RS; _RS; _RS; _RS; _RS; _RS;
 77         /* Stash the original SP (minus PT_REG << 
 78         msr     tpidr_el0, x0                  << 
 79                                                    43 
 80         /* Recover the original x0 value and s !!  44         .text
 81         sub     x0, sp, x0                     << 
 82         msr     tpidrro_el0, x0                << 
 83                                                    45 
 84         /* Switch to the overflow stack */     !!  46 #ifdef CONFIG_KGDB
 85         adr_this_cpu sp, overflow_stack + OVER !!  47         .align  4
                                                   >>  48         .globl          arch_kgdb_breakpoint
                                                   >>  49         .type           arch_kgdb_breakpoint,#function
                                                   >>  50 arch_kgdb_breakpoint:
                                                   >>  51         ta              0x7d
                                                   >>  52         retl
                                                   >>  53          nop
                                                   >>  54         .size           arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
                                                   >>  55 #endif
                                                   >>  56 
                                                   >>  57 #if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
                                                   >>  58         .align  4
                                                   >>  59         .globl  floppy_hardint
                                                   >>  60 floppy_hardint:
                                                   >>  61         /*
                                                   >>  62          * This code cannot touch registers %l0 %l1 and %l2
                                                   >>  63          * because SAVE_ALL depends on their values. It depends
                                                   >>  64          * on %l3 also, but we regenerate it before a call.
                                                   >>  65          * Other registers are:
                                                   >>  66          * %l3 -- base address of fdc registers
                                                   >>  67          * %l4 -- pdma_vaddr
                                                   >>  68          * %l5 -- scratch for ld/st address
                                                   >>  69          * %l6 -- pdma_size
                                                   >>  70          * %l7 -- scratch [floppy byte, ld/st address, aux. data]
                                                   >>  71          */
                                                   >>  72 
                                                   >>  73         /* Do we have work to do? */
                                                   >>  74         sethi   %hi(doing_pdma), %l7
                                                   >>  75         ld      [%l7 + %lo(doing_pdma)], %l7
                                                   >>  76         cmp     %l7, 0
                                                   >>  77         be      floppy_dosoftint
                                                   >>  78          nop
                                                   >>  79 
                                                   >>  80         /* Load fdc register base */
                                                   >>  81         sethi   %hi(fdc_status), %l3
                                                   >>  82         ld      [%l3 + %lo(fdc_status)], %l3
                                                   >>  83 
                                                   >>  84         /* Setup register addresses */
                                                   >>  85         sethi   %hi(pdma_vaddr), %l5    ! transfer buffer
                                                   >>  86         ld      [%l5 + %lo(pdma_vaddr)], %l4
                                                   >>  87         sethi   %hi(pdma_size), %l5     ! bytes to go
                                                   >>  88         ld      [%l5 + %lo(pdma_size)], %l6
                                                   >>  89 next_byte:
                                                   >>  90         ldub    [%l3], %l7
                                                   >>  91 
                                                   >>  92         andcc   %l7, 0x80, %g0          ! Does fifo still have data
                                                   >>  93         bz      floppy_fifo_emptied     ! fifo has been emptied...
                                                   >>  94          andcc  %l7, 0x20, %g0          ! in non-dma mode still?
                                                   >>  95         bz      floppy_overrun          ! nope, overrun
                                                   >>  96          andcc  %l7, 0x40, %g0          ! 0=write 1=read
                                                   >>  97         bz      floppy_write
                                                   >>  98          sub    %l6, 0x1, %l6
                                                   >>  99 
                                                   >> 100         /* Ok, actually read this byte */
                                                   >> 101         ldub    [%l3 + 1], %l7
                                                   >> 102         orcc    %g0, %l6, %g0
                                                   >> 103         stb     %l7, [%l4]
                                                   >> 104         bne     next_byte
                                                   >> 105          add    %l4, 0x1, %l4
                                                   >> 106 
                                                   >> 107         b       floppy_tdone
                                                   >> 108          nop
                                                   >> 109 
                                                   >> 110 floppy_write:
                                                   >> 111         /* Ok, actually write this byte */
                                                   >> 112         ldub    [%l4], %l7
                                                   >> 113         orcc    %g0, %l6, %g0
                                                   >> 114         stb     %l7, [%l3 + 1]
                                                   >> 115         bne     next_byte
                                                   >> 116          add    %l4, 0x1, %l4
                                                   >> 117 
                                                   >> 118         /* fall through... */
                                                   >> 119 floppy_tdone:
                                                   >> 120         sethi   %hi(pdma_vaddr), %l5
                                                   >> 121         st      %l4, [%l5 + %lo(pdma_vaddr)]
                                                   >> 122         sethi   %hi(pdma_size), %l5
                                                   >> 123         st      %l6, [%l5 + %lo(pdma_size)]
                                                   >> 124         /* Flip terminal count pin */
                                                   >> 125         set     auxio_register, %l7
                                                   >> 126         ld      [%l7], %l7
                                                   >> 127 
                                                   >> 128         ldub    [%l7], %l5
                                                   >> 129 
                                                   >> 130         or      %l5, 0xc2, %l5
                                                   >> 131         stb     %l5, [%l7]
                                                   >> 132         andn    %l5, 0x02, %l5
 86                                                   133 
 87         /*                                     !! 134 2:
 88          * Check whether we were already on th !! 135         /* Kill some time so the bits set */
 89          * after panic() re-enables interrupts !! 136         WRITE_PAUSE
 90          */                                    !! 137         WRITE_PAUSE
 91         mrs     x0, tpidr_el0                  !! 138 
 92         sub     x0, sp, x0                     !! 139         stb     %l5, [%l7]
 93         tst     x0, #~(OVERFLOW_STACK_SIZE - 1 !! 140 
 94         b.ne    __bad_stack                    !! 141         /* Prevent recursion */
 95                                                !! 142         sethi   %hi(doing_pdma), %l7
 96         /* We were already on the overflow sta !! 143         b       floppy_dosoftint
 97         sub     sp, sp, x0                     !! 144          st     %g0, [%l7 + %lo(doing_pdma)]
 98         mrs     x0, tpidrro_el0                !! 145 
 99 #endif                                         !! 146         /* We emptied the FIFO, but we haven't read everything
100         b       el\el\ht\()_\regsize\()_\label !! 147          * as of yet.  Store the current transfer address and
101 .org .Lventry_start\@ + 128     // Did we over !! 148          * bytes left to read so we can continue when the next
102         .endm                                  !! 149          * fast IRQ comes in.
103                                                !! 150          */
104         .macro  tramp_alias, dst, sym          !! 151 floppy_fifo_emptied:
105         .set    .Lalias\@, TRAMP_VALIAS + \sym !! 152         sethi   %hi(pdma_vaddr), %l5
106         movz    \dst, :abs_g2_s:.Lalias\@      !! 153         st      %l4, [%l5 + %lo(pdma_vaddr)]
107         movk    \dst, :abs_g1_nc:.Lalias\@     !! 154         sethi   %hi(pdma_size), %l7
108         movk    \dst, :abs_g0_nc:.Lalias\@     !! 155         st      %l6, [%l7 + %lo(pdma_size)]
109         .endm                                  !! 156 
                                                   >> 157         /* Restore condition codes */
                                                   >> 158         wr      %l0, 0x0, %psr
                                                   >> 159         WRITE_PAUSE
                                                   >> 160 
                                                   >> 161         jmp     %l1
                                                   >> 162         rett    %l2
                                                   >> 163 
                                                   >> 164 floppy_overrun:
                                                   >> 165         sethi   %hi(pdma_vaddr), %l5
                                                   >> 166         st      %l4, [%l5 + %lo(pdma_vaddr)]
                                                   >> 167         sethi   %hi(pdma_size), %l5
                                                   >> 168         st      %l6, [%l5 + %lo(pdma_size)]
                                                   >> 169         /* Prevent recursion */
                                                   >> 170         sethi   %hi(doing_pdma), %l7
                                                   >> 171         st      %g0, [%l7 + %lo(doing_pdma)]
                                                   >> 172 
                                                   >> 173         /* fall through... */
                                                   >> 174 floppy_dosoftint:
                                                   >> 175         rd      %wim, %l3
                                                   >> 176         SAVE_ALL
                                                   >> 177 
                                                   >> 178         /* Set all IRQs off. */
                                                   >> 179         or      %l0, PSR_PIL, %l4
                                                   >> 180         wr      %l4, 0x0, %psr
                                                   >> 181         WRITE_PAUSE
                                                   >> 182         wr      %l4, PSR_ET, %psr
                                                   >> 183         WRITE_PAUSE
                                                   >> 184 
                                                   >> 185         mov     11, %o0                 ! floppy irq level (unused anyway)
                                                   >> 186         mov     %g0, %o1                ! devid is not used in fast interrupts
                                                   >> 187         call    sparc_floppy_irq
                                                   >> 188          add    %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs
                                                   >> 189 
                                                   >> 190         RESTORE_ALL
                                                   >> 191         
                                                   >> 192 #endif /* (CONFIG_BLK_DEV_FD) */
                                                   >> 193 
                                                   >> 194         /* Bad trap handler */
                                                   >> 195         .globl  bad_trap_handler
                                                   >> 196 bad_trap_handler:
                                                   >> 197         SAVE_ALL
                                                   >> 198 
                                                   >> 199         wr      %l0, PSR_ET, %psr
                                                   >> 200         WRITE_PAUSE
                                                   >> 201 
                                                   >> 202         add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
                                                   >> 203         call    do_hw_interrupt
                                                   >> 204          mov    %l7, %o1                ! trap number
                                                   >> 205 
                                                   >> 206         RESTORE_ALL
                                                   >> 207         
                                                   >> 208 /* For now all IRQs not registered get sent here. handler_irq() will
                                                   >> 209  * see if a routine is registered to handle this interrupt and if not
                                                   >> 210  * it will say so on the console.
                                                   >> 211  */
110                                                   212 
111         /*                                     !! 213         .align  4
112          * This macro corrupts x0-x3. It is th !! 214         .globl  real_irq_entry, patch_handler_irq
113          * them if required.                   !! 215 real_irq_entry:
114          */                                    !! 216         SAVE_ALL
115         .macro  apply_ssbd, state, tmp1, tmp2  !! 217 
116 alternative_cb  ARM64_ALWAYS_SYSTEM, spectre_v !! 218 #ifdef CONFIG_SMP
117         b       .L__asm_ssbd_skip\@            !! 219         .globl  patchme_maybe_smp_msg
118 alternative_cb_end                             !! 220 
119         ldr_this_cpu    \tmp2, arm64_ssbd_call !! 221         cmp     %l7, 11
120         cbz     \tmp2,  .L__asm_ssbd_skip\@    !! 222 patchme_maybe_smp_msg:
121         ldr     \tmp2, [tsk, #TSK_TI_FLAGS]    !! 223         bgu     maybe_smp4m_msg
122         tbnz    \tmp2, #TIF_SSBD, .L__asm_ssbd !! 224          nop
123         mov     w0, #ARM_SMCCC_ARCH_WORKAROUND !! 225 #endif
124         mov     w1, #\state                    !! 226 
125 alternative_cb  ARM64_ALWAYS_SYSTEM, smccc_pat !! 227 real_irq_continue:
126         nop                                    !! 228         or      %l0, PSR_PIL, %g2
127 alternative_cb_end                             !! 229         wr      %g2, 0x0, %psr
128 .L__asm_ssbd_skip\@:                           !! 230         WRITE_PAUSE
129         .endm                                  !! 231         wr      %g2, PSR_ET, %psr
130                                                !! 232         WRITE_PAUSE
131         /* Check for MTE asynchronous tag chec !! 233         mov     %l7, %o0                ! irq level
132         .macro check_mte_async_tcf, tmp, ti_fl !! 234 patch_handler_irq:
133 #ifdef CONFIG_ARM64_MTE                        !! 235         call    handler_irq
134         .arch_extension lse                    !! 236          add    %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
135 alternative_if_not ARM64_MTE                   !! 237         or      %l0, PSR_PIL, %g2       ! restore PIL after handler_irq
136         b       1f                             !! 238         wr      %g2, PSR_ET, %psr       ! keep ET up
137 alternative_else_nop_endif                     !! 239         WRITE_PAUSE
138         /*                                     !! 240 
139          * Asynchronous tag check faults are o !! 241         RESTORE_ALL
140          * ASYM (3) modes. In each of these mo !! 242 
141          * set, so skip the check if it is uns !! 243 #ifdef CONFIG_SMP
                                                   >> 244         /* SMP per-cpu ticker interrupts are handled specially. */
                                                   >> 245 smp4m_ticker:
                                                   >> 246         bne     real_irq_continue+4
                                                   >> 247          or     %l0, PSR_PIL, %g2
                                                   >> 248         wr      %g2, 0x0, %psr
                                                   >> 249         WRITE_PAUSE
                                                   >> 250         wr      %g2, PSR_ET, %psr
                                                   >> 251         WRITE_PAUSE
                                                   >> 252         call    smp4m_percpu_timer_interrupt
                                                   >> 253          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 254         wr      %l0, PSR_ET, %psr
                                                   >> 255         WRITE_PAUSE
                                                   >> 256         RESTORE_ALL
                                                   >> 257 
                                                   >> 258 #define GET_PROCESSOR4M_ID(reg) \
                                                   >> 259         rd      %tbr, %reg;     \
                                                   >> 260         srl     %reg, 12, %reg; \
                                                   >> 261         and     %reg, 3, %reg;
                                                   >> 262 
                                                   >> 263         /* Here is where we check for possible SMP IPI passed to us
                                                   >> 264          * on some level other than 15 which is the NMI and only used
                                                   >> 265          * for cross calls.  That has a separate entry point below.
                                                   >> 266          *
                                                   >> 267          * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
142          */                                       268          */
143         tbz     \thread_sctlr, #(SCTLR_EL1_TCF !! 269 maybe_smp4m_msg:
144         mrs_s   \tmp, SYS_TFSRE0_EL1           !! 270         GET_PROCESSOR4M_ID(o3)
145         tbz     \tmp, #SYS_TFSR_EL1_TF0_SHIFT, !! 271         sethi   %hi(sun4m_irq_percpu), %l5
146         /* Asynchronous TCF occurred for TTBR0 !! 272         sll     %o3, 2, %o3
147         mov     \tmp, #_TIF_MTE_ASYNC_FAULT    !! 273         or      %l5, %lo(sun4m_irq_percpu), %o5
148         add     \ti_flags, tsk, #TSK_TI_FLAGS  !! 274         sethi   %hi(0x70000000), %o2    ! Check all soft-IRQs
149         stset   \tmp, [\ti_flags]              !! 275         ld      [%o5 + %o3], %o1
150 1:                                             !! 276         ld      [%o1 + 0x00], %o3       ! sun4m_irq_percpu[cpu]->pending
151 #endif                                         !! 277         andcc   %o3, %o2, %g0
152         .endm                                  !! 278         be,a    smp4m_ticker
                                                   >> 279          cmp    %l7, 14
                                                   >> 280         /* Soft-IRQ IPI */
                                                   >> 281         st      %o2, [%o1 + 0x04]       ! sun4m_irq_percpu[cpu]->clear=0x70000000
                                                   >> 282         WRITE_PAUSE
                                                   >> 283         ld      [%o1 + 0x00], %g0       ! sun4m_irq_percpu[cpu]->pending
                                                   >> 284         WRITE_PAUSE
                                                   >> 285         or      %l0, PSR_PIL, %l4
                                                   >> 286         wr      %l4, 0x0, %psr
                                                   >> 287         WRITE_PAUSE
                                                   >> 288         wr      %l4, PSR_ET, %psr
                                                   >> 289         WRITE_PAUSE
                                                   >> 290         srl     %o3, 28, %o2            ! shift for simpler checks below
                                                   >> 291 maybe_smp4m_msg_check_single:
                                                   >> 292         andcc   %o2, 0x1, %g0
                                                   >> 293         beq,a   maybe_smp4m_msg_check_mask
                                                   >> 294          andcc  %o2, 0x2, %g0
                                                   >> 295         call    smp_call_function_single_interrupt
                                                   >> 296          nop
                                                   >> 297         andcc   %o2, 0x2, %g0
                                                   >> 298 maybe_smp4m_msg_check_mask:
                                                   >> 299         beq,a   maybe_smp4m_msg_check_resched
                                                   >> 300          andcc  %o2, 0x4, %g0
                                                   >> 301         call    smp_call_function_interrupt
                                                   >> 302          nop
                                                   >> 303         andcc   %o2, 0x4, %g0
                                                   >> 304 maybe_smp4m_msg_check_resched:
                                                   >> 305         /* rescheduling is done in RESTORE_ALL regardless, but incr stats */
                                                   >> 306         beq,a   maybe_smp4m_msg_out
                                                   >> 307          nop
                                                   >> 308         call    smp_resched_interrupt
                                                   >> 309          nop
                                                   >> 310 maybe_smp4m_msg_out:
                                                   >> 311         RESTORE_ALL
                                                   >> 312 
                                                   >> 313         .align  4
                                                   >> 314         .globl  linux_trap_ipi15_sun4m
                                                   >> 315 linux_trap_ipi15_sun4m:
                                                   >> 316         SAVE_ALL
                                                   >> 317         sethi   %hi(0x80000000), %o2
                                                   >> 318         GET_PROCESSOR4M_ID(o0)
                                                   >> 319         sethi   %hi(sun4m_irq_percpu), %l5
                                                   >> 320         or      %l5, %lo(sun4m_irq_percpu), %o5
                                                   >> 321         sll     %o0, 2, %o0
                                                   >> 322         ld      [%o5 + %o0], %o5
                                                   >> 323         ld      [%o5 + 0x00], %o3       ! sun4m_irq_percpu[cpu]->pending
                                                   >> 324         andcc   %o3, %o2, %g0
                                                   >> 325         be      sun4m_nmi_error         ! Must be an NMI async memory error
                                                   >> 326          st     %o2, [%o5 + 0x04]       ! sun4m_irq_percpu[cpu]->clear=0x80000000
                                                   >> 327         WRITE_PAUSE
                                                   >> 328         ld      [%o5 + 0x00], %g0       ! sun4m_irq_percpu[cpu]->pending
                                                   >> 329         WRITE_PAUSE
                                                   >> 330         or      %l0, PSR_PIL, %l4
                                                   >> 331         wr      %l4, 0x0, %psr
                                                   >> 332         WRITE_PAUSE
                                                   >> 333         wr      %l4, PSR_ET, %psr
                                                   >> 334         WRITE_PAUSE
                                                   >> 335         call    smp4m_cross_call_irq
                                                   >> 336          nop
                                                   >> 337         b       ret_trap_lockless_ipi
                                                   >> 338          clr    %l6
                                                   >> 339 
                                                   >> 340         .globl  smp4d_ticker
                                                   >> 341         /* SMP per-cpu ticker interrupts are handled specially. */
                                                   >> 342 smp4d_ticker:
                                                   >> 343         SAVE_ALL
                                                   >> 344         or      %l0, PSR_PIL, %g2
                                                   >> 345         sethi   %hi(CC_ICLR), %o0
                                                   >> 346         sethi   %hi(1 << 14), %o1
                                                   >> 347         or      %o0, %lo(CC_ICLR), %o0
                                                   >> 348         stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 14 in MXCC's ICLR */
                                                   >> 349         wr      %g2, 0x0, %psr
                                                   >> 350         WRITE_PAUSE
                                                   >> 351         wr      %g2, PSR_ET, %psr
                                                   >> 352         WRITE_PAUSE
                                                   >> 353         call    smp4d_percpu_timer_interrupt
                                                   >> 354          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 355         wr      %l0, PSR_ET, %psr
                                                   >> 356         WRITE_PAUSE
                                                   >> 357         RESTORE_ALL
                                                   >> 358 
                                                   >> 359         .align  4
                                                   >> 360         .globl  linux_trap_ipi15_sun4d
                                                   >> 361 linux_trap_ipi15_sun4d:
                                                   >> 362         SAVE_ALL
                                                   >> 363         sethi   %hi(CC_BASE), %o4
                                                   >> 364         sethi   %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
                                                   >> 365         or      %o4, (CC_EREG - CC_BASE), %o0
                                                   >> 366         ldda    [%o0] ASI_M_MXCC, %o0
                                                   >> 367         andcc   %o0, %o2, %g0
                                                   >> 368         bne     1f
                                                   >> 369          sethi  %hi(BB_STAT2), %o2
                                                   >> 370         lduba   [%o2] ASI_M_CTL, %o2
                                                   >> 371         andcc   %o2, BB_STAT2_MASK, %g0
                                                   >> 372         bne     2f
                                                   >> 373          or     %o4, (CC_ICLR - CC_BASE), %o0
                                                   >> 374         sethi   %hi(1 << 15), %o1
                                                   >> 375         stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 15 in MXCC's ICLR */
                                                   >> 376         or      %l0, PSR_PIL, %l4
                                                   >> 377         wr      %l4, 0x0, %psr
                                                   >> 378         WRITE_PAUSE
                                                   >> 379         wr      %l4, PSR_ET, %psr
                                                   >> 380         WRITE_PAUSE
                                                   >> 381         call    smp4d_cross_call_irq
                                                   >> 382          nop
                                                   >> 383         b       ret_trap_lockless_ipi
                                                   >> 384          clr    %l6
                                                   >> 385 
                                                   >> 386 1:      /* MXCC error */
                                                   >> 387 2:      /* BB error */
                                                   >> 388         /* Disable PIL 15 */
                                                   >> 389         set     CC_IMSK, %l4
                                                   >> 390         lduha   [%l4] ASI_M_MXCC, %l5
                                                   >> 391         sethi   %hi(1 << 15), %l7
                                                   >> 392         or      %l5, %l7, %l5
                                                   >> 393         stha    %l5, [%l4] ASI_M_MXCC
                                                   >> 394         /* FIXME */
                                                   >> 395 1:      b,a     1b
                                                   >> 396 
                                                   >> 397         .globl  smpleon_ipi
                                                   >> 398         .extern leon_ipi_interrupt
                                                   >> 399         /* SMP per-cpu IPI interrupts are handled specially. */
                                                   >> 400 smpleon_ipi:
                                                   >> 401         SAVE_ALL
                                                   >> 402         or      %l0, PSR_PIL, %g2
                                                   >> 403         wr      %g2, 0x0, %psr
                                                   >> 404         WRITE_PAUSE
                                                   >> 405         wr      %g2, PSR_ET, %psr
                                                   >> 406         WRITE_PAUSE
                                                   >> 407         call    leonsmp_ipi_interrupt
                                                   >> 408          add    %sp, STACKFRAME_SZ, %o1 ! pt_regs
                                                   >> 409         wr      %l0, PSR_ET, %psr
                                                   >> 410         WRITE_PAUSE
                                                   >> 411         RESTORE_ALL
                                                   >> 412 
                                                   >> 413         .align  4
                                                   >> 414         .globl  linux_trap_ipi15_leon
                                                   >> 415 linux_trap_ipi15_leon:
                                                   >> 416         SAVE_ALL
                                                   >> 417         or      %l0, PSR_PIL, %l4
                                                   >> 418         wr      %l4, 0x0, %psr
                                                   >> 419         WRITE_PAUSE
                                                   >> 420         wr      %l4, PSR_ET, %psr
                                                   >> 421         WRITE_PAUSE
                                                   >> 422         call    leon_cross_call_irq
                                                   >> 423          nop
                                                   >> 424         b       ret_trap_lockless_ipi
                                                   >> 425          clr    %l6
                                                   >> 426 
                                                   >> 427 #endif /* CONFIG_SMP */
                                                   >> 428 
                                                   >> 429         /* This routine handles illegal instructions and privileged
                                                   >> 430          * instruction attempts from user code.
                                                   >> 431          */
                                                   >> 432         .align  4
                                                   >> 433         .globl  bad_instruction
                                                   >> 434 bad_instruction:
                                                   >> 435         sethi   %hi(0xc1f80000), %l4
                                                   >> 436         ld      [%l1], %l5
                                                   >> 437         sethi   %hi(0x81d80000), %l7
                                                   >> 438         and     %l5, %l4, %l5
                                                   >> 439         cmp     %l5, %l7
                                                   >> 440         be      1f
                                                   >> 441         SAVE_ALL
                                                   >> 442 
                                                   >> 443         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 444         WRITE_PAUSE
                                                   >> 445 
                                                   >> 446         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 447         mov     %l1, %o1
                                                   >> 448         mov     %l2, %o2
                                                   >> 449         call    do_illegal_instruction
                                                   >> 450          mov    %l0, %o3
                                                   >> 451 
                                                   >> 452         RESTORE_ALL
                                                   >> 453 
                                                   >> 454 1:      /* unimplemented flush - just skip */
                                                   >> 455         jmpl    %l2, %g0
                                                   >> 456          rett   %l2 + 4
                                                   >> 457 
                                                   >> 458         .align  4
                                                   >> 459         .globl  priv_instruction
                                                   >> 460 priv_instruction:
                                                   >> 461         SAVE_ALL
                                                   >> 462 
                                                   >> 463         wr      %l0, PSR_ET, %psr
                                                   >> 464         WRITE_PAUSE
                                                   >> 465 
                                                   >> 466         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 467         mov     %l1, %o1
                                                   >> 468         mov     %l2, %o2
                                                   >> 469         call    do_priv_instruction
                                                   >> 470          mov    %l0, %o3
                                                   >> 471 
                                                   >> 472         RESTORE_ALL
                                                   >> 473 
                                                   >> 474         /* This routine handles unaligned data accesses. */
                                                   >> 475         .align  4
                                                   >> 476         .globl  mna_handler
                                                   >> 477 mna_handler:
                                                   >> 478         andcc   %l0, PSR_PS, %g0
                                                   >> 479         be      mna_fromuser
                                                   >> 480          nop
                                                   >> 481 
                                                   >> 482         SAVE_ALL
                                                   >> 483 
                                                   >> 484         wr      %l0, PSR_ET, %psr
                                                   >> 485         WRITE_PAUSE
                                                   >> 486 
                                                   >> 487         ld      [%l1], %o1
                                                   >> 488         call    kernel_unaligned_trap
                                                   >> 489          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 490 
                                                   >> 491         RESTORE_ALL
                                                   >> 492 
                                                   >> 493 mna_fromuser:
                                                   >> 494         SAVE_ALL
                                                   >> 495 
                                                   >> 496         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 497         WRITE_PAUSE
                                                   >> 498 
                                                   >> 499         ld      [%l1], %o1
                                                   >> 500         call    user_unaligned_trap
                                                   >> 501          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 502 
                                                   >> 503         RESTORE_ALL
                                                   >> 504 
                                                   >> 505         /* This routine handles floating point disabled traps. */
                                                   >> 506         .align  4
                                                   >> 507         .globl  fpd_trap_handler
                                                   >> 508 fpd_trap_handler:
                                                   >> 509         SAVE_ALL
                                                   >> 510 
                                                   >> 511         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 512         WRITE_PAUSE
                                                   >> 513 
                                                   >> 514         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 515         mov     %l1, %o1
                                                   >> 516         mov     %l2, %o2
                                                   >> 517         call    do_fpd_trap
                                                   >> 518          mov    %l0, %o3
                                                   >> 519 
                                                   >> 520         RESTORE_ALL
                                                   >> 521 
                                                   >> 522         /* This routine handles Floating Point Exceptions. */
                                                   >> 523         .align  4
                                                   >> 524         .globl  fpe_trap_handler
                                                   >> 525 fpe_trap_handler:
                                                   >> 526         set     fpsave_magic, %l5
                                                   >> 527         cmp     %l1, %l5
                                                   >> 528         be      1f
                                                   >> 529          sethi  %hi(fpsave), %l5
                                                   >> 530         or      %l5, %lo(fpsave), %l5
                                                   >> 531         cmp     %l1, %l5
                                                   >> 532         bne     2f
                                                   >> 533          sethi  %hi(fpsave_catch2), %l5
                                                   >> 534         or      %l5, %lo(fpsave_catch2), %l5
                                                   >> 535         wr      %l0, 0x0, %psr
                                                   >> 536         WRITE_PAUSE
                                                   >> 537         jmp     %l5
                                                   >> 538          rett   %l5 + 4
                                                   >> 539 1:      
                                                   >> 540         sethi   %hi(fpsave_catch), %l5
                                                   >> 541         or      %l5, %lo(fpsave_catch), %l5
                                                   >> 542         wr      %l0, 0x0, %psr
                                                   >> 543         WRITE_PAUSE
                                                   >> 544         jmp     %l5
                                                   >> 545          rett   %l5 + 4
153                                                   546 
154         /* Clear the MTE asynchronous tag chec !! 547 2:
155         .macro clear_mte_async_tcf thread_sctl !! 548         SAVE_ALL
156 #ifdef CONFIG_ARM64_MTE                        << 
157 alternative_if ARM64_MTE                       << 
158         /* See comment in check_mte_async_tcf  << 
159         tbz     \thread_sctlr, #(SCTLR_EL1_TCF << 
160         dsb     ish                            << 
161         msr_s   SYS_TFSRE0_EL1, xzr            << 
162 1:                                             << 
163 alternative_else_nop_endif                     << 
164 #endif                                         << 
165         .endm                                  << 
166                                                   549 
167         .macro mte_set_gcr, mte_ctrl, tmp      !! 550         wr      %l0, PSR_ET, %psr               ! re-enable traps
168 #ifdef CONFIG_ARM64_MTE                        !! 551         WRITE_PAUSE
169         ubfx    \tmp, \mte_ctrl, #MTE_CTRL_GCR << 
170         orr     \tmp, \tmp, #SYS_GCR_EL1_RRND  << 
171         msr_s   SYS_GCR_EL1, \tmp              << 
172 #endif                                         << 
173         .endm                                  << 
174                                                   552 
175         .macro mte_set_kernel_gcr, tmp, tmp2   !! 553         add     %sp, STACKFRAME_SZ, %o0
176 #ifdef CONFIG_KASAN_HW_TAGS                    !! 554         mov     %l1, %o1
177 alternative_cb  ARM64_ALWAYS_SYSTEM, kasan_hw_ !! 555         mov     %l2, %o2
178         b       1f                             !! 556         call    do_fpe_trap
179 alternative_cb_end                             !! 557          mov    %l0, %o3
180         mov     \tmp, KERNEL_GCR_EL1           !! 558 
181         msr_s   SYS_GCR_EL1, \tmp              !! 559         RESTORE_ALL
182 1:                                             !! 560 
183 #endif                                         !! 561         /* This routine handles Tag Overflow Exceptions. */
184         .endm                                  !! 562         .align  4
                                                   >> 563         .globl  do_tag_overflow
                                                   >> 564 do_tag_overflow:
                                                   >> 565         SAVE_ALL
                                                   >> 566 
                                                   >> 567         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 568         WRITE_PAUSE
                                                   >> 569 
                                                   >> 570         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 571         mov     %l1, %o1
                                                   >> 572         mov     %l2, %o2
                                                   >> 573         call    handle_tag_overflow
                                                   >> 574          mov    %l0, %o3
                                                   >> 575 
                                                   >> 576         RESTORE_ALL
                                                   >> 577 
                                                   >> 578         /* This routine handles Watchpoint Exceptions. */
                                                   >> 579         .align  4
                                                   >> 580         .globl  do_watchpoint
                                                   >> 581 do_watchpoint:
                                                   >> 582         SAVE_ALL
                                                   >> 583 
                                                   >> 584         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 585         WRITE_PAUSE
                                                   >> 586 
                                                   >> 587         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 588         mov     %l1, %o1
                                                   >> 589         mov     %l2, %o2
                                                   >> 590         call    handle_watchpoint
                                                   >> 591          mov    %l0, %o3
                                                   >> 592 
                                                   >> 593         RESTORE_ALL
                                                   >> 594 
                                                   >> 595         /* This routine handles Register Access Exceptions. */
                                                   >> 596         .align  4
                                                   >> 597         .globl  do_reg_access
                                                   >> 598 do_reg_access:
                                                   >> 599         SAVE_ALL
                                                   >> 600 
                                                   >> 601         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 602         WRITE_PAUSE
                                                   >> 603 
                                                   >> 604         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 605         mov     %l1, %o1
                                                   >> 606         mov     %l2, %o2
                                                   >> 607         call    handle_reg_access
                                                   >> 608          mov    %l0, %o3
                                                   >> 609 
                                                   >> 610         RESTORE_ALL
                                                   >> 611 
                                                   >> 612         /* This routine handles Co-Processor Disabled Exceptions. */
                                                   >> 613         .align  4
                                                   >> 614         .globl  do_cp_disabled
                                                   >> 615 do_cp_disabled:
                                                   >> 616         SAVE_ALL
                                                   >> 617 
                                                   >> 618         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 619         WRITE_PAUSE
                                                   >> 620 
                                                   >> 621         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 622         mov     %l1, %o1
                                                   >> 623         mov     %l2, %o2
                                                   >> 624         call    handle_cp_disabled
                                                   >> 625          mov    %l0, %o3
                                                   >> 626 
                                                   >> 627         RESTORE_ALL
                                                   >> 628 
                                                   >> 629         /* This routine handles Co-Processor Exceptions. */
                                                   >> 630         .align  4
                                                   >> 631         .globl  do_cp_exception
                                                   >> 632 do_cp_exception:
                                                   >> 633         SAVE_ALL
                                                   >> 634 
                                                   >> 635         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 636         WRITE_PAUSE
                                                   >> 637 
                                                   >> 638         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 639         mov     %l1, %o1
                                                   >> 640         mov     %l2, %o2
                                                   >> 641         call    handle_cp_exception
                                                   >> 642          mov    %l0, %o3
                                                   >> 643 
                                                   >> 644         RESTORE_ALL
                                                   >> 645 
                                                   >> 646         /* This routine handles Hardware Divide By Zero Exceptions. */
                                                   >> 647         .align  4
                                                   >> 648         .globl  do_hw_divzero
                                                   >> 649 do_hw_divzero:
                                                   >> 650         SAVE_ALL
                                                   >> 651 
                                                   >> 652         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 653         WRITE_PAUSE
                                                   >> 654 
                                                   >> 655         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 656         mov     %l1, %o1
                                                   >> 657         mov     %l2, %o2
                                                   >> 658         call    handle_hw_divzero
                                                   >> 659          mov    %l0, %o3
                                                   >> 660 
                                                   >> 661         RESTORE_ALL
                                                   >> 662 
                                                   >> 663         .align  4
                                                   >> 664         .globl  do_flush_windows
                                                   >> 665 do_flush_windows:
                                                   >> 666         SAVE_ALL
                                                   >> 667 
                                                   >> 668         wr      %l0, PSR_ET, %psr
                                                   >> 669         WRITE_PAUSE
                                                   >> 670 
                                                   >> 671         andcc   %l0, PSR_PS, %g0
                                                   >> 672         bne     dfw_kernel
                                                   >> 673          nop
                                                   >> 674 
                                                   >> 675         call    flush_user_windows
                                                   >> 676          nop
                                                   >> 677 
                                                   >> 678         /* Advance over the trap instruction. */
                                                   >> 679         ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1
                                                   >> 680         add     %l1, 0x4, %l2
                                                   >> 681         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 682         st      %l2, [%sp + STACKFRAME_SZ + PT_NPC]
                                                   >> 683 
                                                   >> 684         RESTORE_ALL
                                                   >> 685 
                                                   >> 686         .globl  flush_patch_one
                                                   >> 687 
                                                   >> 688         /* We get these for debugging routines using __builtin_return_address() */
                                                   >> 689 dfw_kernel:
                                                   >> 690 flush_patch_one:
                                                   >> 691         FLUSH_ALL_KERNEL_WINDOWS
                                                   >> 692 
                                                   >> 693         /* Advance over the trap instruction. */
                                                   >> 694         ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1
                                                   >> 695         add     %l1, 0x4, %l2
                                                   >> 696         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 697         st      %l2, [%sp + STACKFRAME_SZ + PT_NPC]
                                                   >> 698 
                                                   >> 699         RESTORE_ALL
                                                   >> 700 
                                                   >> 701         /* The getcc software trap.  The user wants the condition codes from
                                                   >> 702          * the %psr in register %g1.
                                                   >> 703          */
                                                   >> 704 
                                                   >> 705         .align  4
                                                   >> 706         .globl  getcc_trap_handler
                                                   >> 707 getcc_trap_handler:
                                                   >> 708         srl     %l0, 20, %g1    ! give user
                                                   >> 709         and     %g1, 0xf, %g1   ! only ICC bits in %psr
                                                   >> 710         jmp     %l2             ! advance over trap instruction
                                                   >> 711         rett    %l2 + 0x4       ! like this...
                                                   >> 712 
                                                   >> 713         /* The setcc software trap.  The user has condition codes in %g1
                                                   >> 714          * that it would like placed in the %psr.  Be careful not to flip
                                                   >> 715          * any unintentional bits!
                                                   >> 716          */
                                                   >> 717 
                                                   >> 718         .align  4
                                                   >> 719         .globl  setcc_trap_handler
                                                   >> 720 setcc_trap_handler:
                                                   >> 721         sll     %g1, 0x14, %l4
                                                   >> 722         set     PSR_ICC, %l5
                                                   >> 723         andn    %l0, %l5, %l0   ! clear ICC bits in %psr
                                                   >> 724         and     %l4, %l5, %l4   ! clear non-ICC bits in user value
                                                   >> 725         or      %l4, %l0, %l4   ! or them in... mix mix mix
                                                   >> 726 
                                                   >> 727         wr      %l4, 0x0, %psr  ! set new %psr
                                                   >> 728         WRITE_PAUSE             ! TI scumbags...
                                                   >> 729 
                                                   >> 730         jmp     %l2             ! advance over trap instruction
                                                   >> 731         rett    %l2 + 0x4       ! like this...
                                                   >> 732 
                                                   >> 733 sun4m_nmi_error:
                                                   >> 734         /* NMI async memory error handling. */
                                                   >> 735         sethi   %hi(0x80000000), %l4
                                                   >> 736         sethi   %hi(sun4m_irq_global), %o5
                                                   >> 737         ld      [%o5 + %lo(sun4m_irq_global)], %l5
                                                   >> 738         st      %l4, [%l5 + 0x0c]       ! sun4m_irq_global->mask_set=0x80000000
                                                   >> 739         WRITE_PAUSE
                                                   >> 740         ld      [%l5 + 0x00], %g0       ! sun4m_irq_global->pending
                                                   >> 741         WRITE_PAUSE
                                                   >> 742         or      %l0, PSR_PIL, %l4
                                                   >> 743         wr      %l4, 0x0, %psr
                                                   >> 744         WRITE_PAUSE
                                                   >> 745         wr      %l4, PSR_ET, %psr
                                                   >> 746         WRITE_PAUSE
                                                   >> 747         call    sun4m_nmi
                                                   >> 748          nop
                                                   >> 749         st      %l4, [%l5 + 0x08]       ! sun4m_irq_global->mask_clear=0x80000000
                                                   >> 750         WRITE_PAUSE
                                                   >> 751         ld      [%l5 + 0x00], %g0       ! sun4m_irq_global->pending
                                                   >> 752         WRITE_PAUSE
                                                   >> 753         RESTORE_ALL
                                                   >> 754 
                                                   >> 755 #ifndef CONFIG_SMP
                                                   >> 756         .align  4
                                                   >> 757         .globl  linux_trap_ipi15_sun4m
                                                   >> 758 linux_trap_ipi15_sun4m:
                                                   >> 759         SAVE_ALL
                                                   >> 760 
                                                   >> 761         ba      sun4m_nmi_error
                                                   >> 762          nop
                                                   >> 763 #endif /* CONFIG_SMP */
                                                   >> 764 
                                                   >> 765         .align  4
                                                   >> 766         .globl  srmmu_fault
                                                   >> 767 srmmu_fault:
                                                   >> 768         mov     0x400, %l5
                                                   >> 769         mov     0x300, %l4
                                                   >> 770 
                                                   >> 771 LEON_PI(lda     [%l5] ASI_LEON_MMUREGS, %l6)    ! read sfar first
                                                   >> 772 SUN_PI_(lda     [%l5] ASI_M_MMUREGS, %l6)       ! read sfar first
                                                   >> 773 
                                                   >> 774 LEON_PI(lda     [%l4] ASI_LEON_MMUREGS, %l5)    ! read sfsr last
                                                   >> 775 SUN_PI_(lda     [%l4] ASI_M_MMUREGS, %l5)       ! read sfsr last
                                                   >> 776 
                                                   >> 777         andn    %l6, 0xfff, %l6
                                                   >> 778         srl     %l5, 6, %l5                     ! and encode all info into l7
                                                   >> 779 
                                                   >> 780         and     %l5, 2, %l5
                                                   >> 781         or      %l5, %l6, %l6
                                                   >> 782 
                                                   >> 783         or      %l6, %l7, %l7                   ! l7 = [addr,write,txtfault]
                                                   >> 784 
                                                   >> 785         SAVE_ALL
                                                   >> 786 
                                                   >> 787         mov     %l7, %o1
                                                   >> 788         mov     %l7, %o2
                                                   >> 789         and     %o1, 1, %o1             ! arg2 = text_faultp
                                                   >> 790         mov     %l7, %o3
                                                   >> 791         and     %o2, 2, %o2             ! arg3 = writep
                                                   >> 792         andn    %o3, 0xfff, %o3         ! arg4 = faulting address
                                                   >> 793 
                                                   >> 794         wr      %l0, PSR_ET, %psr
                                                   >> 795         WRITE_PAUSE
                                                   >> 796 
                                                   >> 797         call    do_sparc_fault
                                                   >> 798          add    %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
                                                   >> 799 
                                                   >> 800         RESTORE_ALL
                                                   >> 801 
                                                   >> 802         .align  4
                                                   >> 803         .globl  sys_nis_syscall
                                                   >> 804 sys_nis_syscall:
                                                   >> 805         mov     %o7, %l5
                                                   >> 806         add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
                                                   >> 807         call    c_sys_nis_syscall
                                                   >> 808          mov    %l5, %o7
                                                   >> 809 
                                                   >> 810 sunos_execv:
                                                   >> 811         .globl  sunos_execv
                                                   >> 812         b       sys_execve
                                                   >> 813          clr    %i2
                                                   >> 814 
                                                   >> 815         .align  4
                                                   >> 816         .globl  sys_sparc_pipe
                                                   >> 817 sys_sparc_pipe:
                                                   >> 818         mov     %o7, %l5
                                                   >> 819         add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
                                                   >> 820         call    sparc_pipe
                                                   >> 821          mov    %l5, %o7
                                                   >> 822 
                                                   >> 823         .align  4
                                                   >> 824         .globl  sys_sigstack
                                                   >> 825 sys_sigstack:
                                                   >> 826         mov     %o7, %l5
                                                   >> 827         mov     %fp, %o2
                                                   >> 828         call    do_sys_sigstack
                                                   >> 829          mov    %l5, %o7
                                                   >> 830 
                                                   >> 831         .align  4
                                                   >> 832         .globl  sys_sigreturn
                                                   >> 833 sys_sigreturn:
                                                   >> 834         call    do_sigreturn
                                                   >> 835          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 836 
                                                   >> 837         ld      [%curptr + TI_FLAGS], %l5
                                                   >> 838         andcc   %l5, _TIF_SYSCALL_TRACE, %g0
                                                   >> 839         be      1f
                                                   >> 840          nop
185                                                   841 
186         .macro mte_set_user_gcr, tsk, tmp, tmp !! 842         call    syscall_trace
187 #ifdef CONFIG_KASAN_HW_TAGS                    !! 843          mov    1, %o1
188 alternative_cb  ARM64_ALWAYS_SYSTEM, kasan_hw_ << 
189         b       1f                             << 
190 alternative_cb_end                             << 
191         ldr     \tmp, [\tsk, #THREAD_MTE_CTRL] << 
192                                                   844 
193         mte_set_gcr \tmp, \tmp2                << 
194 1:                                                845 1:
195 #endif                                         !! 846         /* We don't want to muck with user registers like a
196         .endm                                  !! 847          * normal syscall, just return.
197                                                << 
198         .macro  kernel_entry, el, regsize = 64 << 
199         .if     \el == 0                       << 
200         alternative_insn nop, SET_PSTATE_DIT(1 << 
201         .endif                                 << 
202         .if     \regsize == 32                 << 
203         mov     w0, w0                         << 
204         .endif                                 << 
205         stp     x0, x1, [sp, #16 * 0]          << 
206         stp     x2, x3, [sp, #16 * 1]          << 
207         stp     x4, x5, [sp, #16 * 2]          << 
208         stp     x6, x7, [sp, #16 * 3]          << 
209         stp     x8, x9, [sp, #16 * 4]          << 
210         stp     x10, x11, [sp, #16 * 5]        << 
211         stp     x12, x13, [sp, #16 * 6]        << 
212         stp     x14, x15, [sp, #16 * 7]        << 
213         stp     x16, x17, [sp, #16 * 8]        << 
214         stp     x18, x19, [sp, #16 * 9]        << 
215         stp     x20, x21, [sp, #16 * 10]       << 
216         stp     x22, x23, [sp, #16 * 11]       << 
217         stp     x24, x25, [sp, #16 * 12]       << 
218         stp     x26, x27, [sp, #16 * 13]       << 
219         stp     x28, x29, [sp, #16 * 14]       << 
220                                                << 
221         .if     \el == 0                       << 
222         clear_gp_regs                          << 
223         mrs     x21, sp_el0                    << 
224         ldr_this_cpu    tsk, __entry_task, x20 << 
225         msr     sp_el0, tsk                    << 
226                                                << 
227         /*                                     << 
228          * Ensure MDSCR_EL1.SS is clear, since << 
229          * when scheduling.                    << 
230          */                                       848          */
231         ldr     x19, [tsk, #TSK_TI_FLAGS]      !! 849         RESTORE_ALL
232         disable_step_tsk x19, x20              << 
233                                                   850 
234         /* Check for asynchronous tag check fa !! 851         .align  4
235         ldr     x0, [tsk, THREAD_SCTLR_USER]   !! 852         .globl  sys_rt_sigreturn
236         check_mte_async_tcf x22, x23, x0       !! 853 sys_rt_sigreturn:
                                                   >> 854         call    do_rt_sigreturn
                                                   >> 855          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 856 
                                                   >> 857         ld      [%curptr + TI_FLAGS], %l5
                                                   >> 858         andcc   %l5, _TIF_SYSCALL_TRACE, %g0
                                                   >> 859         be      1f
                                                   >> 860          nop
                                                   >> 861 
                                                   >> 862         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 863         call    syscall_trace
                                                   >> 864          mov    1, %o1
237                                                   865 
238 #ifdef CONFIG_ARM64_PTR_AUTH                   << 
239 alternative_if ARM64_HAS_ADDRESS_AUTH          << 
240         /*                                     << 
241          * Enable IA for in-kernel PAC if the  << 
242          * this could be implemented with an u << 
243          * a load, this was measured to be slo << 
244          *                                     << 
245          * Install the kernel IA key only if I << 
246          * was disabled on kernel exit then we << 
247          * installed so there is no need to in << 
248          */                                    << 
249         tbz     x0, SCTLR_ELx_ENIA_SHIFT, 1f   << 
250         __ptrauth_keys_install_kernel_nosync t << 
251         b       2f                             << 
252 1:                                                866 1:
253         mrs     x0, sctlr_el1                  !! 867         /* We are returning to a signal handler. */
254         orr     x0, x0, SCTLR_ELx_ENIA         !! 868         RESTORE_ALL
255         msr     sctlr_el1, x0                  << 
256 2:                                             << 
257 alternative_else_nop_endif                     << 
258 #endif                                         << 
259                                                << 
260         apply_ssbd 1, x22, x23                 << 
261                                                << 
262         mte_set_kernel_gcr x22, x23            << 
263                                                   869 
264         /*                                     !! 870         /* Now that we have a real sys_clone, sys_fork() is
265          * Any non-self-synchronizing system r !! 871          * implemented in terms of it.  Our _real_ implementation
266          * kernel entry should be placed befor !! 872          * of SunOS vfork() will use sys_vfork().
                                                   >> 873          *
                                                   >> 874          * XXX These three should be consolidated into mostly shared
                                                   >> 875          * XXX code just like on sparc64... -DaveM
267          */                                       876          */
268 alternative_if ARM64_MTE                       !! 877         .align  4
269         isb                                    !! 878         .globl  sys_fork, flush_patch_two
270         b       1f                             !! 879 sys_fork:
271 alternative_else_nop_endif                     !! 880         mov     %o7, %l5
272 alternative_if ARM64_HAS_ADDRESS_AUTH          !! 881 flush_patch_two:
273         isb                                    !! 882         FLUSH_ALL_KERNEL_WINDOWS;
274 alternative_else_nop_endif                     !! 883         ld      [%curptr + TI_TASK], %o4
                                                   >> 884         rd      %psr, %g4
                                                   >> 885         WRITE_PAUSE
                                                   >> 886         mov     SIGCHLD, %o0                    ! arg0: clone flags
                                                   >> 887         rd      %wim, %g5
                                                   >> 888         WRITE_PAUSE
                                                   >> 889         mov     %fp, %o1                        ! arg1: usp
                                                   >> 890         std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
                                                   >> 891         add     %sp, STACKFRAME_SZ, %o2         ! arg2: pt_regs ptr
                                                   >> 892         mov     0, %o3
                                                   >> 893         call    sparc_do_fork
                                                   >> 894          mov    %l5, %o7
                                                   >> 895 
                                                   >> 896         /* Whee, kernel threads! */
                                                   >> 897         .globl  sys_clone, flush_patch_three
                                                   >> 898 sys_clone:
                                                   >> 899         mov     %o7, %l5
                                                   >> 900 flush_patch_three:
                                                   >> 901         FLUSH_ALL_KERNEL_WINDOWS;
                                                   >> 902         ld      [%curptr + TI_TASK], %o4
                                                   >> 903         rd      %psr, %g4
                                                   >> 904         WRITE_PAUSE
                                                   >> 905 
                                                   >> 906         /* arg0,1: flags,usp  -- loaded already */
                                                   >> 907         cmp     %o1, 0x0                        ! Is new_usp NULL?
                                                   >> 908         rd      %wim, %g5
                                                   >> 909         WRITE_PAUSE
                                                   >> 910         be,a    1f
                                                   >> 911          mov    %fp, %o1                        ! yes, use callers usp
                                                   >> 912         andn    %o1, 7, %o1                     ! no, align to 8 bytes
275 1:                                                913 1:
                                                   >> 914         std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
                                                   >> 915         add     %sp, STACKFRAME_SZ, %o2         ! arg2: pt_regs ptr
                                                   >> 916         mov     0, %o3
                                                   >> 917         call    sparc_do_fork
                                                   >> 918          mov    %l5, %o7
                                                   >> 919 
                                                   >> 920         /* Whee, real vfork! */
                                                   >> 921         .globl  sys_vfork, flush_patch_four
                                                   >> 922 sys_vfork:
                                                   >> 923 flush_patch_four:
                                                   >> 924         FLUSH_ALL_KERNEL_WINDOWS;
                                                   >> 925         ld      [%curptr + TI_TASK], %o4
                                                   >> 926         rd      %psr, %g4
                                                   >> 927         WRITE_PAUSE
                                                   >> 928         rd      %wim, %g5
                                                   >> 929         WRITE_PAUSE
                                                   >> 930         std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
                                                   >> 931         sethi   %hi(0x4000 | 0x0100 | SIGCHLD), %o0
                                                   >> 932         mov     %fp, %o1
                                                   >> 933         or      %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
                                                   >> 934         sethi   %hi(sparc_do_fork), %l1
                                                   >> 935         mov     0, %o3
                                                   >> 936         jmpl    %l1 + %lo(sparc_do_fork), %g0
                                                   >> 937          add    %sp, STACKFRAME_SZ, %o2
                                                   >> 938 
                                                   >> 939         .align  4
                                                   >> 940 linux_sparc_ni_syscall:
                                                   >> 941         sethi   %hi(sys_ni_syscall), %l7
                                                   >> 942         b       do_syscall
                                                   >> 943          or     %l7, %lo(sys_ni_syscall), %l7
                                                   >> 944 
                                                   >> 945 linux_syscall_trace:
                                                   >> 946         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 947         call    syscall_trace
                                                   >> 948          mov    0, %o1
                                                   >> 949         cmp     %o0, 0
                                                   >> 950         bne     3f
                                                   >> 951          mov    -ENOSYS, %o0
                                                   >> 952 
                                                   >> 953         /* Syscall tracing can modify the registers.  */
                                                   >> 954         ld      [%sp + STACKFRAME_SZ + PT_G1], %g1
                                                   >> 955         sethi   %hi(sys_call_table), %l7
                                                   >> 956         ld      [%sp + STACKFRAME_SZ + PT_I0], %i0
                                                   >> 957         or      %l7, %lo(sys_call_table), %l7
                                                   >> 958         ld      [%sp + STACKFRAME_SZ + PT_I1], %i1
                                                   >> 959         ld      [%sp + STACKFRAME_SZ + PT_I2], %i2
                                                   >> 960         ld      [%sp + STACKFRAME_SZ + PT_I3], %i3
                                                   >> 961         ld      [%sp + STACKFRAME_SZ + PT_I4], %i4
                                                   >> 962         ld      [%sp + STACKFRAME_SZ + PT_I5], %i5
                                                   >> 963         cmp     %g1, NR_syscalls
                                                   >> 964         bgeu    3f
                                                   >> 965          mov    -ENOSYS, %o0
                                                   >> 966 
                                                   >> 967         sll     %g1, 2, %l4
                                                   >> 968         mov     %i0, %o0
                                                   >> 969         ld      [%l7 + %l4], %l7
                                                   >> 970         mov     %i1, %o1
                                                   >> 971         mov     %i2, %o2
                                                   >> 972         mov     %i3, %o3
                                                   >> 973         b       2f
                                                   >> 974          mov    %i4, %o4
276                                                   975 
277         scs_load_current                       !! 976         .globl  ret_from_fork
278         .else                                  !! 977 ret_from_fork:
279         add     x21, sp, #PT_REGS_SIZE         !! 978         call    schedule_tail
280         get_current_task tsk                   !! 979          ld     [%g3 + TI_TASK], %o0
281         .endif /* \el == 0 */                  !! 980         b       ret_sys_call
282         mrs     x22, elr_el1                   !! 981          ld     [%sp + STACKFRAME_SZ + PT_I0], %o0
283         mrs     x23, spsr_el1                  !! 982 
284         stp     lr, x21, [sp, #S_LR]           !! 983         .globl  ret_from_kernel_thread
285                                                !! 984 ret_from_kernel_thread:
286         /*                                     !! 985         call    schedule_tail
287          * For exceptions from EL0, create a f !! 986          ld     [%g3 + TI_TASK], %o0
288          * For exceptions from EL1, create a s !! 987         ld      [%sp + STACKFRAME_SZ + PT_G1], %l0
289          * interrupted code shows up in the ba !! 988         call    %l0
290          */                                    !! 989          ld     [%sp + STACKFRAME_SZ + PT_G2], %o0
291         .if \el == 0                           !! 990         rd      %psr, %l1
292         stp     xzr, xzr, [sp, #S_STACKFRAME]  !! 991         ld      [%sp + STACKFRAME_SZ + PT_PSR], %l0
293         .else                                  !! 992         andn    %l0, PSR_CWP, %l0
294         stp     x29, x22, [sp, #S_STACKFRAME]  !! 993         nop
295         .endif                                 !! 994         and     %l1, PSR_CWP, %l1
296         add     x29, sp, #S_STACKFRAME         !! 995         or      %l0, %l1, %l0
297                                                !! 996         st      %l0, [%sp + STACKFRAME_SZ + PT_PSR]
298 #ifdef CONFIG_ARM64_SW_TTBR0_PAN               !! 997         b       ret_sys_call
299 alternative_if_not ARM64_HAS_PAN               !! 998          mov    0, %o0
300         bl      __swpan_entry_el\el            !! 999 
301 alternative_else_nop_endif                     !! 1000         /* Linux native system calls enter here... */
302 #endif                                         !! 1001         .align  4
303                                                !! 1002         .globl  linux_sparc_syscall
304         stp     x22, x23, [sp, #S_PC]          !! 1003 linux_sparc_syscall:
305                                                !! 1004         sethi   %hi(PSR_SYSCALL), %l4
306         /* Not in a syscall by default (el0_sv !! 1005         or      %l0, %l4, %l0
307         .if     \el == 0                       !! 1006         /* Direct access to user regs, must faster. */
308         mov     w21, #NO_SYSCALL               !! 1007         cmp     %g1, NR_syscalls
309         str     w21, [sp, #S_SYSCALLNO]        !! 1008         bgeu    linux_sparc_ni_syscall
310         .endif                                 !! 1009          sll    %g1, 2, %l4
311                                                !! 1010         ld      [%l7 + %l4], %l7
312 #ifdef CONFIG_ARM64_PSEUDO_NMI                 !! 1011 
313 alternative_if_not ARM64_HAS_GIC_PRIO_MASKING  !! 1012 do_syscall:
314         b       .Lskip_pmr_save\@              !! 1013         SAVE_ALL_HEAD
315 alternative_else_nop_endif                     !! 1014          rd     %wim, %l3
316                                                !! 1015 
317         mrs_s   x20, SYS_ICC_PMR_EL1           !! 1016         wr      %l0, PSR_ET, %psr
318         str     x20, [sp, #S_PMR_SAVE]         !! 1017         mov     %i0, %o0
319         mov     x20, #GIC_PRIO_IRQON | GIC_PRI !! 1018         mov     %i1, %o1
320         msr_s   SYS_ICC_PMR_EL1, x20           !! 1019         mov     %i2, %o2
321                                                !! 1020 
322 .Lskip_pmr_save\@:                             !! 1021         ld      [%curptr + TI_FLAGS], %l5
323 #endif                                         !! 1022         mov     %i3, %o3
324                                                !! 1023         andcc   %l5, _TIF_SYSCALL_TRACE, %g0
325         /*                                     !! 1024         mov     %i4, %o4
326          * Registers that may be useful after  !! 1025         bne     linux_syscall_trace
327          *                                     !! 1026          mov    %i0, %l5
328          * x20 - ICC_PMR_EL1                   !! 1027 2:
329          * x21 - aborted SP                    !! 1028         call    %l7
330          * x22 - aborted PC                    !! 1029          mov    %i5, %o5
331          * x23 - aborted PSTATE                << 
332         */                                     << 
333         .endm                                  << 
334                                                << 
335         .macro  kernel_exit, el                << 
336         .if     \el != 0                       << 
337         disable_daif                           << 
338         .endif                                 << 
339                                                << 
340 #ifdef CONFIG_ARM64_PSEUDO_NMI                 << 
341 alternative_if_not ARM64_HAS_GIC_PRIO_MASKING  << 
342         b       .Lskip_pmr_restore\@           << 
343 alternative_else_nop_endif                     << 
344                                                << 
345         ldr     x20, [sp, #S_PMR_SAVE]         << 
346         msr_s   SYS_ICC_PMR_EL1, x20           << 
347                                                << 
348         /* Ensure priority change is seen by r << 
349 alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_ << 
350         dsb     sy                             << 
351 alternative_else_nop_endif                     << 
352                                                << 
353 .Lskip_pmr_restore\@:                          << 
354 #endif                                         << 
355                                                << 
356         ldp     x21, x22, [sp, #S_PC]          << 
357                                                << 
358 #ifdef CONFIG_ARM64_SW_TTBR0_PAN               << 
359 alternative_if_not ARM64_HAS_PAN               << 
360         bl      __swpan_exit_el\el             << 
361 alternative_else_nop_endif                     << 
362 #endif                                         << 
363                                                   1030 
364         .if     \el == 0                       << 
365         ldr     x23, [sp, #S_SP]               << 
366         msr     sp_el0, x23                    << 
367         tst     x22, #PSR_MODE32_BIT           << 
368         b.eq    3f                             << 
369                                                << 
370 #ifdef CONFIG_ARM64_ERRATUM_845719             << 
371 alternative_if ARM64_WORKAROUND_845719         << 
372 #ifdef CONFIG_PID_IN_CONTEXTIDR                << 
373         mrs     x29, contextidr_el1            << 
374         msr     contextidr_el1, x29            << 
375 #else                                          << 
376         msr contextidr_el1, xzr                << 
377 #endif                                         << 
378 alternative_else_nop_endif                     << 
379 #endif                                         << 
380 3:                                                1031 3:
381         scs_save tsk                           !! 1032         st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
382                                                << 
383         /* Ignore asynchronous tag check fault << 
384         ldr     x0, [tsk, THREAD_SCTLR_USER]   << 
385         clear_mte_async_tcf x0                 << 
386                                                   1033 
387 #ifdef CONFIG_ARM64_PTR_AUTH                   !! 1034 ret_sys_call:
388 alternative_if ARM64_HAS_ADDRESS_AUTH          !! 1035         ld      [%curptr + TI_FLAGS], %l6
389         /*                                     !! 1036         cmp     %o0, -ERESTART_RESTARTBLOCK
390          * IA was enabled for in-kernel PAC. D !! 1037         ld      [%sp + STACKFRAME_SZ + PT_PSR], %g3
391          * alternatively install the user's IA !! 1038         set     PSR_C, %g2
392          * SCTLR bits were updated on task swi !! 1039         bgeu    1f
393          *                                     !! 1040          andcc  %l6, _TIF_SYSCALL_TRACE, %g0
394          * No kernel C function calls after th !! 1041 
395          */                                    !! 1042         /* System call success, clear Carry condition code. */
396         tbz     x0, SCTLR_ELx_ENIA_SHIFT, 1f   !! 1043         andn    %g3, %g2, %g3
397         __ptrauth_keys_install_user tsk, x0, x !! 1044         clr     %l6
398         b       2f                             !! 1045         st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]     
                                                   >> 1046         bne     linux_syscall_trace2
                                                   >> 1047          ld     [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
                                                   >> 1048         add     %l1, 0x4, %l2                   /* npc = npc+4 */
                                                   >> 1049         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 1050         b       ret_trap_entry
                                                   >> 1051          st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
399 1:                                                1052 1:
400         mrs     x0, sctlr_el1                  !! 1053         /* System call failure, set Carry condition code.
401         bic     x0, x0, SCTLR_ELx_ENIA         !! 1054          * Also, get abs(errno) to return to the process.
402         msr     sctlr_el1, x0                  << 
403 2:                                             << 
404 alternative_else_nop_endif                     << 
405 #endif                                         << 
406                                                << 
407         mte_set_user_gcr tsk, x0, x1           << 
408                                                << 
409         apply_ssbd 0, x0, x1                   << 
410         .endif                                 << 
411                                                << 
412         msr     elr_el1, x21                   << 
413         msr     spsr_el1, x22                  << 
414         ldp     x0, x1, [sp, #16 * 0]          << 
415         ldp     x2, x3, [sp, #16 * 1]          << 
416         ldp     x4, x5, [sp, #16 * 2]          << 
417         ldp     x6, x7, [sp, #16 * 3]          << 
418         ldp     x8, x9, [sp, #16 * 4]          << 
419         ldp     x10, x11, [sp, #16 * 5]        << 
420         ldp     x12, x13, [sp, #16 * 6]        << 
421         ldp     x14, x15, [sp, #16 * 7]        << 
422         ldp     x16, x17, [sp, #16 * 8]        << 
423         ldp     x18, x19, [sp, #16 * 9]        << 
424         ldp     x20, x21, [sp, #16 * 10]       << 
425         ldp     x22, x23, [sp, #16 * 11]       << 
426         ldp     x24, x25, [sp, #16 * 12]       << 
427         ldp     x26, x27, [sp, #16 * 13]       << 
428         ldp     x28, x29, [sp, #16 * 14]       << 
429                                                << 
430         .if     \el == 0                       << 
431 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0              << 
432         alternative_insn "b .L_skip_tramp_exit << 
433                                                << 
434         msr     far_el1, x29                   << 
435                                                << 
436         ldr_this_cpu    x30, this_cpu_vector,  << 
437         tramp_alias     x29, tramp_exit        << 
438         msr             vbar_el1, x30          << 
439         ldr             lr, [sp, #S_LR]        << 
440         add             sp, sp, #PT_REGS_SIZE  << 
441         br              x29                    << 
442                                                << 
443 .L_skip_tramp_exit_\@:                         << 
444 #endif                                         << 
445         .endif                                 << 
446                                                << 
447         ldr     lr, [sp, #S_LR]                << 
448         add     sp, sp, #PT_REGS_SIZE          << 
449                                                << 
450         .if \el == 0                           << 
451         /* This must be after the last explici << 
452 alternative_if ARM64_WORKAROUND_SPECULATIVE_UN << 
453         tlbi    vale1, xzr                     << 
454         dsb     nsh                            << 
455 alternative_else_nop_endif                     << 
456         .else                                  << 
457         /* Ensure any device/NC reads complete << 
458         alternative_insn nop, "dmb sy", ARM64_ << 
459         .endif                                 << 
460                                                << 
461         eret                                   << 
462         sb                                     << 
463         .endm                                  << 
464                                                << 
465 #ifdef CONFIG_ARM64_SW_TTBR0_PAN               << 
466         /*                                     << 
467          * Set the TTBR0 PAN bit in SPSR. When << 
468          * EL0, there is no need to check the  << 
469          * accesses are always enabled.        << 
470          * Note that the meaning of this bit d << 
471          * feature as all TTBR0_EL1 accesses a << 
472          * user mappings.                      << 
473          */                                    << 
474 SYM_CODE_START_LOCAL(__swpan_entry_el1)        << 
475         mrs     x21, ttbr0_el1                 << 
476         tst     x21, #TTBR_ASID_MASK           << 
477         orr     x23, x23, #PSR_PAN_BIT         << 
478         b.eq    1f                             << 
479         and     x23, x23, #~PSR_PAN_BIT        << 
480 SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL << 
481         __uaccess_ttbr0_disable x21            << 
482 1:      ret                                    << 
483 SYM_CODE_END(__swpan_entry_el1)                << 
484                                                << 
485         /*                                     << 
486          * Restore access to TTBR0_EL1. If ret << 
487          * PAN bit checking.                   << 
488          */                                    << 
489 SYM_CODE_START_LOCAL(__swpan_exit_el1)         << 
490         tbnz    x22, #22, 1f                   << 
491         __uaccess_ttbr0_enable x0, x1          << 
492 1:      and     x22, x22, #~PSR_PAN_BIT        << 
493         ret                                    << 
494 SYM_CODE_END(__swpan_exit_el1)                 << 
495                                                << 
496 SYM_CODE_START_LOCAL(__swpan_exit_el0)         << 
497         __uaccess_ttbr0_enable x0, x1          << 
498         /*                                     << 
499          * Enable errata workarounds only if r << 
500          * workaround currently required for T << 
501          * Cavium erratum 27456 (broadcast TLB << 
502          * corruption).                        << 
503          */                                       1055          */
504         b       post_ttbr_update_workaround    !! 1056         sub     %g0, %o0, %o0
505 SYM_CODE_END(__swpan_exit_el0)                 !! 1057         or      %g3, %g2, %g3
506 #endif                                         !! 1058         st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
507                                                !! 1059         mov     1, %l6
508 /* GPRs used by entry code */                  !! 1060         st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]
509 tsk     .req    x28             // current thr !! 1061         bne     linux_syscall_trace2
510                                                !! 1062          ld     [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
511         .text                                  !! 1063         add     %l1, 0x4, %l2                   /* npc = npc+4 */
                                                   >> 1064         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 1065         b       ret_trap_entry
                                                   >> 1066          st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
                                                   >> 1067 
                                                   >> 1068 linux_syscall_trace2:
                                                   >> 1069         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 1070         mov     1, %o1
                                                   >> 1071         call    syscall_trace
                                                   >> 1072          add    %l1, 0x4, %l2                   /* npc = npc+4 */
                                                   >> 1073         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 1074         b       ret_trap_entry
                                                   >> 1075          st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
512                                                   1076 
513 /*                                             << 
514  * Exception vectors.                          << 
515  */                                            << 
516         .pushsection ".entry.text", "ax"       << 
517                                                << 
518         .align  11                             << 
519 SYM_CODE_START(vectors)                        << 
520         kernel_ventry   1, t, 64, sync         << 
521         kernel_ventry   1, t, 64, irq          << 
522         kernel_ventry   1, t, 64, fiq          << 
523         kernel_ventry   1, t, 64, error        << 
524                                                << 
525         kernel_ventry   1, h, 64, sync         << 
526         kernel_ventry   1, h, 64, irq          << 
527         kernel_ventry   1, h, 64, fiq          << 
528         kernel_ventry   1, h, 64, error        << 
529                                                << 
530         kernel_ventry   0, t, 64, sync         << 
531         kernel_ventry   0, t, 64, irq          << 
532         kernel_ventry   0, t, 64, fiq          << 
533         kernel_ventry   0, t, 64, error        << 
534                                                << 
535         kernel_ventry   0, t, 32, sync         << 
536         kernel_ventry   0, t, 32, irq          << 
537         kernel_ventry   0, t, 32, fiq          << 
538         kernel_ventry   0, t, 32, error        << 
539 SYM_CODE_END(vectors)                          << 
540                                                << 
541 #ifdef CONFIG_VMAP_STACK                       << 
542 SYM_CODE_START_LOCAL(__bad_stack)              << 
543         /*                                     << 
544          * We detected an overflow in kernel_v << 
545          * overflow stack. Stash the exception << 
546          * handler.                            << 
547          */                                    << 
548                                                   1077 
549         /* Restore the original x0 value */    !! 1078 /* Saving and restoring the FPU state is best done from lowlevel code.
550         mrs     x0, tpidrro_el0                !! 1079  *
551                                                !! 1080  * void fpsave(unsigned long *fpregs, unsigned long *fsr,
552         /*                                     !! 1081  *             void *fpqueue, unsigned long *fpqdepth)
553          * Store the original GPRs to the new  << 
554          * PT_REGS_SIZE) was stashed in tpidr_ << 
555          */                                    << 
556         sub     sp, sp, #PT_REGS_SIZE          << 
557         kernel_entry 1                         << 
558         mrs     x0, tpidr_el0                  << 
559         add     x0, x0, #PT_REGS_SIZE          << 
560         str     x0, [sp, #S_SP]                << 
561                                                << 
562         /* Stash the regs for handle_bad_stack << 
563         mov     x0, sp                         << 
564                                                << 
565         /* Time to die */                      << 
566         bl      handle_bad_stack               << 
567         ASM_BUG()                              << 
568 SYM_CODE_END(__bad_stack)                      << 
569 #endif /* CONFIG_VMAP_STACK */                 << 
570                                                << 
571                                                << 
572         .macro entry_handler el:req, ht:req, r << 
573 SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\ << 
574         kernel_entry \el, \regsize             << 
575         mov     x0, sp                         << 
576         bl      el\el\ht\()_\regsize\()_\label << 
577         .if \el == 0                           << 
578         b       ret_to_user                    << 
579         .else                                  << 
580         b       ret_to_kernel                  << 
581         .endif                                 << 
582 SYM_CODE_END(el\el\ht\()_\regsize\()_\label)   << 
583         .endm                                  << 
584                                                << 
585 /*                                             << 
586  * Early exception handlers                    << 
587  */                                               1082  */
588         entry_handler   1, t, 64, sync         << 
589         entry_handler   1, t, 64, irq          << 
590         entry_handler   1, t, 64, fiq          << 
591         entry_handler   1, t, 64, error        << 
592                                                << 
593         entry_handler   1, h, 64, sync         << 
594         entry_handler   1, h, 64, irq          << 
595         entry_handler   1, h, 64, fiq          << 
596         entry_handler   1, h, 64, error        << 
597                                                << 
598         entry_handler   0, t, 64, sync         << 
599         entry_handler   0, t, 64, irq          << 
600         entry_handler   0, t, 64, fiq          << 
601         entry_handler   0, t, 64, error        << 
602                                                << 
603         entry_handler   0, t, 32, sync         << 
604         entry_handler   0, t, 32, irq          << 
605         entry_handler   0, t, 32, fiq          << 
606         entry_handler   0, t, 32, error        << 
607                                                << 
608 SYM_CODE_START_LOCAL(ret_to_kernel)            << 
609         kernel_exit 1                          << 
610 SYM_CODE_END(ret_to_kernel)                    << 
611                                                << 
612 SYM_CODE_START_LOCAL(ret_to_user)              << 
613         ldr     x19, [tsk, #TSK_TI_FLAGS]      << 
614         enable_step_tsk x19, x2                << 
615 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK             << 
616         bl      stackleak_erase_on_task_stack  << 
617 #endif                                         << 
618         kernel_exit 0                          << 
619 SYM_CODE_END(ret_to_user)                      << 
620                                                   1083 
621         .popsection                            !! 1084         .globl  fpsave
                                                   >> 1085 fpsave:
                                                   >> 1086         st      %fsr, [%o1]     ! this can trap on us if fpu is in bogon state
                                                   >> 1087         ld      [%o1], %g1
                                                   >> 1088         set     0x2000, %g4
                                                   >> 1089         andcc   %g1, %g4, %g0
                                                   >> 1090         be      2f
                                                   >> 1091          mov    0, %g2
622                                                   1092 
623         // Move from tramp_pg_dir to swapper_p !! 1093         /* We have an fpqueue to save. */
624         .macro tramp_map_kernel, tmp           << 
625         mrs     \tmp, ttbr1_el1                << 
626         add     \tmp, \tmp, #TRAMP_SWAPPER_OFF << 
627         bic     \tmp, \tmp, #USER_ASID_FLAG    << 
628         msr     ttbr1_el1, \tmp                << 
629 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003         << 
630 alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1 << 
631         /* ASID already in \tmp[63:48] */      << 
632         movk    \tmp, #:abs_g2_nc:(TRAMP_VALIA << 
633         movk    \tmp, #:abs_g1_nc:(TRAMP_VALIA << 
634         /* 2MB boundary containing the vectors << 
635         movk    \tmp, #:abs_g0_nc:((TRAMP_VALI << 
636         isb                                    << 
637         tlbi    vae1, \tmp                     << 
638         dsb     nsh                            << 
639 alternative_else_nop_endif                     << 
640 #endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */   << 
641         .endm                                  << 
642                                                << 
643         // Move from swapper_pg_dir to tramp_p << 
644         .macro tramp_unmap_kernel, tmp         << 
645         mrs     \tmp, ttbr1_el1                << 
646         sub     \tmp, \tmp, #TRAMP_SWAPPER_OFF << 
647         orr     \tmp, \tmp, #USER_ASID_FLAG    << 
648         msr     ttbr1_el1, \tmp                << 
649         /*                                     << 
650          * We avoid running the post_ttbr_upda << 
651          * it's only needed by Cavium ThunderX << 
652          * disabled.                           << 
653          */                                    << 
654         .endm                                  << 
655                                                << 
656         .macro          tramp_data_read_var    << 
657 #ifdef CONFIG_RELOCATABLE                      << 
658         ldr             \dst, .L__tramp_data_\ << 
659         .ifndef         .L__tramp_data_\var    << 
660         .pushsection    ".entry.tramp.rodata", << 
661         .align          3                      << 
662 .L__tramp_data_\var:                           << 
663         .quad           \var                   << 
664         .popsection                            << 
665         .endif                                 << 
666 #else                                          << 
667         /*                                     << 
668          * As !RELOCATABLE implies !RANDOMIZE_ << 
669          * compile time constant (and hence no << 
670          *                                     << 
671          * As statically allocated kernel code << 
672          * 47 bits of the address space we can << 
673          * instruction to load the upper 16 bi << 
674          */                                    << 
675         movz            \dst, :abs_g2_s:\var   << 
676         movk            \dst, :abs_g1_nc:\var  << 
677         movk            \dst, :abs_g0_nc:\var  << 
678 #endif                                         << 
679         .endm                                  << 
680                                                << 
681 #define BHB_MITIGATION_NONE     0              << 
682 #define BHB_MITIGATION_LOOP     1              << 
683 #define BHB_MITIGATION_FW       2              << 
684 #define BHB_MITIGATION_INSN     3              << 
685                                                << 
686         .macro tramp_ventry, vector_start, reg << 
687         .align  7                              << 
688 1:                                                1094 1:
689         .if     \regsize == 64                 !! 1095         std     %fq, [%o2]
690         msr     tpidrro_el0, x30        // Res !! 1096 fpsave_magic:
691         .endif                                 !! 1097         st      %fsr, [%o1]
                                                   >> 1098         ld      [%o1], %g3
                                                   >> 1099         andcc   %g3, %g4, %g0
                                                   >> 1100         add     %g2, 1, %g2
                                                   >> 1101         bne     1b
                                                   >> 1102          add    %o2, 8, %o2
692                                                   1103 
693         .if     \bhb == BHB_MITIGATION_LOOP    << 
694         /*                                     << 
695          * This sequence must appear before th << 
696          * ret out of tramp_ventry. It appears << 
697          */                                    << 
698         __mitigate_spectre_bhb_loop     x30    << 
699         .endif // \bhb == BHB_MITIGATION_LOOP  << 
700                                                << 
701         .if     \bhb == BHB_MITIGATION_INSN    << 
702         clearbhb                               << 
703         isb                                    << 
704         .endif // \bhb == BHB_MITIGATION_INSN  << 
705                                                << 
706         .if     \kpti == 1                     << 
707         /*                                     << 
708          * Defend against branch aliasing atta << 
709          * entry onto the return stack and usi << 
710          * enter the full-fat kernel vectors.  << 
711          */                                    << 
712         bl      2f                             << 
713         b       .                              << 
714 2:                                                1104 2:
715         tramp_map_kernel        x30            !! 1105         st      %g2, [%o3]
716 alternative_insn isb, nop, ARM64_WORKAROUND_QC << 
717         tramp_data_read_var     x30, vectors   << 
718 alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2 << 
719         prfm    plil1strm, [x30, #(1b - \vecto << 
720 alternative_else_nop_endif                     << 
721                                                << 
722         msr     vbar_el1, x30                  << 
723         isb                                    << 
724         .else                                  << 
725         adr_l   x30, vectors                   << 
726         .endif // \kpti == 1                   << 
727                                                   1106 
728         .if     \bhb == BHB_MITIGATION_FW      !! 1107         std     %f0, [%o0 + 0x00]
729         /*                                     !! 1108         std     %f2, [%o0 + 0x08]
730          * The firmware sequence must appear b !! 1109         std     %f4, [%o0 + 0x10]
731          * i.e. the ret out of tramp_ventry. B !! 1110         std     %f6, [%o0 + 0x18]
732          * mapped to save/restore the register !! 1111         std     %f8, [%o0 + 0x20]
                                                   >> 1112         std     %f10, [%o0 + 0x28]
                                                   >> 1113         std     %f12, [%o0 + 0x30]
                                                   >> 1114         std     %f14, [%o0 + 0x38]
                                                   >> 1115         std     %f16, [%o0 + 0x40]
                                                   >> 1116         std     %f18, [%o0 + 0x48]
                                                   >> 1117         std     %f20, [%o0 + 0x50]
                                                   >> 1118         std     %f22, [%o0 + 0x58]
                                                   >> 1119         std     %f24, [%o0 + 0x60]
                                                   >> 1120         std     %f26, [%o0 + 0x68]
                                                   >> 1121         std     %f28, [%o0 + 0x70]
                                                   >> 1122         retl
                                                   >> 1123          std    %f30, [%o0 + 0x78]
                                                   >> 1124 
                                                   >> 1125         /* Thanks for Theo Deraadt and the authors of the Sprite/netbsd/openbsd
                                                   >> 1126          * code for pointing out this possible deadlock, while we save state
                                                   >> 1127          * above we could trap on the fsr store so our low level fpu trap
                                                   >> 1128          * code has to know how to deal with this.
                                                   >> 1129          */
                                                   >> 1130 fpsave_catch:
                                                   >> 1131         b       fpsave_magic + 4
                                                   >> 1132          st     %fsr, [%o1]
                                                   >> 1133 
                                                   >> 1134 fpsave_catch2:
                                                   >> 1135         b       fpsave + 4
                                                   >> 1136          st     %fsr, [%o1]
                                                   >> 1137 
                                                   >> 1138         /* void fpload(unsigned long *fpregs, unsigned long *fsr); */
                                                   >> 1139 
                                                   >> 1140         .globl  fpload
                                                   >> 1141 fpload:
                                                   >> 1142         ldd     [%o0 + 0x00], %f0
                                                   >> 1143         ldd     [%o0 + 0x08], %f2
                                                   >> 1144         ldd     [%o0 + 0x10], %f4
                                                   >> 1145         ldd     [%o0 + 0x18], %f6
                                                   >> 1146         ldd     [%o0 + 0x20], %f8
                                                   >> 1147         ldd     [%o0 + 0x28], %f10
                                                   >> 1148         ldd     [%o0 + 0x30], %f12
                                                   >> 1149         ldd     [%o0 + 0x38], %f14
                                                   >> 1150         ldd     [%o0 + 0x40], %f16
                                                   >> 1151         ldd     [%o0 + 0x48], %f18
                                                   >> 1152         ldd     [%o0 + 0x50], %f20
                                                   >> 1153         ldd     [%o0 + 0x58], %f22
                                                   >> 1154         ldd     [%o0 + 0x60], %f24
                                                   >> 1155         ldd     [%o0 + 0x68], %f26
                                                   >> 1156         ldd     [%o0 + 0x70], %f28
                                                   >> 1157         ldd     [%o0 + 0x78], %f30
                                                   >> 1158         ld      [%o1], %fsr
                                                   >> 1159         retl
                                                   >> 1160          nop
                                                   >> 1161 
                                                   >> 1162         /* __ndelay and __udelay take two arguments:
                                                   >> 1163          * 0 - nsecs or usecs to delay
                                                   >> 1164          * 1 - per_cpu udelay_val (loops per jiffy)
                                                   >> 1165          *
                                                   >> 1166          * Note that ndelay gives HZ times higher resolution but has a 10ms
                                                   >> 1167          * limit.  udelay can handle up to 1s.
733          */                                       1168          */
734         __mitigate_spectre_bhb_fw              !! 1169         .globl  __ndelay
735         .endif // \bhb == BHB_MITIGATION_FW    !! 1170 __ndelay:
736                                                !! 1171         save    %sp, -STACKFRAME_SZ, %sp
737         add     x30, x30, #(1b - \vector_start !! 1172         mov     %i0, %o0                ! round multiplier up so large ns ok
738         ret                                    !! 1173         mov     0x1ae, %o1              ! 2**32 / (1 000 000 000 / HZ)
739 .org 1b + 128   // Did we overflow the ventry  !! 1174         umul    %o0, %o1, %o0
740         .endm                                  !! 1175         rd      %y, %o1
741                                                !! 1176         mov     %i1, %o1                ! udelay_val
742         .macro  generate_tramp_vector,  kpti,  !! 1177         umul    %o0, %o1, %o0
743 .Lvector_start\@:                              !! 1178         rd      %y, %o1
744         .space  0x400                          !! 1179         ba      delay_continue
745                                                !! 1180          mov    %o1, %o0                ! >>32 later for better resolution
746         .rept   4                              !! 1181 
747         tramp_ventry    .Lvector_start\@, 64,  !! 1182         .globl  __udelay
748         .endr                                  !! 1183 __udelay:
749         .rept   4                              !! 1184         save    %sp, -STACKFRAME_SZ, %sp
750         tramp_ventry    .Lvector_start\@, 32,  !! 1185         mov     %i0, %o0
751         .endr                                  !! 1186         sethi   %hi(0x10c7), %o1        ! round multiplier up so large us ok
752         .endm                                  !! 1187         or      %o1, %lo(0x10c7), %o1   ! 2**32 / 1 000 000
753                                                !! 1188         umul    %o0, %o1, %o0
754 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0              !! 1189         rd      %y, %o1
755 /*                                             !! 1190         mov     %i1, %o1                ! udelay_val
756  * Exception vectors trampoline.               !! 1191         umul    %o0, %o1, %o0
757  * The order must match __bp_harden_el1_vector !! 1192         rd      %y, %o1
758  * arm64_bp_harden_el1_vectors enum.           !! 1193         sethi   %hi(0x028f4b62), %l0    ! Add in rounding constant * 2**32,
759  */                                            !! 1194         or      %g0, %lo(0x028f4b62), %l0
760         .pushsection ".entry.tramp.text", "ax" !! 1195         addcc   %o0, %l0, %o0           ! 2**32 * 0.009 999
761         .align  11                             !! 1196         bcs,a   3f
762 SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors)    !! 1197          add    %o1, 0x01, %o1
763 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY  !! 1198 3:
764         generate_tramp_vector   kpti=1, bhb=BH !! 1199         mov     HZ, %o0                 ! >>32 earlier for wider range
765         generate_tramp_vector   kpti=1, bhb=BH !! 1200         umul    %o0, %o1, %o0
766         generate_tramp_vector   kpti=1, bhb=BH !! 1201         rd      %y, %o1
767 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTO << 
768         generate_tramp_vector   kpti=1, bhb=BH << 
769 SYM_CODE_END(tramp_vectors)                    << 
770                                                << 
771 SYM_CODE_START_LOCAL(tramp_exit)               << 
772         tramp_unmap_kernel      x29            << 
773         mrs             x29, far_el1           << 
774         eret                                   << 
775         sb                                     << 
776 SYM_CODE_END(tramp_exit)                       << 
777         .popsection                            << 
778 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */        << 
779                                                << 
780 /*                                             << 
781  * Exception vectors for spectre mitigations o << 
782  * kpti is not in use.                         << 
783  */                                            << 
784         .macro generate_el1_vector, bhb        << 
785 .Lvector_start\@:                              << 
786         kernel_ventry   1, t, 64, sync         << 
787         kernel_ventry   1, t, 64, irq          << 
788         kernel_ventry   1, t, 64, fiq          << 
789         kernel_ventry   1, t, 64, error        << 
790                                                << 
791         kernel_ventry   1, h, 64, sync         << 
792         kernel_ventry   1, h, 64, irq          << 
793         kernel_ventry   1, h, 64, fiq          << 
794         kernel_ventry   1, h, 64, error        << 
795                                                << 
796         .rept   4                              << 
797         tramp_ventry    .Lvector_start\@, 64,  << 
798         .endr                                  << 
799         .rept 4                                << 
800         tramp_ventry    .Lvector_start\@, 32,  << 
801         .endr                                  << 
802         .endm                                  << 
803                                                << 
804 /* The order must match tramp_vecs and the arm << 
805         .pushsection ".entry.text", "ax"       << 
806         .align  11                             << 
807 SYM_CODE_START(__bp_harden_el1_vectors)        << 
808 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY  << 
809         generate_el1_vector     bhb=BHB_MITIGA << 
810         generate_el1_vector     bhb=BHB_MITIGA << 
811         generate_el1_vector     bhb=BHB_MITIGA << 
812 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTO << 
813 SYM_CODE_END(__bp_harden_el1_vectors)          << 
814         .popsection                            << 
815                                                << 
816                                                << 
817 /*                                             << 
818  * Register switch for AArch64. The callee-sav << 
819  * and restored. On entry:                     << 
820  *   x0 = previous task_struct (must be preser << 
821  *   x1 = next task_struct                     << 
822  * Previous and next are guaranteed not to be  << 
823  *                                             << 
824  */                                            << 
825 SYM_FUNC_START(cpu_switch_to)                  << 
826         mov     x10, #THREAD_CPU_CONTEXT       << 
827         add     x8, x0, x10                    << 
828         mov     x9, sp                         << 
829         stp     x19, x20, [x8], #16            << 
830         stp     x21, x22, [x8], #16            << 
831         stp     x23, x24, [x8], #16            << 
832         stp     x25, x26, [x8], #16            << 
833         stp     x27, x28, [x8], #16            << 
834         stp     x29, x9, [x8], #16             << 
835         str     lr, [x8]                       << 
836         add     x8, x1, x10                    << 
837         ldp     x19, x20, [x8], #16            << 
838         ldp     x21, x22, [x8], #16            << 
839         ldp     x23, x24, [x8], #16            << 
840         ldp     x25, x26, [x8], #16            << 
841         ldp     x27, x28, [x8], #16            << 
842         ldp     x29, x9, [x8], #16             << 
843         ldr     lr, [x8]                       << 
844         mov     sp, x9                         << 
845         msr     sp_el0, x1                     << 
846         ptrauth_keys_install_kernel x1, x8, x9 << 
847         scs_save x0                            << 
848         scs_load_current                       << 
849         ret                                    << 
850 SYM_FUNC_END(cpu_switch_to)                    << 
851 NOKPROBE(cpu_switch_to)                        << 
852                                                << 
853 /*                                             << 
854  * This is how we return from a fork.          << 
855  */                                            << 
856 SYM_CODE_START(ret_from_fork)                  << 
857         bl      schedule_tail                  << 
858         cbz     x19, 1f                        << 
859         mov     x0, x20                        << 
860         blr     x19                            << 
861 1:      get_current_task tsk                   << 
862         mov     x0, sp                         << 
863         bl      asm_exit_to_user_mode          << 
864         b       ret_to_user                    << 
865 SYM_CODE_END(ret_from_fork)                    << 
866 NOKPROBE(ret_from_fork)                        << 
867                                                << 
868 /*                                             << 
869  * void call_on_irq_stack(struct pt_regs *regs << 
870  *                        void (*func)(struct  << 
871  *                                             << 
872  * Calls func(regs) using this CPU's irq stack << 
873  */                                            << 
874 SYM_FUNC_START(call_on_irq_stack)              << 
875 #ifdef CONFIG_SHADOW_CALL_STACK                << 
876         get_current_task x16                   << 
877         scs_save x16                           << 
878         ldr_this_cpu scs_sp, irq_shadow_call_s << 
879 #endif                                         << 
880                                                << 
881         /* Create a frame record to save our L << 
882         stp     x29, x30, [sp, #-16]!          << 
883         mov     x29, sp                        << 
884                                                << 
885         ldr_this_cpu x16, irq_stack_ptr, x17   << 
886                                                << 
887         /* Move to the new stack and call the  << 
888         add     sp, x16, #IRQ_STACK_SIZE       << 
889         blr     x1                             << 
890                                                   1202 
891         /*                                     !! 1203 delay_continue:
892          * Restore the SP from the FP, and res !! 1204         cmp     %o0, 0x0
893          * record.                             !! 1205 1:
894          */                                    !! 1206         bne     1b
895         mov     sp, x29                        !! 1207          subcc  %o0, 1, %o0
896         ldp     x29, x30, [sp], #16            !! 1208         
897         scs_load_current                       << 
898         ret                                       1209         ret
899 SYM_FUNC_END(call_on_irq_stack)                !! 1210         restore
900 NOKPROBE(call_on_irq_stack)                    !! 1211 EXPORT_SYMBOL(__udelay)
901                                                !! 1212 EXPORT_SYMBOL(__ndelay)
902 #ifdef CONFIG_ARM_SDE_INTERFACE                !! 1213 
903                                                !! 1214         /* Handle a software breakpoint */
904 #include <asm/sdei.h>                          !! 1215         /* We have to inform parent that child has stopped */
905 #include <uapi/linux/arm_sdei.h>               !! 1216         .align 4
906                                                !! 1217         .globl breakpoint_trap
907 .macro sdei_handler_exit exit_mode             !! 1218 breakpoint_trap:
908         /* On success, this call never returns !! 1219         rd      %wim,%l3
909         cmp     \exit_mode, #SDEI_EXIT_SMC     !! 1220         SAVE_ALL
910         b.ne    99f                            !! 1221         wr      %l0, PSR_ET, %psr
911         smc     #0                             !! 1222         WRITE_PAUSE
912         b       .                              !! 1223 
913 99:     hvc     #0                             !! 1224         st      %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
914         b       .                              !! 1225         call    sparc_breakpoint
915 .endm                                          !! 1226          add    %sp, STACKFRAME_SZ, %o0
916                                                !! 1227 
917 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0              !! 1228         RESTORE_ALL
918 /*                                             !! 1229 
919  * The regular SDEI entry point may have been  !! 1230 #ifdef CONFIG_KGDB
920  * the kernel. This trampoline restores the ke !! 1231         ENTRY(kgdb_trap_low)
921  * argument accessible.                        !! 1232         rd      %wim,%l3
922  *                                             !! 1233         SAVE_ALL
923  * This clobbers x4, __sdei_handler() will res !! 1234         wr      %l0, PSR_ET, %psr
924  * copy.                                       !! 1235         WRITE_PAUSE
925  */                                            !! 1236 
926 .pushsection ".entry.tramp.text", "ax"         !! 1237         mov     %l7, %o0                ! trap_level
927 SYM_CODE_START(__sdei_asm_entry_trampoline)    !! 1238         call    kgdb_trap
928         mrs     x4, ttbr1_el1                  !! 1239          add    %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
929         tbz     x4, #USER_ASID_BIT, 1f         !! 1240 
930                                                !! 1241         RESTORE_ALL
931         tramp_map_kernel tmp=x4                !! 1242         ENDPROC(kgdb_trap_low)
932         isb                                    !! 1243 #endif
933         mov     x4, xzr                        !! 1244 
934                                                !! 1245         .align  4
935         /*                                     !! 1246         .globl  flush_patch_exception
936          * Remember whether to unmap the kerne !! 1247 flush_patch_exception:
937          */                                    !! 1248         FLUSH_ALL_KERNEL_WINDOWS;
938 1:      str     x4, [x1, #(SDEI_EVENT_INTREGS  !! 1249         ldd     [%o0], %o6
939         tramp_data_read_var     x4, __sdei_asm !! 1250         jmpl    %o7 + 0xc, %g0                  ! see asm-sparc/processor.h
940         br      x4                             !! 1251          mov    1, %g1                          ! signal EFAULT condition
941 SYM_CODE_END(__sdei_asm_entry_trampoline)      !! 1252 
942 NOKPROBE(__sdei_asm_entry_trampoline)          !! 1253         .align  4
943                                                !! 1254         .globl  kill_user_windows, kuw_patch1_7win
944 /*                                             !! 1255         .globl  kuw_patch1
945  * Make the exit call and restore the original !! 1256 kuw_patch1_7win:        sll     %o3, 6, %o3
946  *                                             !! 1257 
947  * x0 & x1: setup for the exit API call        !! 1258         /* No matter how much overhead this routine has in the worst
948  * x2: exit_mode                               !! 1259          * case scenario, it is several times better than taking the
949  * x4: struct sdei_registered_event argument f !! 1260          * traps with the old method of just doing flush_user_windows().
950  */                                            !! 1261          */
951 SYM_CODE_START(__sdei_asm_exit_trampoline)     !! 1262 kill_user_windows:
952         ldr     x4, [x4, #(SDEI_EVENT_INTREGS  !! 1263         ld      [%g6 + TI_UWINMASK], %o0        ! get current umask
953         cbnz    x4, 1f                         !! 1264         orcc    %g0, %o0, %g0                   ! if no bits set, we are done
954                                                !! 1265         be      3f                              ! nothing to do
955         tramp_unmap_kernel      tmp=x4         !! 1266          rd     %psr, %o5                       ! must clear interrupts
956                                                !! 1267         or      %o5, PSR_PIL, %o4               ! or else that could change
957 1:      sdei_handler_exit exit_mode=x2         !! 1268         wr      %o4, 0x0, %psr                  ! the uwinmask state
958 SYM_CODE_END(__sdei_asm_exit_trampoline)       !! 1269         WRITE_PAUSE                             ! burn them cycles
959 NOKPROBE(__sdei_asm_exit_trampoline)           !! 1270 1:
960 .popsection             // .entry.tramp.text   !! 1271         ld      [%g6 + TI_UWINMASK], %o0        ! get consistent state
961 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */        !! 1272         orcc    %g0, %o0, %g0                   ! did an interrupt come in?
962                                                !! 1273         be      4f                              ! yep, we are done
963 /*                                             !! 1274          rd     %wim, %o3                       ! get current wim
964  * Software Delegated Exception entry point.   !! 1275         srl     %o3, 1, %o4                     ! simulate a save
965  *                                             !! 1276 kuw_patch1:
966  * x0: Event number                            !! 1277         sll     %o3, 7, %o3                     ! compute next wim
967  * x1: struct sdei_registered_event argument f !! 1278         or      %o4, %o3, %o3                   ! result
968  * x2: interrupted PC                          !! 1279         andncc  %o0, %o3, %o0                   ! clean this bit in umask
969  * x3: interrupted PSTATE                      !! 1280         bne     kuw_patch1                      ! not done yet
970  * x4: maybe clobbered by the trampoline       !! 1281          srl    %o3, 1, %o4                     ! begin another save simulation
971  *                                             !! 1282         wr      %o3, 0x0, %wim                  ! set the new wim
972  * Firmware has preserved x0->x17 for us, we m !! 1283         st      %g0, [%g6 + TI_UWINMASK]        ! clear uwinmask
973  * follow SMC-CC. We save (or retrieve) all th !! 1284 4:
974  * want them.                                  !! 1285         wr      %o5, 0x0, %psr                  ! re-enable interrupts
975  */                                            !! 1286         WRITE_PAUSE                             ! burn baby burn
976 SYM_CODE_START(__sdei_asm_handler)             !! 1287 3:
977         stp     x2, x3, [x1, #SDEI_EVENT_INTRE !! 1288         retl                                    ! return
978         stp     x4, x5, [x1, #SDEI_EVENT_INTRE !! 1289          st     %g0, [%g6 + TI_W_SAVED]         ! no windows saved
979         stp     x6, x7, [x1, #SDEI_EVENT_INTRE << 
980         stp     x8, x9, [x1, #SDEI_EVENT_INTRE << 
981         stp     x10, x11, [x1, #SDEI_EVENT_INT << 
982         stp     x12, x13, [x1, #SDEI_EVENT_INT << 
983         stp     x14, x15, [x1, #SDEI_EVENT_INT << 
984         stp     x16, x17, [x1, #SDEI_EVENT_INT << 
985         stp     x18, x19, [x1, #SDEI_EVENT_INT << 
986         stp     x20, x21, [x1, #SDEI_EVENT_INT << 
987         stp     x22, x23, [x1, #SDEI_EVENT_INT << 
988         stp     x24, x25, [x1, #SDEI_EVENT_INT << 
989         stp     x26, x27, [x1, #SDEI_EVENT_INT << 
990         stp     x28, x29, [x1, #SDEI_EVENT_INT << 
991         mov     x4, sp                         << 
992         stp     lr, x4, [x1, #SDEI_EVENT_INTRE << 
993                                                << 
994         mov     x19, x1                        << 
995                                                << 
996         /* Store the registered-event for cras << 
997         ldrb    w4, [x19, #SDEI_EVENT_PRIORITY << 
998         cbnz    w4, 1f                         << 
999         adr_this_cpu dst=x5, sym=sdei_active_n << 
1000         b       2f                            << 
1001 1:      adr_this_cpu dst=x5, sym=sdei_active_ << 
1002 2:      str     x19, [x5]                     << 
1003                                                  1290 
1004 #ifdef CONFIG_VMAP_STACK                      !! 1291         .align  4
1005         /*                                    !! 1292         .globl  restore_current
1006          * entry.S may have been using sp as  !! 1293 restore_current:
1007          * this is a normal or critical event !! 1294         LOAD_CURRENT(g6, o0)
1008          * stack for this CPU.                !! 1295         retl
                                                   >> 1296          nop
                                                   >> 1297 
                                                   >> 1298 #ifdef CONFIG_PCIC_PCI
                                                   >> 1299 #include <asm/pcic.h>
                                                   >> 1300 
                                                   >> 1301         .align  4
                                                   >> 1302         .globl  linux_trap_ipi15_pcic
                                                   >> 1303 linux_trap_ipi15_pcic:
                                                   >> 1304         rd      %wim, %l3
                                                   >> 1305         SAVE_ALL
                                                   >> 1306 
                                                   >> 1307         /*
                                                   >> 1308          * First deactivate NMI
                                                   >> 1309          * or we cannot drop ET, cannot get window spill traps.
                                                   >> 1310          * The busy loop is necessary because the PIO error
                                                   >> 1311          * sometimes does not go away quickly and we trap again.
1009          */                                      1312          */
1010         cbnz    w4, 1f                        !! 1313         sethi   %hi(pcic_regs), %o1
1011         ldr_this_cpu dst=x5, sym=sdei_stack_n !! 1314         ld      [%o1 + %lo(pcic_regs)], %o2
1012         b       2f                            << 
1013 1:      ldr_this_cpu dst=x5, sym=sdei_stack_c << 
1014 2:      mov     x6, #SDEI_STACK_SIZE          << 
1015         add     x5, x5, x6                    << 
1016         mov     sp, x5                        << 
1017 #endif                                        << 
1018                                                  1315 
1019 #ifdef CONFIG_SHADOW_CALL_STACK               !! 1316         ! Get pending status for printouts later.
1020         /* Use a separate shadow call stack f !! 1317         ld      [%o2 + PCI_SYS_INT_PENDING], %o0
1021         cbnz    w4, 3f                        << 
1022         ldr_this_cpu dst=scs_sp, sym=sdei_sha << 
1023         b       4f                            << 
1024 3:      ldr_this_cpu dst=scs_sp, sym=sdei_sha << 
1025 4:                                            << 
1026 #endif                                        << 
1027                                                  1318 
1028         /*                                    !! 1319         mov     PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
1029          * We may have interrupted userspace, !! 1320         stb     %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
1030          * return-to either of these. We can' !! 1321 1:
1031          */                                   !! 1322         ld      [%o2 + PCI_SYS_INT_PENDING], %o1
1032         mrs     x28, sp_el0                   !! 1323         andcc   %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
1033         ldr_this_cpu    dst=x0, sym=__entry_t !! 1324         bne     1b
1034         msr     sp_el0, x0                    !! 1325          nop
1035                                               !! 1326 
1036         /* If we interrupted the kernel point !! 1327         or      %l0, PSR_PIL, %l4
1037         and     x0, x3, #0xc                  !! 1328         wr      %l4, 0x0, %psr
1038         mrs     x1, CurrentEL                 !! 1329         WRITE_PAUSE
1039         cmp     x0, x1                        !! 1330         wr      %l4, PSR_ET, %psr
1040         csel    x29, x29, xzr, eq       // fp !! 1331         WRITE_PAUSE
1041         csel    x4, x2, xzr, eq         // el !! 1332 
1042                                               !! 1333         call    pcic_nmi
1043         stp     x29, x4, [sp, #-16]!          !! 1334          add    %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
1044         mov     x29, sp                       !! 1335         RESTORE_ALL
1045                                               !! 1336 
1046         add     x0, x19, #SDEI_EVENT_INTREGS  !! 1337         .globl  pcic_nmi_trap_patch
1047         mov     x1, x19                       !! 1338 pcic_nmi_trap_patch:
1048         bl      __sdei_handler                !! 1339         sethi   %hi(linux_trap_ipi15_pcic), %l3
1049                                               !! 1340         jmpl    %l3 + %lo(linux_trap_ipi15_pcic), %g0
1050         msr     sp_el0, x28                   !! 1341          rd     %psr, %l0
1051         /* restore regs >x17 that we clobbere !! 1342         .word   0
1052         mov     x4, x19         // keep x4 fo !! 1343 
1053         ldp     x28, x29, [x4, #SDEI_EVENT_IN !! 1344 #endif /* CONFIG_PCIC_PCI */
1054         ldp     x18, x19, [x4, #SDEI_EVENT_IN !! 1345 
1055         ldp     lr, x1, [x4, #SDEI_EVENT_INTR !! 1346         .globl  flushw_all
1056         mov     sp, x1                        !! 1347 flushw_all:
1057                                               !! 1348         save    %sp, -0x40, %sp
1058         mov     x1, x0                  // ad !! 1349         save    %sp, -0x40, %sp
1059         /* x0 = (x0 <= SDEI_EV_FAILED) ?      !! 1350         save    %sp, -0x40, %sp
1060          * EVENT_COMPLETE:EVENT_COMPLETE_AND_ !! 1351         save    %sp, -0x40, %sp
1061          */                                   !! 1352         save    %sp, -0x40, %sp
1062         cmp     x0, #SDEI_EV_FAILED           !! 1353         save    %sp, -0x40, %sp
1063         mov_q   x2, SDEI_1_0_FN_SDEI_EVENT_CO !! 1354         save    %sp, -0x40, %sp
1064         mov_q   x3, SDEI_1_0_FN_SDEI_EVENT_CO !! 1355         restore
1065         csel    x0, x2, x3, ls                !! 1356         restore
1066                                               !! 1357         restore
1067         ldr_l   x2, sdei_exit_mode            !! 1358         restore
1068                                               !! 1359         restore
1069         /* Clear the registered-event seen by !! 1360         restore
1070         ldrb    w3, [x4, #SDEI_EVENT_PRIORITY !! 1361         ret
1071         cbnz    w3, 1f                        !! 1362          restore
1072         adr_this_cpu dst=x5, sym=sdei_active_ << 
1073         b       2f                            << 
1074 1:      adr_this_cpu dst=x5, sym=sdei_active_ << 
1075 2:      str     xzr, [x5]                     << 
1076                                                  1363 
1077 alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0  !! 1364 #ifdef CONFIG_SMP
1078         sdei_handler_exit exit_mode=x2        !! 1365 ENTRY(hard_smp_processor_id)
1079 alternative_else_nop_endif                    !! 1366 661:    rd              %tbr, %g1
1080                                               !! 1367         srl             %g1, 12, %o0
1081 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0             !! 1368         and             %o0, 3, %o0
1082         tramp_alias     dst=x5, sym=__sdei_as !! 1369         .section        .cpuid_patch, "ax"
1083         br      x5                            !! 1370         /* Instruction location. */
                                                   >> 1371         .word           661b
                                                   >> 1372         /* SUN4D implementation. */
                                                   >> 1373         lda             [%g0] ASI_M_VIKING_TMP1, %o0
                                                   >> 1374         nop
                                                   >> 1375         nop
                                                   >> 1376         /* LEON implementation. */
                                                   >> 1377         rd              %asr17, %o0
                                                   >> 1378         srl             %o0, 0x1c, %o0
                                                   >> 1379         nop
                                                   >> 1380         .previous
                                                   >> 1381         retl
                                                   >> 1382          nop
                                                   >> 1383 ENDPROC(hard_smp_processor_id)
1084 #endif                                           1384 #endif
1085 SYM_CODE_END(__sdei_asm_handler)              << 
1086 NOKPROBE(__sdei_asm_handler)                  << 
1087                                                  1385 
1088 SYM_CODE_START(__sdei_handler_abort)          !! 1386 /* End of entry.S */
1089         mov_q   x0, SDEI_1_0_FN_SDEI_EVENT_CO << 
1090         adr     x1, 1f                        << 
1091         ldr_l   x2, sdei_exit_mode            << 
1092         sdei_handler_exit exit_mode=x2        << 
1093         // exit the handler and jump to the n << 
1094         // Exit will stomp x0-x17, PSTATE, EL << 
1095 1:      ret                                   << 
1096 SYM_CODE_END(__sdei_handler_abort)            << 
1097 NOKPROBE(__sdei_handler_abort)                << 
1098 #endif /* CONFIG_ARM_SDE_INTERFACE */         << 
                                                      

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php