~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/arm64/kernel/entry.S

Version: ~ [ linux-6.12-rc7 ] ~ [ linux-6.11.7 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.60 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.116 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.171 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.229 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.285 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.323 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.12 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /arch/arm64/kernel/entry.S (Version linux-6.12-rc7) and /arch/sparc/kernel/entry.S (Version linux-4.4.302)


  1 /* SPDX-License-Identifier: GPL-2.0-only */    !!   1 /* arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
  2 /*                                             << 
  3  * Low-level exception handling code           << 
  4  *                                                  2  *
  5  * Copyright (C) 2012 ARM Ltd.                 !!   3  * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
  6  * Authors:     Catalin Marinas <catalin.marina !!   4  * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
  7  *              Will Deacon <will.deacon@arm.co !!   5  * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
                                                   >>   6  * Copyright (C) 1996-1999 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
                                                   >>   7  * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
  8  */                                                 8  */
  9                                                     9 
 10 #include <linux/arm-smccc.h>                   << 
 11 #include <linux/init.h>                        << 
 12 #include <linux/linkage.h>                         10 #include <linux/linkage.h>
                                                   >>  11 #include <linux/errno.h>
 13                                                    12 
 14 #include <asm/alternative.h>                   !!  13 #include <asm/head.h>
 15 #include <asm/assembler.h>                     !!  14 #include <asm/asi.h>
 16 #include <asm/asm-offsets.h>                   !!  15 #include <asm/smp.h>
 17 #include <asm/asm_pointer_auth.h>              !!  16 #include <asm/contregs.h>
 18 #include <asm/bug.h>                           << 
 19 #include <asm/cpufeature.h>                    << 
 20 #include <asm/errno.h>                         << 
 21 #include <asm/esr.h>                           << 
 22 #include <asm/irq.h>                           << 
 23 #include <asm/memory.h>                        << 
 24 #include <asm/mmu.h>                           << 
 25 #include <asm/processor.h>                     << 
 26 #include <asm/ptrace.h>                            17 #include <asm/ptrace.h>
 27 #include <asm/scs.h>                           !!  18 #include <asm/asm-offsets.h>
                                                   >>  19 #include <asm/psr.h>
                                                   >>  20 #include <asm/vaddrs.h>
                                                   >>  21 #include <asm/page.h>
                                                   >>  22 #include <asm/pgtable.h>
                                                   >>  23 #include <asm/winmacro.h>
                                                   >>  24 #include <asm/signal.h>
                                                   >>  25 #include <asm/obio.h>
                                                   >>  26 #include <asm/mxcc.h>
 28 #include <asm/thread_info.h>                       27 #include <asm/thread_info.h>
 29 #include <asm/asm-uaccess.h>                   !!  28 #include <asm/param.h>
 30 #include <asm/unistd.h>                            29 #include <asm/unistd.h>
 31                                                    30 
 32         .macro  clear_gp_regs                  !!  31 #include <asm/asmmacro.h>
 33         .irp    n,0,1,2,3,4,5,6,7,8,9,10,11,12 << 
 34         mov     x\n, xzr                       << 
 35         .endr                                  << 
 36         .endm                                  << 
 37                                                << 
 38         .macro kernel_ventry, el:req, ht:req,  << 
 39         .align 7                               << 
 40 .Lventry_start\@:                              << 
 41         .if     \el == 0                       << 
 42         /*                                     << 
 43          * This must be the first instruction  << 
 44          * skipped by the trampoline vectors,  << 
 45          */                                    << 
 46         b       .Lskip_tramp_vectors_cleanup\@ << 
 47         .if     \regsize == 64                 << 
 48         mrs     x30, tpidrro_el0               << 
 49         msr     tpidrro_el0, xzr               << 
 50         .else                                  << 
 51         mov     x30, xzr                       << 
 52         .endif                                 << 
 53 .Lskip_tramp_vectors_cleanup\@:                << 
 54         .endif                                 << 
 55                                                    32 
 56         sub     sp, sp, #PT_REGS_SIZE          !!  33 #define curptr      g6
 57 #ifdef CONFIG_VMAP_STACK                       << 
 58         /*                                     << 
 59          * Test whether the SP has overflowed, << 
 60          * Task and IRQ stacks are aligned so  << 
 61          * should always be zero.              << 
 62          */                                    << 
 63         add     sp, sp, x0                     << 
 64         sub     x0, sp, x0                     << 
 65         tbnz    x0, #THREAD_SHIFT, 0f          << 
 66         sub     x0, sp, x0                     << 
 67         sub     sp, sp, x0                     << 
 68         b       el\el\ht\()_\regsize\()_\label << 
 69                                                    34 
 70 0:                                             !!  35 /* These are just handy. */
 71         /*                                     !!  36 #define _SV     save    %sp, -STACKFRAME_SZ, %sp
 72          * Either we've just detected an overf !!  37 #define _RS     restore 
 73          * while on the overflow stack. Either << 
 74          * userspace, and can clobber EL0 regi << 
 75          */                                    << 
 76                                                    38 
 77         /* Stash the original SP (minus PT_REG !!  39 #define FLUSH_ALL_KERNEL_WINDOWS \
 78         msr     tpidr_el0, x0                  !!  40         _SV; _SV; _SV; _SV; _SV; _SV; _SV; \
                                                   >>  41         _RS; _RS; _RS; _RS; _RS; _RS; _RS;
 79                                                    42 
 80         /* Recover the original x0 value and s !!  43         .text
 81         sub     x0, sp, x0                     << 
 82         msr     tpidrro_el0, x0                << 
 83                                                    44 
 84         /* Switch to the overflow stack */     !!  45 #ifdef CONFIG_KGDB
 85         adr_this_cpu sp, overflow_stack + OVER !!  46         .align  4
                                                   >>  47         .globl          arch_kgdb_breakpoint
                                                   >>  48         .type           arch_kgdb_breakpoint,#function
                                                   >>  49 arch_kgdb_breakpoint:
                                                   >>  50         ta              0x7d
                                                   >>  51         retl
                                                   >>  52          nop
                                                   >>  53         .size           arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
                                                   >>  54 #endif
                                                   >>  55 
                                                   >>  56 #if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
                                                   >>  57         .align  4
                                                   >>  58         .globl  floppy_hardint
                                                   >>  59 floppy_hardint:
                                                   >>  60         /*
                                                   >>  61          * This code cannot touch registers %l0 %l1 and %l2
                                                   >>  62          * because SAVE_ALL depends on their values. It depends
                                                   >>  63          * on %l3 also, but we regenerate it before a call.
                                                   >>  64          * Other registers are:
                                                   >>  65          * %l3 -- base address of fdc registers
                                                   >>  66          * %l4 -- pdma_vaddr
                                                   >>  67          * %l5 -- scratch for ld/st address
                                                   >>  68          * %l6 -- pdma_size
                                                   >>  69          * %l7 -- scratch [floppy byte, ld/st address, aux. data]
                                                   >>  70          */
                                                   >>  71 
                                                   >>  72         /* Do we have work to do? */
                                                   >>  73         sethi   %hi(doing_pdma), %l7
                                                   >>  74         ld      [%l7 + %lo(doing_pdma)], %l7
                                                   >>  75         cmp     %l7, 0
                                                   >>  76         be      floppy_dosoftint
                                                   >>  77          nop
                                                   >>  78 
                                                   >>  79         /* Load fdc register base */
                                                   >>  80         sethi   %hi(fdc_status), %l3
                                                   >>  81         ld      [%l3 + %lo(fdc_status)], %l3
                                                   >>  82 
                                                   >>  83         /* Setup register addresses */
                                                   >>  84         sethi   %hi(pdma_vaddr), %l5    ! transfer buffer
                                                   >>  85         ld      [%l5 + %lo(pdma_vaddr)], %l4
                                                   >>  86         sethi   %hi(pdma_size), %l5     ! bytes to go
                                                   >>  87         ld      [%l5 + %lo(pdma_size)], %l6
                                                   >>  88 next_byte:
                                                   >>  89         ldub    [%l3], %l7
                                                   >>  90 
                                                   >>  91         andcc   %l7, 0x80, %g0          ! Does fifo still have data
                                                   >>  92         bz      floppy_fifo_emptied     ! fifo has been emptied...
                                                   >>  93          andcc  %l7, 0x20, %g0          ! in non-dma mode still?
                                                   >>  94         bz      floppy_overrun          ! nope, overrun
                                                   >>  95          andcc  %l7, 0x40, %g0          ! 0=write 1=read
                                                   >>  96         bz      floppy_write
                                                   >>  97          sub    %l6, 0x1, %l6
                                                   >>  98 
                                                   >>  99         /* Ok, actually read this byte */
                                                   >> 100         ldub    [%l3 + 1], %l7
                                                   >> 101         orcc    %g0, %l6, %g0
                                                   >> 102         stb     %l7, [%l4]
                                                   >> 103         bne     next_byte
                                                   >> 104          add    %l4, 0x1, %l4
                                                   >> 105 
                                                   >> 106         b       floppy_tdone
                                                   >> 107          nop
                                                   >> 108 
                                                   >> 109 floppy_write:
                                                   >> 110         /* Ok, actually write this byte */
                                                   >> 111         ldub    [%l4], %l7
                                                   >> 112         orcc    %g0, %l6, %g0
                                                   >> 113         stb     %l7, [%l3 + 1]
                                                   >> 114         bne     next_byte
                                                   >> 115          add    %l4, 0x1, %l4
                                                   >> 116 
                                                   >> 117         /* fall through... */
                                                   >> 118 floppy_tdone:
                                                   >> 119         sethi   %hi(pdma_vaddr), %l5
                                                   >> 120         st      %l4, [%l5 + %lo(pdma_vaddr)]
                                                   >> 121         sethi   %hi(pdma_size), %l5
                                                   >> 122         st      %l6, [%l5 + %lo(pdma_size)]
                                                   >> 123         /* Flip terminal count pin */
                                                   >> 124         set     auxio_register, %l7
                                                   >> 125         ld      [%l7], %l7
                                                   >> 126 
                                                   >> 127         ldub    [%l7], %l5
                                                   >> 128 
                                                   >> 129         or      %l5, 0xc2, %l5
                                                   >> 130         stb     %l5, [%l7]
                                                   >> 131         andn    %l5, 0x02, %l5
 86                                                   132 
 87         /*                                     !! 133 2:
 88          * Check whether we were already on th !! 134         /* Kill some time so the bits set */
 89          * after panic() re-enables interrupts !! 135         WRITE_PAUSE
 90          */                                    !! 136         WRITE_PAUSE
 91         mrs     x0, tpidr_el0                  !! 137 
 92         sub     x0, sp, x0                     !! 138         stb     %l5, [%l7]
 93         tst     x0, #~(OVERFLOW_STACK_SIZE - 1 !! 139 
 94         b.ne    __bad_stack                    !! 140         /* Prevent recursion */
 95                                                !! 141         sethi   %hi(doing_pdma), %l7
 96         /* We were already on the overflow sta !! 142         b       floppy_dosoftint
 97         sub     sp, sp, x0                     !! 143          st     %g0, [%l7 + %lo(doing_pdma)]
 98         mrs     x0, tpidrro_el0                !! 144 
 99 #endif                                         !! 145         /* We emptied the FIFO, but we haven't read everything
100         b       el\el\ht\()_\regsize\()_\label !! 146          * as of yet.  Store the current transfer address and
101 .org .Lventry_start\@ + 128     // Did we over !! 147          * bytes left to read so we can continue when the next
102         .endm                                  !! 148          * fast IRQ comes in.
103                                                !! 149          */
104         .macro  tramp_alias, dst, sym          !! 150 floppy_fifo_emptied:
105         .set    .Lalias\@, TRAMP_VALIAS + \sym !! 151         sethi   %hi(pdma_vaddr), %l5
106         movz    \dst, :abs_g2_s:.Lalias\@      !! 152         st      %l4, [%l5 + %lo(pdma_vaddr)]
107         movk    \dst, :abs_g1_nc:.Lalias\@     !! 153         sethi   %hi(pdma_size), %l7
108         movk    \dst, :abs_g0_nc:.Lalias\@     !! 154         st      %l6, [%l7 + %lo(pdma_size)]
109         .endm                                  !! 155 
                                                   >> 156         /* Restore condition codes */
                                                   >> 157         wr      %l0, 0x0, %psr
                                                   >> 158         WRITE_PAUSE
                                                   >> 159 
                                                   >> 160         jmp     %l1
                                                   >> 161         rett    %l2
                                                   >> 162 
                                                   >> 163 floppy_overrun:
                                                   >> 164         sethi   %hi(pdma_vaddr), %l5
                                                   >> 165         st      %l4, [%l5 + %lo(pdma_vaddr)]
                                                   >> 166         sethi   %hi(pdma_size), %l5
                                                   >> 167         st      %l6, [%l5 + %lo(pdma_size)]
                                                   >> 168         /* Prevent recursion */
                                                   >> 169         sethi   %hi(doing_pdma), %l7
                                                   >> 170         st      %g0, [%l7 + %lo(doing_pdma)]
                                                   >> 171 
                                                   >> 172         /* fall through... */
                                                   >> 173 floppy_dosoftint:
                                                   >> 174         rd      %wim, %l3
                                                   >> 175         SAVE_ALL
                                                   >> 176 
                                                   >> 177         /* Set all IRQs off. */
                                                   >> 178         or      %l0, PSR_PIL, %l4
                                                   >> 179         wr      %l4, 0x0, %psr
                                                   >> 180         WRITE_PAUSE
                                                   >> 181         wr      %l4, PSR_ET, %psr
                                                   >> 182         WRITE_PAUSE
                                                   >> 183 
                                                   >> 184         mov     11, %o0                 ! floppy irq level (unused anyway)
                                                   >> 185         mov     %g0, %o1                ! devid is not used in fast interrupts
                                                   >> 186         call    sparc_floppy_irq
                                                   >> 187          add    %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs
                                                   >> 188 
                                                   >> 189         RESTORE_ALL
                                                   >> 190         
                                                   >> 191 #endif /* (CONFIG_BLK_DEV_FD) */
                                                   >> 192 
                                                   >> 193         /* Bad trap handler */
                                                   >> 194         .globl  bad_trap_handler
                                                   >> 195 bad_trap_handler:
                                                   >> 196         SAVE_ALL
                                                   >> 197 
                                                   >> 198         wr      %l0, PSR_ET, %psr
                                                   >> 199         WRITE_PAUSE
                                                   >> 200 
                                                   >> 201         add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
                                                   >> 202         call    do_hw_interrupt
                                                   >> 203          mov    %l7, %o1                ! trap number
                                                   >> 204 
                                                   >> 205         RESTORE_ALL
                                                   >> 206         
                                                   >> 207 /* For now all IRQ's not registered get sent here. handler_irq() will
                                                   >> 208  * see if a routine is registered to handle this interrupt and if not
                                                   >> 209  * it will say so on the console.
                                                   >> 210  */
110                                                   211 
111         /*                                     !! 212         .align  4
112          * This macro corrupts x0-x3. It is th !! 213         .globl  real_irq_entry, patch_handler_irq
113          * them if required.                   !! 214 real_irq_entry:
114          */                                    !! 215         SAVE_ALL
115         .macro  apply_ssbd, state, tmp1, tmp2  !! 216 
116 alternative_cb  ARM64_ALWAYS_SYSTEM, spectre_v !! 217 #ifdef CONFIG_SMP
117         b       .L__asm_ssbd_skip\@            !! 218         .globl  patchme_maybe_smp_msg
118 alternative_cb_end                             !! 219 
119         ldr_this_cpu    \tmp2, arm64_ssbd_call !! 220         cmp     %l7, 11
120         cbz     \tmp2,  .L__asm_ssbd_skip\@    !! 221 patchme_maybe_smp_msg:
121         ldr     \tmp2, [tsk, #TSK_TI_FLAGS]    !! 222         bgu     maybe_smp4m_msg
122         tbnz    \tmp2, #TIF_SSBD, .L__asm_ssbd !! 223          nop
123         mov     w0, #ARM_SMCCC_ARCH_WORKAROUND !! 224 #endif
124         mov     w1, #\state                    !! 225 
125 alternative_cb  ARM64_ALWAYS_SYSTEM, smccc_pat !! 226 real_irq_continue:
126         nop                                    !! 227         or      %l0, PSR_PIL, %g2
127 alternative_cb_end                             !! 228         wr      %g2, 0x0, %psr
128 .L__asm_ssbd_skip\@:                           !! 229         WRITE_PAUSE
129         .endm                                  !! 230         wr      %g2, PSR_ET, %psr
130                                                !! 231         WRITE_PAUSE
131         /* Check for MTE asynchronous tag chec !! 232         mov     %l7, %o0                ! irq level
132         .macro check_mte_async_tcf, tmp, ti_fl !! 233 patch_handler_irq:
133 #ifdef CONFIG_ARM64_MTE                        !! 234         call    handler_irq
134         .arch_extension lse                    !! 235          add    %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
135 alternative_if_not ARM64_MTE                   !! 236         or      %l0, PSR_PIL, %g2       ! restore PIL after handler_irq
136         b       1f                             !! 237         wr      %g2, PSR_ET, %psr       ! keep ET up
137 alternative_else_nop_endif                     !! 238         WRITE_PAUSE
138         /*                                     !! 239 
139          * Asynchronous tag check faults are o !! 240         RESTORE_ALL
140          * ASYM (3) modes. In each of these mo !! 241 
141          * set, so skip the check if it is uns !! 242 #ifdef CONFIG_SMP
                                                   >> 243         /* SMP per-cpu ticker interrupts are handled specially. */
                                                   >> 244 smp4m_ticker:
                                                   >> 245         bne     real_irq_continue+4
                                                   >> 246          or     %l0, PSR_PIL, %g2
                                                   >> 247         wr      %g2, 0x0, %psr
                                                   >> 248         WRITE_PAUSE
                                                   >> 249         wr      %g2, PSR_ET, %psr
                                                   >> 250         WRITE_PAUSE
                                                   >> 251         call    smp4m_percpu_timer_interrupt
                                                   >> 252          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 253         wr      %l0, PSR_ET, %psr
                                                   >> 254         WRITE_PAUSE
                                                   >> 255         RESTORE_ALL
                                                   >> 256 
                                                   >> 257 #define GET_PROCESSOR4M_ID(reg) \
                                                   >> 258         rd      %tbr, %reg;     \
                                                   >> 259         srl     %reg, 12, %reg; \
                                                   >> 260         and     %reg, 3, %reg;
                                                   >> 261 
                                                   >> 262         /* Here is where we check for possible SMP IPI passed to us
                                                   >> 263          * on some level other than 15 which is the NMI and only used
                                                   >> 264          * for cross calls.  That has a separate entry point below.
                                                   >> 265          *
                                                   >> 266          * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
142          */                                       267          */
143         tbz     \thread_sctlr, #(SCTLR_EL1_TCF !! 268 maybe_smp4m_msg:
144         mrs_s   \tmp, SYS_TFSRE0_EL1           !! 269         GET_PROCESSOR4M_ID(o3)
145         tbz     \tmp, #SYS_TFSR_EL1_TF0_SHIFT, !! 270         sethi   %hi(sun4m_irq_percpu), %l5
146         /* Asynchronous TCF occurred for TTBR0 !! 271         sll     %o3, 2, %o3
147         mov     \tmp, #_TIF_MTE_ASYNC_FAULT    !! 272         or      %l5, %lo(sun4m_irq_percpu), %o5
148         add     \ti_flags, tsk, #TSK_TI_FLAGS  !! 273         sethi   %hi(0x70000000), %o2    ! Check all soft-IRQs
149         stset   \tmp, [\ti_flags]              !! 274         ld      [%o5 + %o3], %o1
150 1:                                             !! 275         ld      [%o1 + 0x00], %o3       ! sun4m_irq_percpu[cpu]->pending
151 #endif                                         !! 276         andcc   %o3, %o2, %g0
152         .endm                                  !! 277         be,a    smp4m_ticker
                                                   >> 278          cmp    %l7, 14
                                                   >> 279         /* Soft-IRQ IPI */
                                                   >> 280         st      %o2, [%o1 + 0x04]       ! sun4m_irq_percpu[cpu]->clear=0x70000000
                                                   >> 281         WRITE_PAUSE
                                                   >> 282         ld      [%o1 + 0x00], %g0       ! sun4m_irq_percpu[cpu]->pending
                                                   >> 283         WRITE_PAUSE
                                                   >> 284         or      %l0, PSR_PIL, %l4
                                                   >> 285         wr      %l4, 0x0, %psr
                                                   >> 286         WRITE_PAUSE
                                                   >> 287         wr      %l4, PSR_ET, %psr
                                                   >> 288         WRITE_PAUSE
                                                   >> 289         srl     %o3, 28, %o2            ! shift for simpler checks below
                                                   >> 290 maybe_smp4m_msg_check_single:
                                                   >> 291         andcc   %o2, 0x1, %g0
                                                   >> 292         beq,a   maybe_smp4m_msg_check_mask
                                                   >> 293          andcc  %o2, 0x2, %g0
                                                   >> 294         call    smp_call_function_single_interrupt
                                                   >> 295          nop
                                                   >> 296         andcc   %o2, 0x2, %g0
                                                   >> 297 maybe_smp4m_msg_check_mask:
                                                   >> 298         beq,a   maybe_smp4m_msg_check_resched
                                                   >> 299          andcc  %o2, 0x4, %g0
                                                   >> 300         call    smp_call_function_interrupt
                                                   >> 301          nop
                                                   >> 302         andcc   %o2, 0x4, %g0
                                                   >> 303 maybe_smp4m_msg_check_resched:
                                                   >> 304         /* rescheduling is done in RESTORE_ALL regardless, but incr stats */
                                                   >> 305         beq,a   maybe_smp4m_msg_out
                                                   >> 306          nop
                                                   >> 307         call    smp_resched_interrupt
                                                   >> 308          nop
                                                   >> 309 maybe_smp4m_msg_out:
                                                   >> 310         RESTORE_ALL
                                                   >> 311 
                                                   >> 312         .align  4
                                                   >> 313         .globl  linux_trap_ipi15_sun4m
                                                   >> 314 linux_trap_ipi15_sun4m:
                                                   >> 315         SAVE_ALL
                                                   >> 316         sethi   %hi(0x80000000), %o2
                                                   >> 317         GET_PROCESSOR4M_ID(o0)
                                                   >> 318         sethi   %hi(sun4m_irq_percpu), %l5
                                                   >> 319         or      %l5, %lo(sun4m_irq_percpu), %o5
                                                   >> 320         sll     %o0, 2, %o0
                                                   >> 321         ld      [%o5 + %o0], %o5
                                                   >> 322         ld      [%o5 + 0x00], %o3       ! sun4m_irq_percpu[cpu]->pending
                                                   >> 323         andcc   %o3, %o2, %g0
                                                   >> 324         be      sun4m_nmi_error         ! Must be an NMI async memory error
                                                   >> 325          st     %o2, [%o5 + 0x04]       ! sun4m_irq_percpu[cpu]->clear=0x80000000
                                                   >> 326         WRITE_PAUSE
                                                   >> 327         ld      [%o5 + 0x00], %g0       ! sun4m_irq_percpu[cpu]->pending
                                                   >> 328         WRITE_PAUSE
                                                   >> 329         or      %l0, PSR_PIL, %l4
                                                   >> 330         wr      %l4, 0x0, %psr
                                                   >> 331         WRITE_PAUSE
                                                   >> 332         wr      %l4, PSR_ET, %psr
                                                   >> 333         WRITE_PAUSE
                                                   >> 334         call    smp4m_cross_call_irq
                                                   >> 335          nop
                                                   >> 336         b       ret_trap_lockless_ipi
                                                   >> 337          clr    %l6
                                                   >> 338 
                                                   >> 339         .globl  smp4d_ticker
                                                   >> 340         /* SMP per-cpu ticker interrupts are handled specially. */
                                                   >> 341 smp4d_ticker:
                                                   >> 342         SAVE_ALL
                                                   >> 343         or      %l0, PSR_PIL, %g2
                                                   >> 344         sethi   %hi(CC_ICLR), %o0
                                                   >> 345         sethi   %hi(1 << 14), %o1
                                                   >> 346         or      %o0, %lo(CC_ICLR), %o0
                                                   >> 347         stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 14 in MXCC's ICLR */
                                                   >> 348         wr      %g2, 0x0, %psr
                                                   >> 349         WRITE_PAUSE
                                                   >> 350         wr      %g2, PSR_ET, %psr
                                                   >> 351         WRITE_PAUSE
                                                   >> 352         call    smp4d_percpu_timer_interrupt
                                                   >> 353          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 354         wr      %l0, PSR_ET, %psr
                                                   >> 355         WRITE_PAUSE
                                                   >> 356         RESTORE_ALL
                                                   >> 357 
                                                   >> 358         .align  4
                                                   >> 359         .globl  linux_trap_ipi15_sun4d
                                                   >> 360 linux_trap_ipi15_sun4d:
                                                   >> 361         SAVE_ALL
                                                   >> 362         sethi   %hi(CC_BASE), %o4
                                                   >> 363         sethi   %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
                                                   >> 364         or      %o4, (CC_EREG - CC_BASE), %o0
                                                   >> 365         ldda    [%o0] ASI_M_MXCC, %o0
                                                   >> 366         andcc   %o0, %o2, %g0
                                                   >> 367         bne     1f
                                                   >> 368          sethi  %hi(BB_STAT2), %o2
                                                   >> 369         lduba   [%o2] ASI_M_CTL, %o2
                                                   >> 370         andcc   %o2, BB_STAT2_MASK, %g0
                                                   >> 371         bne     2f
                                                   >> 372          or     %o4, (CC_ICLR - CC_BASE), %o0
                                                   >> 373         sethi   %hi(1 << 15), %o1
                                                   >> 374         stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 15 in MXCC's ICLR */
                                                   >> 375         or      %l0, PSR_PIL, %l4
                                                   >> 376         wr      %l4, 0x0, %psr
                                                   >> 377         WRITE_PAUSE
                                                   >> 378         wr      %l4, PSR_ET, %psr
                                                   >> 379         WRITE_PAUSE
                                                   >> 380         call    smp4d_cross_call_irq
                                                   >> 381          nop
                                                   >> 382         b       ret_trap_lockless_ipi
                                                   >> 383          clr    %l6
                                                   >> 384 
                                                   >> 385 1:      /* MXCC error */
                                                   >> 386 2:      /* BB error */
                                                   >> 387         /* Disable PIL 15 */
                                                   >> 388         set     CC_IMSK, %l4
                                                   >> 389         lduha   [%l4] ASI_M_MXCC, %l5
                                                   >> 390         sethi   %hi(1 << 15), %l7
                                                   >> 391         or      %l5, %l7, %l5
                                                   >> 392         stha    %l5, [%l4] ASI_M_MXCC
                                                   >> 393         /* FIXME */
                                                   >> 394 1:      b,a     1b
                                                   >> 395 
                                                   >> 396         .globl  smpleon_ipi
                                                   >> 397         .extern leon_ipi_interrupt
                                                   >> 398         /* SMP per-cpu IPI interrupts are handled specially. */
                                                   >> 399 smpleon_ipi:
                                                   >> 400         SAVE_ALL
                                                   >> 401         or      %l0, PSR_PIL, %g2
                                                   >> 402         wr      %g2, 0x0, %psr
                                                   >> 403         WRITE_PAUSE
                                                   >> 404         wr      %g2, PSR_ET, %psr
                                                   >> 405         WRITE_PAUSE
                                                   >> 406         call    leonsmp_ipi_interrupt
                                                   >> 407          add    %sp, STACKFRAME_SZ, %o1 ! pt_regs
                                                   >> 408         wr      %l0, PSR_ET, %psr
                                                   >> 409         WRITE_PAUSE
                                                   >> 410         RESTORE_ALL
                                                   >> 411 
                                                   >> 412         .align  4
                                                   >> 413         .globl  linux_trap_ipi15_leon
                                                   >> 414 linux_trap_ipi15_leon:
                                                   >> 415         SAVE_ALL
                                                   >> 416         or      %l0, PSR_PIL, %l4
                                                   >> 417         wr      %l4, 0x0, %psr
                                                   >> 418         WRITE_PAUSE
                                                   >> 419         wr      %l4, PSR_ET, %psr
                                                   >> 420         WRITE_PAUSE
                                                   >> 421         call    leon_cross_call_irq
                                                   >> 422          nop
                                                   >> 423         b       ret_trap_lockless_ipi
                                                   >> 424          clr    %l6
                                                   >> 425 
                                                   >> 426 #endif /* CONFIG_SMP */
                                                   >> 427 
                                                   >> 428         /* This routine handles illegal instructions and privileged
                                                   >> 429          * instruction attempts from user code.
                                                   >> 430          */
                                                   >> 431         .align  4
                                                   >> 432         .globl  bad_instruction
                                                   >> 433 bad_instruction:
                                                   >> 434         sethi   %hi(0xc1f80000), %l4
                                                   >> 435         ld      [%l1], %l5
                                                   >> 436         sethi   %hi(0x81d80000), %l7
                                                   >> 437         and     %l5, %l4, %l5
                                                   >> 438         cmp     %l5, %l7
                                                   >> 439         be      1f
                                                   >> 440         SAVE_ALL
                                                   >> 441 
                                                   >> 442         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 443         WRITE_PAUSE
                                                   >> 444 
                                                   >> 445         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 446         mov     %l1, %o1
                                                   >> 447         mov     %l2, %o2
                                                   >> 448         call    do_illegal_instruction
                                                   >> 449          mov    %l0, %o3
                                                   >> 450 
                                                   >> 451         RESTORE_ALL
                                                   >> 452 
                                                   >> 453 1:      /* unimplemented flush - just skip */
                                                   >> 454         jmpl    %l2, %g0
                                                   >> 455          rett   %l2 + 4
                                                   >> 456 
                                                   >> 457         .align  4
                                                   >> 458         .globl  priv_instruction
                                                   >> 459 priv_instruction:
                                                   >> 460         SAVE_ALL
                                                   >> 461 
                                                   >> 462         wr      %l0, PSR_ET, %psr
                                                   >> 463         WRITE_PAUSE
                                                   >> 464 
                                                   >> 465         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 466         mov     %l1, %o1
                                                   >> 467         mov     %l2, %o2
                                                   >> 468         call    do_priv_instruction
                                                   >> 469          mov    %l0, %o3
                                                   >> 470 
                                                   >> 471         RESTORE_ALL
                                                   >> 472 
                                                   >> 473         /* This routine handles unaligned data accesses. */
                                                   >> 474         .align  4
                                                   >> 475         .globl  mna_handler
                                                   >> 476 mna_handler:
                                                   >> 477         andcc   %l0, PSR_PS, %g0
                                                   >> 478         be      mna_fromuser
                                                   >> 479          nop
                                                   >> 480 
                                                   >> 481         SAVE_ALL
                                                   >> 482 
                                                   >> 483         wr      %l0, PSR_ET, %psr
                                                   >> 484         WRITE_PAUSE
                                                   >> 485 
                                                   >> 486         ld      [%l1], %o1
                                                   >> 487         call    kernel_unaligned_trap
                                                   >> 488          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 489 
                                                   >> 490         RESTORE_ALL
                                                   >> 491 
                                                   >> 492 mna_fromuser:
                                                   >> 493         SAVE_ALL
                                                   >> 494 
                                                   >> 495         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 496         WRITE_PAUSE
                                                   >> 497 
                                                   >> 498         ld      [%l1], %o1
                                                   >> 499         call    user_unaligned_trap
                                                   >> 500          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 501 
                                                   >> 502         RESTORE_ALL
                                                   >> 503 
                                                   >> 504         /* This routine handles floating point disabled traps. */
                                                   >> 505         .align  4
                                                   >> 506         .globl  fpd_trap_handler
                                                   >> 507 fpd_trap_handler:
                                                   >> 508         SAVE_ALL
                                                   >> 509 
                                                   >> 510         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 511         WRITE_PAUSE
                                                   >> 512 
                                                   >> 513         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 514         mov     %l1, %o1
                                                   >> 515         mov     %l2, %o2
                                                   >> 516         call    do_fpd_trap
                                                   >> 517          mov    %l0, %o3
                                                   >> 518 
                                                   >> 519         RESTORE_ALL
                                                   >> 520 
                                                   >> 521         /* This routine handles Floating Point Exceptions. */
                                                   >> 522         .align  4
                                                   >> 523         .globl  fpe_trap_handler
                                                   >> 524 fpe_trap_handler:
                                                   >> 525         set     fpsave_magic, %l5
                                                   >> 526         cmp     %l1, %l5
                                                   >> 527         be      1f
                                                   >> 528          sethi  %hi(fpsave), %l5
                                                   >> 529         or      %l5, %lo(fpsave), %l5
                                                   >> 530         cmp     %l1, %l5
                                                   >> 531         bne     2f
                                                   >> 532          sethi  %hi(fpsave_catch2), %l5
                                                   >> 533         or      %l5, %lo(fpsave_catch2), %l5
                                                   >> 534         wr      %l0, 0x0, %psr
                                                   >> 535         WRITE_PAUSE
                                                   >> 536         jmp     %l5
                                                   >> 537          rett   %l5 + 4
                                                   >> 538 1:      
                                                   >> 539         sethi   %hi(fpsave_catch), %l5
                                                   >> 540         or      %l5, %lo(fpsave_catch), %l5
                                                   >> 541         wr      %l0, 0x0, %psr
                                                   >> 542         WRITE_PAUSE
                                                   >> 543         jmp     %l5
                                                   >> 544          rett   %l5 + 4
153                                                   545 
154         /* Clear the MTE asynchronous tag chec !! 546 2:
155         .macro clear_mte_async_tcf thread_sctl !! 547         SAVE_ALL
156 #ifdef CONFIG_ARM64_MTE                        << 
157 alternative_if ARM64_MTE                       << 
158         /* See comment in check_mte_async_tcf  << 
159         tbz     \thread_sctlr, #(SCTLR_EL1_TCF << 
160         dsb     ish                            << 
161         msr_s   SYS_TFSRE0_EL1, xzr            << 
162 1:                                             << 
163 alternative_else_nop_endif                     << 
164 #endif                                         << 
165         .endm                                  << 
166                                                   548 
167         .macro mte_set_gcr, mte_ctrl, tmp      !! 549         wr      %l0, PSR_ET, %psr               ! re-enable traps
168 #ifdef CONFIG_ARM64_MTE                        !! 550         WRITE_PAUSE
169         ubfx    \tmp, \mte_ctrl, #MTE_CTRL_GCR << 
170         orr     \tmp, \tmp, #SYS_GCR_EL1_RRND  << 
171         msr_s   SYS_GCR_EL1, \tmp              << 
172 #endif                                         << 
173         .endm                                  << 
174                                                   551 
175         .macro mte_set_kernel_gcr, tmp, tmp2   !! 552         add     %sp, STACKFRAME_SZ, %o0
176 #ifdef CONFIG_KASAN_HW_TAGS                    !! 553         mov     %l1, %o1
177 alternative_cb  ARM64_ALWAYS_SYSTEM, kasan_hw_ !! 554         mov     %l2, %o2
178         b       1f                             !! 555         call    do_fpe_trap
179 alternative_cb_end                             !! 556          mov    %l0, %o3
180         mov     \tmp, KERNEL_GCR_EL1           !! 557 
181         msr_s   SYS_GCR_EL1, \tmp              !! 558         RESTORE_ALL
182 1:                                             !! 559 
183 #endif                                         !! 560         /* This routine handles Tag Overflow Exceptions. */
184         .endm                                  !! 561         .align  4
                                                   >> 562         .globl  do_tag_overflow
                                                   >> 563 do_tag_overflow:
                                                   >> 564         SAVE_ALL
                                                   >> 565 
                                                   >> 566         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 567         WRITE_PAUSE
                                                   >> 568 
                                                   >> 569         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 570         mov     %l1, %o1
                                                   >> 571         mov     %l2, %o2
                                                   >> 572         call    handle_tag_overflow
                                                   >> 573          mov    %l0, %o3
                                                   >> 574 
                                                   >> 575         RESTORE_ALL
                                                   >> 576 
                                                   >> 577         /* This routine handles Watchpoint Exceptions. */
                                                   >> 578         .align  4
                                                   >> 579         .globl  do_watchpoint
                                                   >> 580 do_watchpoint:
                                                   >> 581         SAVE_ALL
                                                   >> 582 
                                                   >> 583         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 584         WRITE_PAUSE
                                                   >> 585 
                                                   >> 586         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 587         mov     %l1, %o1
                                                   >> 588         mov     %l2, %o2
                                                   >> 589         call    handle_watchpoint
                                                   >> 590          mov    %l0, %o3
                                                   >> 591 
                                                   >> 592         RESTORE_ALL
                                                   >> 593 
                                                   >> 594         /* This routine handles Register Access Exceptions. */
                                                   >> 595         .align  4
                                                   >> 596         .globl  do_reg_access
                                                   >> 597 do_reg_access:
                                                   >> 598         SAVE_ALL
                                                   >> 599 
                                                   >> 600         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 601         WRITE_PAUSE
                                                   >> 602 
                                                   >> 603         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 604         mov     %l1, %o1
                                                   >> 605         mov     %l2, %o2
                                                   >> 606         call    handle_reg_access
                                                   >> 607          mov    %l0, %o3
                                                   >> 608 
                                                   >> 609         RESTORE_ALL
                                                   >> 610 
                                                   >> 611         /* This routine handles Co-Processor Disabled Exceptions. */
                                                   >> 612         .align  4
                                                   >> 613         .globl  do_cp_disabled
                                                   >> 614 do_cp_disabled:
                                                   >> 615         SAVE_ALL
                                                   >> 616 
                                                   >> 617         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 618         WRITE_PAUSE
                                                   >> 619 
                                                   >> 620         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 621         mov     %l1, %o1
                                                   >> 622         mov     %l2, %o2
                                                   >> 623         call    handle_cp_disabled
                                                   >> 624          mov    %l0, %o3
                                                   >> 625 
                                                   >> 626         RESTORE_ALL
                                                   >> 627 
                                                   >> 628         /* This routine handles Co-Processor Exceptions. */
                                                   >> 629         .align  4
                                                   >> 630         .globl  do_cp_exception
                                                   >> 631 do_cp_exception:
                                                   >> 632         SAVE_ALL
                                                   >> 633 
                                                   >> 634         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 635         WRITE_PAUSE
                                                   >> 636 
                                                   >> 637         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 638         mov     %l1, %o1
                                                   >> 639         mov     %l2, %o2
                                                   >> 640         call    handle_cp_exception
                                                   >> 641          mov    %l0, %o3
                                                   >> 642 
                                                   >> 643         RESTORE_ALL
                                                   >> 644 
                                                   >> 645         /* This routine handles Hardware Divide By Zero Exceptions. */
                                                   >> 646         .align  4
                                                   >> 647         .globl  do_hw_divzero
                                                   >> 648 do_hw_divzero:
                                                   >> 649         SAVE_ALL
                                                   >> 650 
                                                   >> 651         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 652         WRITE_PAUSE
                                                   >> 653 
                                                   >> 654         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 655         mov     %l1, %o1
                                                   >> 656         mov     %l2, %o2
                                                   >> 657         call    handle_hw_divzero
                                                   >> 658          mov    %l0, %o3
                                                   >> 659 
                                                   >> 660         RESTORE_ALL
                                                   >> 661 
                                                   >> 662         .align  4
                                                   >> 663         .globl  do_flush_windows
                                                   >> 664 do_flush_windows:
                                                   >> 665         SAVE_ALL
                                                   >> 666 
                                                   >> 667         wr      %l0, PSR_ET, %psr
                                                   >> 668         WRITE_PAUSE
                                                   >> 669 
                                                   >> 670         andcc   %l0, PSR_PS, %g0
                                                   >> 671         bne     dfw_kernel
                                                   >> 672          nop
                                                   >> 673 
                                                   >> 674         call    flush_user_windows
                                                   >> 675          nop
                                                   >> 676 
                                                   >> 677         /* Advance over the trap instruction. */
                                                   >> 678         ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1
                                                   >> 679         add     %l1, 0x4, %l2
                                                   >> 680         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 681         st      %l2, [%sp + STACKFRAME_SZ + PT_NPC]
                                                   >> 682 
                                                   >> 683         RESTORE_ALL
                                                   >> 684 
                                                   >> 685         .globl  flush_patch_one
                                                   >> 686 
                                                   >> 687         /* We get these for debugging routines using __builtin_return_address() */
                                                   >> 688 dfw_kernel:
                                                   >> 689 flush_patch_one:
                                                   >> 690         FLUSH_ALL_KERNEL_WINDOWS
                                                   >> 691 
                                                   >> 692         /* Advance over the trap instruction. */
                                                   >> 693         ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1
                                                   >> 694         add     %l1, 0x4, %l2
                                                   >> 695         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 696         st      %l2, [%sp + STACKFRAME_SZ + PT_NPC]
                                                   >> 697 
                                                   >> 698         RESTORE_ALL
                                                   >> 699 
                                                   >> 700         /* The getcc software trap.  The user wants the condition codes from
                                                   >> 701          * the %psr in register %g1.
                                                   >> 702          */
                                                   >> 703 
                                                   >> 704         .align  4
                                                   >> 705         .globl  getcc_trap_handler
                                                   >> 706 getcc_trap_handler:
                                                   >> 707         srl     %l0, 20, %g1    ! give user
                                                   >> 708         and     %g1, 0xf, %g1   ! only ICC bits in %psr
                                                   >> 709         jmp     %l2             ! advance over trap instruction
                                                   >> 710         rett    %l2 + 0x4       ! like this...
                                                   >> 711 
                                                   >> 712         /* The setcc software trap.  The user has condition codes in %g1
                                                   >> 713          * that it would like placed in the %psr.  Be careful not to flip
                                                   >> 714          * any unintentional bits!
                                                   >> 715          */
                                                   >> 716 
                                                   >> 717         .align  4
                                                   >> 718         .globl  setcc_trap_handler
                                                   >> 719 setcc_trap_handler:
                                                   >> 720         sll     %g1, 0x14, %l4
                                                   >> 721         set     PSR_ICC, %l5
                                                   >> 722         andn    %l0, %l5, %l0   ! clear ICC bits in %psr
                                                   >> 723         and     %l4, %l5, %l4   ! clear non-ICC bits in user value
                                                   >> 724         or      %l4, %l0, %l4   ! or them in... mix mix mix
                                                   >> 725 
                                                   >> 726         wr      %l4, 0x0, %psr  ! set new %psr
                                                   >> 727         WRITE_PAUSE             ! TI scumbags...
                                                   >> 728 
                                                   >> 729         jmp     %l2             ! advance over trap instruction
                                                   >> 730         rett    %l2 + 0x4       ! like this...
                                                   >> 731 
                                                   >> 732 sun4m_nmi_error:
                                                   >> 733         /* NMI async memory error handling. */
                                                   >> 734         sethi   %hi(0x80000000), %l4
                                                   >> 735         sethi   %hi(sun4m_irq_global), %o5
                                                   >> 736         ld      [%o5 + %lo(sun4m_irq_global)], %l5
                                                   >> 737         st      %l4, [%l5 + 0x0c]       ! sun4m_irq_global->mask_set=0x80000000
                                                   >> 738         WRITE_PAUSE
                                                   >> 739         ld      [%l5 + 0x00], %g0       ! sun4m_irq_global->pending
                                                   >> 740         WRITE_PAUSE
                                                   >> 741         or      %l0, PSR_PIL, %l4
                                                   >> 742         wr      %l4, 0x0, %psr
                                                   >> 743         WRITE_PAUSE
                                                   >> 744         wr      %l4, PSR_ET, %psr
                                                   >> 745         WRITE_PAUSE
                                                   >> 746         call    sun4m_nmi
                                                   >> 747          nop
                                                   >> 748         st      %l4, [%l5 + 0x08]       ! sun4m_irq_global->mask_clear=0x80000000
                                                   >> 749         WRITE_PAUSE
                                                   >> 750         ld      [%l5 + 0x00], %g0       ! sun4m_irq_global->pending
                                                   >> 751         WRITE_PAUSE
                                                   >> 752         RESTORE_ALL
                                                   >> 753 
                                                   >> 754 #ifndef CONFIG_SMP
                                                   >> 755         .align  4
                                                   >> 756         .globl  linux_trap_ipi15_sun4m
                                                   >> 757 linux_trap_ipi15_sun4m:
                                                   >> 758         SAVE_ALL
                                                   >> 759 
                                                   >> 760         ba      sun4m_nmi_error
                                                   >> 761          nop
                                                   >> 762 #endif /* CONFIG_SMP */
                                                   >> 763 
                                                   >> 764         .align  4
                                                   >> 765         .globl  srmmu_fault
                                                   >> 766 srmmu_fault:
                                                   >> 767         mov     0x400, %l5
                                                   >> 768         mov     0x300, %l4
                                                   >> 769 
                                                   >> 770 LEON_PI(lda     [%l5] ASI_LEON_MMUREGS, %l6)    ! read sfar first
                                                   >> 771 SUN_PI_(lda     [%l5] ASI_M_MMUREGS, %l6)       ! read sfar first
                                                   >> 772 
                                                   >> 773 LEON_PI(lda     [%l4] ASI_LEON_MMUREGS, %l5)    ! read sfsr last
                                                   >> 774 SUN_PI_(lda     [%l4] ASI_M_MMUREGS, %l5)       ! read sfsr last
                                                   >> 775 
                                                   >> 776         andn    %l6, 0xfff, %l6
                                                   >> 777         srl     %l5, 6, %l5                     ! and encode all info into l7
                                                   >> 778 
                                                   >> 779         and     %l5, 2, %l5
                                                   >> 780         or      %l5, %l6, %l6
                                                   >> 781 
                                                   >> 782         or      %l6, %l7, %l7                   ! l7 = [addr,write,txtfault]
                                                   >> 783 
                                                   >> 784         SAVE_ALL
                                                   >> 785 
                                                   >> 786         mov     %l7, %o1
                                                   >> 787         mov     %l7, %o2
                                                   >> 788         and     %o1, 1, %o1             ! arg2 = text_faultp
                                                   >> 789         mov     %l7, %o3
                                                   >> 790         and     %o2, 2, %o2             ! arg3 = writep
                                                   >> 791         andn    %o3, 0xfff, %o3         ! arg4 = faulting address
                                                   >> 792 
                                                   >> 793         wr      %l0, PSR_ET, %psr
                                                   >> 794         WRITE_PAUSE
                                                   >> 795 
                                                   >> 796         call    do_sparc_fault
                                                   >> 797          add    %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
                                                   >> 798 
                                                   >> 799         RESTORE_ALL
                                                   >> 800 
                                                   >> 801         .align  4
                                                   >> 802         .globl  sys_nis_syscall
                                                   >> 803 sys_nis_syscall:
                                                   >> 804         mov     %o7, %l5
                                                   >> 805         add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
                                                   >> 806         call    c_sys_nis_syscall
                                                   >> 807          mov    %l5, %o7
                                                   >> 808 
                                                   >> 809 sunos_execv:
                                                   >> 810         .globl  sunos_execv
                                                   >> 811         b       sys_execve
                                                   >> 812          clr    %i2
                                                   >> 813 
                                                   >> 814         .align  4
                                                   >> 815         .globl  sys_sparc_pipe
                                                   >> 816 sys_sparc_pipe:
                                                   >> 817         mov     %o7, %l5
                                                   >> 818         add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
                                                   >> 819         call    sparc_pipe
                                                   >> 820          mov    %l5, %o7
                                                   >> 821 
                                                   >> 822         .align  4
                                                   >> 823         .globl  sys_sigstack
                                                   >> 824 sys_sigstack:
                                                   >> 825         mov     %o7, %l5
                                                   >> 826         mov     %fp, %o2
                                                   >> 827         call    do_sys_sigstack
                                                   >> 828          mov    %l5, %o7
                                                   >> 829 
                                                   >> 830         .align  4
                                                   >> 831         .globl  sys_sigreturn
                                                   >> 832 sys_sigreturn:
                                                   >> 833         call    do_sigreturn
                                                   >> 834          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 835 
                                                   >> 836         ld      [%curptr + TI_FLAGS], %l5
                                                   >> 837         andcc   %l5, _TIF_SYSCALL_TRACE, %g0
                                                   >> 838         be      1f
                                                   >> 839          nop
185                                                   840 
186         .macro mte_set_user_gcr, tsk, tmp, tmp !! 841         call    syscall_trace
187 #ifdef CONFIG_KASAN_HW_TAGS                    !! 842          mov    1, %o1
188 alternative_cb  ARM64_ALWAYS_SYSTEM, kasan_hw_ << 
189         b       1f                             << 
190 alternative_cb_end                             << 
191         ldr     \tmp, [\tsk, #THREAD_MTE_CTRL] << 
192                                                   843 
193         mte_set_gcr \tmp, \tmp2                << 
194 1:                                                844 1:
195 #endif                                         !! 845         /* We don't want to muck with user registers like a
196         .endm                                  !! 846          * normal syscall, just return.
197                                                << 
198         .macro  kernel_entry, el, regsize = 64 << 
199         .if     \el == 0                       << 
200         alternative_insn nop, SET_PSTATE_DIT(1 << 
201         .endif                                 << 
202         .if     \regsize == 32                 << 
203         mov     w0, w0                         << 
204         .endif                                 << 
205         stp     x0, x1, [sp, #16 * 0]          << 
206         stp     x2, x3, [sp, #16 * 1]          << 
207         stp     x4, x5, [sp, #16 * 2]          << 
208         stp     x6, x7, [sp, #16 * 3]          << 
209         stp     x8, x9, [sp, #16 * 4]          << 
210         stp     x10, x11, [sp, #16 * 5]        << 
211         stp     x12, x13, [sp, #16 * 6]        << 
212         stp     x14, x15, [sp, #16 * 7]        << 
213         stp     x16, x17, [sp, #16 * 8]        << 
214         stp     x18, x19, [sp, #16 * 9]        << 
215         stp     x20, x21, [sp, #16 * 10]       << 
216         stp     x22, x23, [sp, #16 * 11]       << 
217         stp     x24, x25, [sp, #16 * 12]       << 
218         stp     x26, x27, [sp, #16 * 13]       << 
219         stp     x28, x29, [sp, #16 * 14]       << 
220                                                << 
221         .if     \el == 0                       << 
222         clear_gp_regs                          << 
223         mrs     x21, sp_el0                    << 
224         ldr_this_cpu    tsk, __entry_task, x20 << 
225         msr     sp_el0, tsk                    << 
226                                                << 
227         /*                                     << 
228          * Ensure MDSCR_EL1.SS is clear, since << 
229          * when scheduling.                    << 
230          */                                       847          */
231         ldr     x19, [tsk, #TSK_TI_FLAGS]      !! 848         RESTORE_ALL
232         disable_step_tsk x19, x20              << 
233                                                   849 
234         /* Check for asynchronous tag check fa !! 850         .align  4
235         ldr     x0, [tsk, THREAD_SCTLR_USER]   !! 851         .globl  sys_rt_sigreturn
236         check_mte_async_tcf x22, x23, x0       !! 852 sys_rt_sigreturn:
                                                   >> 853         call    do_rt_sigreturn
                                                   >> 854          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 855 
                                                   >> 856         ld      [%curptr + TI_FLAGS], %l5
                                                   >> 857         andcc   %l5, _TIF_SYSCALL_TRACE, %g0
                                                   >> 858         be      1f
                                                   >> 859          nop
                                                   >> 860 
                                                   >> 861         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 862         call    syscall_trace
                                                   >> 863          mov    1, %o1
237                                                   864 
238 #ifdef CONFIG_ARM64_PTR_AUTH                   << 
239 alternative_if ARM64_HAS_ADDRESS_AUTH          << 
240         /*                                     << 
241          * Enable IA for in-kernel PAC if the  << 
242          * this could be implemented with an u << 
243          * a load, this was measured to be slo << 
244          *                                     << 
245          * Install the kernel IA key only if I << 
246          * was disabled on kernel exit then we << 
247          * installed so there is no need to in << 
248          */                                    << 
249         tbz     x0, SCTLR_ELx_ENIA_SHIFT, 1f   << 
250         __ptrauth_keys_install_kernel_nosync t << 
251         b       2f                             << 
252 1:                                                865 1:
253         mrs     x0, sctlr_el1                  !! 866         /* We are returning to a signal handler. */
254         orr     x0, x0, SCTLR_ELx_ENIA         !! 867         RESTORE_ALL
255         msr     sctlr_el1, x0                  << 
256 2:                                             << 
257 alternative_else_nop_endif                     << 
258 #endif                                         << 
259                                                   868 
260         apply_ssbd 1, x22, x23                 !! 869         /* Now that we have a real sys_clone, sys_fork() is
261                                                !! 870          * implemented in terms of it.  Our _real_ implementation
262         mte_set_kernel_gcr x22, x23            !! 871          * of SunOS vfork() will use sys_vfork().
263                                                !! 872          *
264         /*                                     !! 873          * XXX These three should be consolidated into mostly shared
265          * Any non-self-synchronizing system r !! 874          * XXX code just like on sparc64... -DaveM
266          * kernel entry should be placed befor << 
267          */                                       875          */
268 alternative_if ARM64_MTE                       !! 876         .align  4
269         isb                                    !! 877         .globl  sys_fork, flush_patch_two
270         b       1f                             !! 878 sys_fork:
271 alternative_else_nop_endif                     !! 879         mov     %o7, %l5
272 alternative_if ARM64_HAS_ADDRESS_AUTH          !! 880 flush_patch_two:
273         isb                                    !! 881         FLUSH_ALL_KERNEL_WINDOWS;
274 alternative_else_nop_endif                     !! 882         ld      [%curptr + TI_TASK], %o4
                                                   >> 883         rd      %psr, %g4
                                                   >> 884         WRITE_PAUSE
                                                   >> 885         mov     SIGCHLD, %o0                    ! arg0: clone flags
                                                   >> 886         rd      %wim, %g5
                                                   >> 887         WRITE_PAUSE
                                                   >> 888         mov     %fp, %o1                        ! arg1: usp
                                                   >> 889         std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
                                                   >> 890         add     %sp, STACKFRAME_SZ, %o2         ! arg2: pt_regs ptr
                                                   >> 891         mov     0, %o3
                                                   >> 892         call    sparc_do_fork
                                                   >> 893          mov    %l5, %o7
                                                   >> 894 
                                                   >> 895         /* Whee, kernel threads! */
                                                   >> 896         .globl  sys_clone, flush_patch_three
                                                   >> 897 sys_clone:
                                                   >> 898         mov     %o7, %l5
                                                   >> 899 flush_patch_three:
                                                   >> 900         FLUSH_ALL_KERNEL_WINDOWS;
                                                   >> 901         ld      [%curptr + TI_TASK], %o4
                                                   >> 902         rd      %psr, %g4
                                                   >> 903         WRITE_PAUSE
                                                   >> 904 
                                                   >> 905         /* arg0,1: flags,usp  -- loaded already */
                                                   >> 906         cmp     %o1, 0x0                        ! Is new_usp NULL?
                                                   >> 907         rd      %wim, %g5
                                                   >> 908         WRITE_PAUSE
                                                   >> 909         be,a    1f
                                                   >> 910          mov    %fp, %o1                        ! yes, use callers usp
                                                   >> 911         andn    %o1, 7, %o1                     ! no, align to 8 bytes
275 1:                                                912 1:
                                                   >> 913         std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
                                                   >> 914         add     %sp, STACKFRAME_SZ, %o2         ! arg2: pt_regs ptr
                                                   >> 915         mov     0, %o3
                                                   >> 916         call    sparc_do_fork
                                                   >> 917          mov    %l5, %o7
                                                   >> 918 
                                                   >> 919         /* Whee, real vfork! */
                                                   >> 920         .globl  sys_vfork, flush_patch_four
                                                   >> 921 sys_vfork:
                                                   >> 922 flush_patch_four:
                                                   >> 923         FLUSH_ALL_KERNEL_WINDOWS;
                                                   >> 924         ld      [%curptr + TI_TASK], %o4
                                                   >> 925         rd      %psr, %g4
                                                   >> 926         WRITE_PAUSE
                                                   >> 927         rd      %wim, %g5
                                                   >> 928         WRITE_PAUSE
                                                   >> 929         std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
                                                   >> 930         sethi   %hi(0x4000 | 0x0100 | SIGCHLD), %o0
                                                   >> 931         mov     %fp, %o1
                                                   >> 932         or      %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
                                                   >> 933         sethi   %hi(sparc_do_fork), %l1
                                                   >> 934         mov     0, %o3
                                                   >> 935         jmpl    %l1 + %lo(sparc_do_fork), %g0
                                                   >> 936          add    %sp, STACKFRAME_SZ, %o2
                                                   >> 937 
                                                   >> 938         .align  4
                                                   >> 939 linux_sparc_ni_syscall:
                                                   >> 940         sethi   %hi(sys_ni_syscall), %l7
                                                   >> 941         b       do_syscall
                                                   >> 942          or     %l7, %lo(sys_ni_syscall), %l7
                                                   >> 943 
                                                   >> 944 linux_syscall_trace:
                                                   >> 945         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 946         call    syscall_trace
                                                   >> 947          mov    0, %o1
                                                   >> 948         cmp     %o0, 0
                                                   >> 949         bne     3f
                                                   >> 950          mov    -ENOSYS, %o0
                                                   >> 951 
                                                   >> 952         /* Syscall tracing can modify the registers.  */
                                                   >> 953         ld      [%sp + STACKFRAME_SZ + PT_G1], %g1
                                                   >> 954         sethi   %hi(sys_call_table), %l7
                                                   >> 955         ld      [%sp + STACKFRAME_SZ + PT_I0], %i0
                                                   >> 956         or      %l7, %lo(sys_call_table), %l7
                                                   >> 957         ld      [%sp + STACKFRAME_SZ + PT_I1], %i1
                                                   >> 958         ld      [%sp + STACKFRAME_SZ + PT_I2], %i2
                                                   >> 959         ld      [%sp + STACKFRAME_SZ + PT_I3], %i3
                                                   >> 960         ld      [%sp + STACKFRAME_SZ + PT_I4], %i4
                                                   >> 961         ld      [%sp + STACKFRAME_SZ + PT_I5], %i5
                                                   >> 962         cmp     %g1, NR_syscalls
                                                   >> 963         bgeu    3f
                                                   >> 964          mov    -ENOSYS, %o0
                                                   >> 965 
                                                   >> 966         sll     %g1, 2, %l4
                                                   >> 967         mov     %i0, %o0
                                                   >> 968         ld      [%l7 + %l4], %l7
                                                   >> 969         mov     %i1, %o1
                                                   >> 970         mov     %i2, %o2
                                                   >> 971         mov     %i3, %o3
                                                   >> 972         b       2f
                                                   >> 973          mov    %i4, %o4
276                                                   974 
277         scs_load_current                       !! 975         .globl  ret_from_fork
278         .else                                  !! 976 ret_from_fork:
279         add     x21, sp, #PT_REGS_SIZE         !! 977         call    schedule_tail
280         get_current_task tsk                   !! 978          ld     [%g3 + TI_TASK], %o0
281         .endif /* \el == 0 */                  !! 979         b       ret_sys_call
282         mrs     x22, elr_el1                   !! 980          ld     [%sp + STACKFRAME_SZ + PT_I0], %o0
283         mrs     x23, spsr_el1                  !! 981 
284         stp     lr, x21, [sp, #S_LR]           !! 982         .globl  ret_from_kernel_thread
285                                                !! 983 ret_from_kernel_thread:
286         /*                                     !! 984         call    schedule_tail
287          * For exceptions from EL0, create a f !! 985          ld     [%g3 + TI_TASK], %o0
288          * For exceptions from EL1, create a s !! 986         ld      [%sp + STACKFRAME_SZ + PT_G1], %l0
289          * interrupted code shows up in the ba !! 987         call    %l0
290          */                                    !! 988          ld     [%sp + STACKFRAME_SZ + PT_G2], %o0
291         .if \el == 0                           !! 989         rd      %psr, %l1
292         stp     xzr, xzr, [sp, #S_STACKFRAME]  !! 990         ld      [%sp + STACKFRAME_SZ + PT_PSR], %l0
293         .else                                  !! 991         andn    %l0, PSR_CWP, %l0
294         stp     x29, x22, [sp, #S_STACKFRAME]  !! 992         nop
295         .endif                                 !! 993         and     %l1, PSR_CWP, %l1
296         add     x29, sp, #S_STACKFRAME         !! 994         or      %l0, %l1, %l0
297                                                !! 995         st      %l0, [%sp + STACKFRAME_SZ + PT_PSR]
298 #ifdef CONFIG_ARM64_SW_TTBR0_PAN               !! 996         b       ret_sys_call
299 alternative_if_not ARM64_HAS_PAN               !! 997          mov    0, %o0
300         bl      __swpan_entry_el\el            !! 998 
301 alternative_else_nop_endif                     !! 999         /* Linux native system calls enter here... */
302 #endif                                         !! 1000         .align  4
303                                                !! 1001         .globl  linux_sparc_syscall
304         stp     x22, x23, [sp, #S_PC]          !! 1002 linux_sparc_syscall:
305                                                !! 1003         sethi   %hi(PSR_SYSCALL), %l4
306         /* Not in a syscall by default (el0_sv !! 1004         or      %l0, %l4, %l0
307         .if     \el == 0                       !! 1005         /* Direct access to user regs, must faster. */
308         mov     w21, #NO_SYSCALL               !! 1006         cmp     %g1, NR_syscalls
309         str     w21, [sp, #S_SYSCALLNO]        !! 1007         bgeu    linux_sparc_ni_syscall
310         .endif                                 !! 1008          sll    %g1, 2, %l4
311                                                !! 1009         ld      [%l7 + %l4], %l7
312 #ifdef CONFIG_ARM64_PSEUDO_NMI                 !! 1010 
313 alternative_if_not ARM64_HAS_GIC_PRIO_MASKING  !! 1011 do_syscall:
314         b       .Lskip_pmr_save\@              !! 1012         SAVE_ALL_HEAD
315 alternative_else_nop_endif                     !! 1013          rd     %wim, %l3
316                                                !! 1014 
317         mrs_s   x20, SYS_ICC_PMR_EL1           !! 1015         wr      %l0, PSR_ET, %psr
318         str     x20, [sp, #S_PMR_SAVE]         !! 1016         mov     %i0, %o0
319         mov     x20, #GIC_PRIO_IRQON | GIC_PRI !! 1017         mov     %i1, %o1
320         msr_s   SYS_ICC_PMR_EL1, x20           !! 1018         mov     %i2, %o2
321                                                !! 1019 
322 .Lskip_pmr_save\@:                             !! 1020         ld      [%curptr + TI_FLAGS], %l5
323 #endif                                         !! 1021         mov     %i3, %o3
324                                                !! 1022         andcc   %l5, _TIF_SYSCALL_TRACE, %g0
325         /*                                     !! 1023         mov     %i4, %o4
326          * Registers that may be useful after  !! 1024         bne     linux_syscall_trace
327          *                                     !! 1025          mov    %i0, %l5
328          * x20 - ICC_PMR_EL1                   !! 1026 2:
329          * x21 - aborted SP                    !! 1027         call    %l7
330          * x22 - aborted PC                    !! 1028          mov    %i5, %o5
331          * x23 - aborted PSTATE                << 
332         */                                     << 
333         .endm                                  << 
334                                                << 
335         .macro  kernel_exit, el                << 
336         .if     \el != 0                       << 
337         disable_daif                           << 
338         .endif                                 << 
339                                                << 
340 #ifdef CONFIG_ARM64_PSEUDO_NMI                 << 
341 alternative_if_not ARM64_HAS_GIC_PRIO_MASKING  << 
342         b       .Lskip_pmr_restore\@           << 
343 alternative_else_nop_endif                     << 
344                                                << 
345         ldr     x20, [sp, #S_PMR_SAVE]         << 
346         msr_s   SYS_ICC_PMR_EL1, x20           << 
347                                                << 
348         /* Ensure priority change is seen by r << 
349 alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_ << 
350         dsb     sy                             << 
351 alternative_else_nop_endif                     << 
352                                                << 
353 .Lskip_pmr_restore\@:                          << 
354 #endif                                         << 
355                                                << 
356         ldp     x21, x22, [sp, #S_PC]          << 
357                                                << 
358 #ifdef CONFIG_ARM64_SW_TTBR0_PAN               << 
359 alternative_if_not ARM64_HAS_PAN               << 
360         bl      __swpan_exit_el\el             << 
361 alternative_else_nop_endif                     << 
362 #endif                                         << 
363                                                   1029 
364         .if     \el == 0                       << 
365         ldr     x23, [sp, #S_SP]               << 
366         msr     sp_el0, x23                    << 
367         tst     x22, #PSR_MODE32_BIT           << 
368         b.eq    3f                             << 
369                                                << 
370 #ifdef CONFIG_ARM64_ERRATUM_845719             << 
371 alternative_if ARM64_WORKAROUND_845719         << 
372 #ifdef CONFIG_PID_IN_CONTEXTIDR                << 
373         mrs     x29, contextidr_el1            << 
374         msr     contextidr_el1, x29            << 
375 #else                                          << 
376         msr contextidr_el1, xzr                << 
377 #endif                                         << 
378 alternative_else_nop_endif                     << 
379 #endif                                         << 
380 3:                                                1030 3:
381         scs_save tsk                           !! 1031         st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
382                                                << 
383         /* Ignore asynchronous tag check fault << 
384         ldr     x0, [tsk, THREAD_SCTLR_USER]   << 
385         clear_mte_async_tcf x0                 << 
386                                                   1032 
387 #ifdef CONFIG_ARM64_PTR_AUTH                   !! 1033 ret_sys_call:
388 alternative_if ARM64_HAS_ADDRESS_AUTH          !! 1034         ld      [%curptr + TI_FLAGS], %l6
389         /*                                     !! 1035         cmp     %o0, -ERESTART_RESTARTBLOCK
390          * IA was enabled for in-kernel PAC. D !! 1036         ld      [%sp + STACKFRAME_SZ + PT_PSR], %g3
391          * alternatively install the user's IA !! 1037         set     PSR_C, %g2
392          * SCTLR bits were updated on task swi !! 1038         bgeu    1f
393          *                                     !! 1039          andcc  %l6, _TIF_SYSCALL_TRACE, %g0
394          * No kernel C function calls after th !! 1040 
395          */                                    !! 1041         /* System call success, clear Carry condition code. */
396         tbz     x0, SCTLR_ELx_ENIA_SHIFT, 1f   !! 1042         andn    %g3, %g2, %g3
397         __ptrauth_keys_install_user tsk, x0, x !! 1043         clr     %l6
398         b       2f                             !! 1044         st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]     
                                                   >> 1045         bne     linux_syscall_trace2
                                                   >> 1046          ld     [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
                                                   >> 1047         add     %l1, 0x4, %l2                   /* npc = npc+4 */
                                                   >> 1048         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 1049         b       ret_trap_entry
                                                   >> 1050          st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
399 1:                                                1051 1:
400         mrs     x0, sctlr_el1                  !! 1052         /* System call failure, set Carry condition code.
401         bic     x0, x0, SCTLR_ELx_ENIA         !! 1053          * Also, get abs(errno) to return to the process.
402         msr     sctlr_el1, x0                  << 
403 2:                                             << 
404 alternative_else_nop_endif                     << 
405 #endif                                         << 
406                                                << 
407         mte_set_user_gcr tsk, x0, x1           << 
408                                                << 
409         apply_ssbd 0, x0, x1                   << 
410         .endif                                 << 
411                                                << 
412         msr     elr_el1, x21                   << 
413         msr     spsr_el1, x22                  << 
414         ldp     x0, x1, [sp, #16 * 0]          << 
415         ldp     x2, x3, [sp, #16 * 1]          << 
416         ldp     x4, x5, [sp, #16 * 2]          << 
417         ldp     x6, x7, [sp, #16 * 3]          << 
418         ldp     x8, x9, [sp, #16 * 4]          << 
419         ldp     x10, x11, [sp, #16 * 5]        << 
420         ldp     x12, x13, [sp, #16 * 6]        << 
421         ldp     x14, x15, [sp, #16 * 7]        << 
422         ldp     x16, x17, [sp, #16 * 8]        << 
423         ldp     x18, x19, [sp, #16 * 9]        << 
424         ldp     x20, x21, [sp, #16 * 10]       << 
425         ldp     x22, x23, [sp, #16 * 11]       << 
426         ldp     x24, x25, [sp, #16 * 12]       << 
427         ldp     x26, x27, [sp, #16 * 13]       << 
428         ldp     x28, x29, [sp, #16 * 14]       << 
429                                                << 
430         .if     \el == 0                       << 
431 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0              << 
432         alternative_insn "b .L_skip_tramp_exit << 
433                                                << 
434         msr     far_el1, x29                   << 
435                                                << 
436         ldr_this_cpu    x30, this_cpu_vector,  << 
437         tramp_alias     x29, tramp_exit        << 
438         msr             vbar_el1, x30          << 
439         ldr             lr, [sp, #S_LR]        << 
440         add             sp, sp, #PT_REGS_SIZE  << 
441         br              x29                    << 
442                                                << 
443 .L_skip_tramp_exit_\@:                         << 
444 #endif                                         << 
445         .endif                                 << 
446                                                << 
447         ldr     lr, [sp, #S_LR]                << 
448         add     sp, sp, #PT_REGS_SIZE          << 
449                                                << 
450         .if \el == 0                           << 
451         /* This must be after the last explici << 
452 alternative_if ARM64_WORKAROUND_SPECULATIVE_UN << 
453         tlbi    vale1, xzr                     << 
454         dsb     nsh                            << 
455 alternative_else_nop_endif                     << 
456         .else                                  << 
457         /* Ensure any device/NC reads complete << 
458         alternative_insn nop, "dmb sy", ARM64_ << 
459         .endif                                 << 
460                                                << 
461         eret                                   << 
462         sb                                     << 
463         .endm                                  << 
464                                                << 
465 #ifdef CONFIG_ARM64_SW_TTBR0_PAN               << 
466         /*                                     << 
467          * Set the TTBR0 PAN bit in SPSR. When << 
468          * EL0, there is no need to check the  << 
469          * accesses are always enabled.        << 
470          * Note that the meaning of this bit d << 
471          * feature as all TTBR0_EL1 accesses a << 
472          * user mappings.                      << 
473          */                                    << 
474 SYM_CODE_START_LOCAL(__swpan_entry_el1)        << 
475         mrs     x21, ttbr0_el1                 << 
476         tst     x21, #TTBR_ASID_MASK           << 
477         orr     x23, x23, #PSR_PAN_BIT         << 
478         b.eq    1f                             << 
479         and     x23, x23, #~PSR_PAN_BIT        << 
480 SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL << 
481         __uaccess_ttbr0_disable x21            << 
482 1:      ret                                    << 
483 SYM_CODE_END(__swpan_entry_el1)                << 
484                                                << 
485         /*                                     << 
486          * Restore access to TTBR0_EL1. If ret << 
487          * PAN bit checking.                   << 
488          */                                    << 
489 SYM_CODE_START_LOCAL(__swpan_exit_el1)         << 
490         tbnz    x22, #22, 1f                   << 
491         __uaccess_ttbr0_enable x0, x1          << 
492 1:      and     x22, x22, #~PSR_PAN_BIT        << 
493         ret                                    << 
494 SYM_CODE_END(__swpan_exit_el1)                 << 
495                                                << 
496 SYM_CODE_START_LOCAL(__swpan_exit_el0)         << 
497         __uaccess_ttbr0_enable x0, x1          << 
498         /*                                     << 
499          * Enable errata workarounds only if r << 
500          * workaround currently required for T << 
501          * Cavium erratum 27456 (broadcast TLB << 
502          * corruption).                        << 
503          */                                       1054          */
504         b       post_ttbr_update_workaround    !! 1055         sub     %g0, %o0, %o0
505 SYM_CODE_END(__swpan_exit_el0)                 !! 1056         or      %g3, %g2, %g3
506 #endif                                         !! 1057         st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
507                                                !! 1058         mov     1, %l6
508 /* GPRs used by entry code */                  !! 1059         st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]
509 tsk     .req    x28             // current thr !! 1060         bne     linux_syscall_trace2
510                                                !! 1061          ld     [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
511         .text                                  !! 1062         add     %l1, 0x4, %l2                   /* npc = npc+4 */
                                                   >> 1063         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 1064         b       ret_trap_entry
                                                   >> 1065          st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
                                                   >> 1066 
                                                   >> 1067 linux_syscall_trace2:
                                                   >> 1068         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 1069         mov     1, %o1
                                                   >> 1070         call    syscall_trace
                                                   >> 1071          add    %l1, 0x4, %l2                   /* npc = npc+4 */
                                                   >> 1072         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 1073         b       ret_trap_entry
                                                   >> 1074          st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
512                                                   1075 
513 /*                                             << 
514  * Exception vectors.                          << 
515  */                                            << 
516         .pushsection ".entry.text", "ax"       << 
517                                                << 
518         .align  11                             << 
519 SYM_CODE_START(vectors)                        << 
520         kernel_ventry   1, t, 64, sync         << 
521         kernel_ventry   1, t, 64, irq          << 
522         kernel_ventry   1, t, 64, fiq          << 
523         kernel_ventry   1, t, 64, error        << 
524                                                << 
525         kernel_ventry   1, h, 64, sync         << 
526         kernel_ventry   1, h, 64, irq          << 
527         kernel_ventry   1, h, 64, fiq          << 
528         kernel_ventry   1, h, 64, error        << 
529                                                << 
530         kernel_ventry   0, t, 64, sync         << 
531         kernel_ventry   0, t, 64, irq          << 
532         kernel_ventry   0, t, 64, fiq          << 
533         kernel_ventry   0, t, 64, error        << 
534                                                << 
535         kernel_ventry   0, t, 32, sync         << 
536         kernel_ventry   0, t, 32, irq          << 
537         kernel_ventry   0, t, 32, fiq          << 
538         kernel_ventry   0, t, 32, error        << 
539 SYM_CODE_END(vectors)                          << 
540                                                << 
541 #ifdef CONFIG_VMAP_STACK                       << 
542 SYM_CODE_START_LOCAL(__bad_stack)              << 
543         /*                                     << 
544          * We detected an overflow in kernel_v << 
545          * overflow stack. Stash the exception << 
546          * handler.                            << 
547          */                                    << 
548                                                   1076 
549         /* Restore the original x0 value */    !! 1077 /* Saving and restoring the FPU state is best done from lowlevel code.
550         mrs     x0, tpidrro_el0                !! 1078  *
551                                                !! 1079  * void fpsave(unsigned long *fpregs, unsigned long *fsr,
552         /*                                     !! 1080  *             void *fpqueue, unsigned long *fpqdepth)
553          * Store the original GPRs to the new  << 
554          * PT_REGS_SIZE) was stashed in tpidr_ << 
555          */                                    << 
556         sub     sp, sp, #PT_REGS_SIZE          << 
557         kernel_entry 1                         << 
558         mrs     x0, tpidr_el0                  << 
559         add     x0, x0, #PT_REGS_SIZE          << 
560         str     x0, [sp, #S_SP]                << 
561                                                << 
562         /* Stash the regs for handle_bad_stack << 
563         mov     x0, sp                         << 
564                                                << 
565         /* Time to die */                      << 
566         bl      handle_bad_stack               << 
567         ASM_BUG()                              << 
568 SYM_CODE_END(__bad_stack)                      << 
569 #endif /* CONFIG_VMAP_STACK */                 << 
570                                                << 
571                                                << 
572         .macro entry_handler el:req, ht:req, r << 
573 SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\ << 
574         kernel_entry \el, \regsize             << 
575         mov     x0, sp                         << 
576         bl      el\el\ht\()_\regsize\()_\label << 
577         .if \el == 0                           << 
578         b       ret_to_user                    << 
579         .else                                  << 
580         b       ret_to_kernel                  << 
581         .endif                                 << 
582 SYM_CODE_END(el\el\ht\()_\regsize\()_\label)   << 
583         .endm                                  << 
584                                                << 
585 /*                                             << 
586  * Early exception handlers                    << 
587  */                                               1081  */
588         entry_handler   1, t, 64, sync         << 
589         entry_handler   1, t, 64, irq          << 
590         entry_handler   1, t, 64, fiq          << 
591         entry_handler   1, t, 64, error        << 
592                                                << 
593         entry_handler   1, h, 64, sync         << 
594         entry_handler   1, h, 64, irq          << 
595         entry_handler   1, h, 64, fiq          << 
596         entry_handler   1, h, 64, error        << 
597                                                << 
598         entry_handler   0, t, 64, sync         << 
599         entry_handler   0, t, 64, irq          << 
600         entry_handler   0, t, 64, fiq          << 
601         entry_handler   0, t, 64, error        << 
602                                                << 
603         entry_handler   0, t, 32, sync         << 
604         entry_handler   0, t, 32, irq          << 
605         entry_handler   0, t, 32, fiq          << 
606         entry_handler   0, t, 32, error        << 
607                                                << 
608 SYM_CODE_START_LOCAL(ret_to_kernel)            << 
609         kernel_exit 1                          << 
610 SYM_CODE_END(ret_to_kernel)                    << 
611                                                << 
612 SYM_CODE_START_LOCAL(ret_to_user)              << 
613         ldr     x19, [tsk, #TSK_TI_FLAGS]      << 
614         enable_step_tsk x19, x2                << 
615 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK             << 
616         bl      stackleak_erase_on_task_stack  << 
617 #endif                                         << 
618         kernel_exit 0                          << 
619 SYM_CODE_END(ret_to_user)                      << 
620                                                   1082 
621         .popsection                            !! 1083         .globl  fpsave
                                                   >> 1084 fpsave:
                                                   >> 1085         st      %fsr, [%o1]     ! this can trap on us if fpu is in bogon state
                                                   >> 1086         ld      [%o1], %g1
                                                   >> 1087         set     0x2000, %g4
                                                   >> 1088         andcc   %g1, %g4, %g0
                                                   >> 1089         be      2f
                                                   >> 1090          mov    0, %g2
622                                                   1091 
623         // Move from tramp_pg_dir to swapper_p !! 1092         /* We have an fpqueue to save. */
624         .macro tramp_map_kernel, tmp           << 
625         mrs     \tmp, ttbr1_el1                << 
626         add     \tmp, \tmp, #TRAMP_SWAPPER_OFF << 
627         bic     \tmp, \tmp, #USER_ASID_FLAG    << 
628         msr     ttbr1_el1, \tmp                << 
629 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003         << 
630 alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1 << 
631         /* ASID already in \tmp[63:48] */      << 
632         movk    \tmp, #:abs_g2_nc:(TRAMP_VALIA << 
633         movk    \tmp, #:abs_g1_nc:(TRAMP_VALIA << 
634         /* 2MB boundary containing the vectors << 
635         movk    \tmp, #:abs_g0_nc:((TRAMP_VALI << 
636         isb                                    << 
637         tlbi    vae1, \tmp                     << 
638         dsb     nsh                            << 
639 alternative_else_nop_endif                     << 
640 #endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */   << 
641         .endm                                  << 
642                                                << 
643         // Move from swapper_pg_dir to tramp_p << 
644         .macro tramp_unmap_kernel, tmp         << 
645         mrs     \tmp, ttbr1_el1                << 
646         sub     \tmp, \tmp, #TRAMP_SWAPPER_OFF << 
647         orr     \tmp, \tmp, #USER_ASID_FLAG    << 
648         msr     ttbr1_el1, \tmp                << 
649         /*                                     << 
650          * We avoid running the post_ttbr_upda << 
651          * it's only needed by Cavium ThunderX << 
652          * disabled.                           << 
653          */                                    << 
654         .endm                                  << 
655                                                << 
656         .macro          tramp_data_read_var    << 
657 #ifdef CONFIG_RELOCATABLE                      << 
658         ldr             \dst, .L__tramp_data_\ << 
659         .ifndef         .L__tramp_data_\var    << 
660         .pushsection    ".entry.tramp.rodata", << 
661         .align          3                      << 
662 .L__tramp_data_\var:                           << 
663         .quad           \var                   << 
664         .popsection                            << 
665         .endif                                 << 
666 #else                                          << 
667         /*                                     << 
668          * As !RELOCATABLE implies !RANDOMIZE_ << 
669          * compile time constant (and hence no << 
670          *                                     << 
671          * As statically allocated kernel code << 
672          * 47 bits of the address space we can << 
673          * instruction to load the upper 16 bi << 
674          */                                    << 
675         movz            \dst, :abs_g2_s:\var   << 
676         movk            \dst, :abs_g1_nc:\var  << 
677         movk            \dst, :abs_g0_nc:\var  << 
678 #endif                                         << 
679         .endm                                  << 
680                                                << 
681 #define BHB_MITIGATION_NONE     0              << 
682 #define BHB_MITIGATION_LOOP     1              << 
683 #define BHB_MITIGATION_FW       2              << 
684 #define BHB_MITIGATION_INSN     3              << 
685                                                << 
686         .macro tramp_ventry, vector_start, reg << 
687         .align  7                              << 
688 1:                                                1093 1:
689         .if     \regsize == 64                 !! 1094         std     %fq, [%o2]
690         msr     tpidrro_el0, x30        // Res !! 1095 fpsave_magic:
691         .endif                                 !! 1096         st      %fsr, [%o1]
                                                   >> 1097         ld      [%o1], %g3
                                                   >> 1098         andcc   %g3, %g4, %g0
                                                   >> 1099         add     %g2, 1, %g2
                                                   >> 1100         bne     1b
                                                   >> 1101          add    %o2, 8, %o2
692                                                   1102 
693         .if     \bhb == BHB_MITIGATION_LOOP    << 
694         /*                                     << 
695          * This sequence must appear before th << 
696          * ret out of tramp_ventry. It appears << 
697          */                                    << 
698         __mitigate_spectre_bhb_loop     x30    << 
699         .endif // \bhb == BHB_MITIGATION_LOOP  << 
700                                                << 
701         .if     \bhb == BHB_MITIGATION_INSN    << 
702         clearbhb                               << 
703         isb                                    << 
704         .endif // \bhb == BHB_MITIGATION_INSN  << 
705                                                << 
706         .if     \kpti == 1                     << 
707         /*                                     << 
708          * Defend against branch aliasing atta << 
709          * entry onto the return stack and usi << 
710          * enter the full-fat kernel vectors.  << 
711          */                                    << 
712         bl      2f                             << 
713         b       .                              << 
714 2:                                                1103 2:
715         tramp_map_kernel        x30            !! 1104         st      %g2, [%o3]
716 alternative_insn isb, nop, ARM64_WORKAROUND_QC << 
717         tramp_data_read_var     x30, vectors   << 
718 alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2 << 
719         prfm    plil1strm, [x30, #(1b - \vecto << 
720 alternative_else_nop_endif                     << 
721                                                << 
722         msr     vbar_el1, x30                  << 
723         isb                                    << 
724         .else                                  << 
725         adr_l   x30, vectors                   << 
726         .endif // \kpti == 1                   << 
727                                                   1105 
728         .if     \bhb == BHB_MITIGATION_FW      !! 1106         std     %f0, [%o0 + 0x00]
729         /*                                     !! 1107         std     %f2, [%o0 + 0x08]
730          * The firmware sequence must appear b !! 1108         std     %f4, [%o0 + 0x10]
731          * i.e. the ret out of tramp_ventry. B !! 1109         std     %f6, [%o0 + 0x18]
732          * mapped to save/restore the register !! 1110         std     %f8, [%o0 + 0x20]
                                                   >> 1111         std     %f10, [%o0 + 0x28]
                                                   >> 1112         std     %f12, [%o0 + 0x30]
                                                   >> 1113         std     %f14, [%o0 + 0x38]
                                                   >> 1114         std     %f16, [%o0 + 0x40]
                                                   >> 1115         std     %f18, [%o0 + 0x48]
                                                   >> 1116         std     %f20, [%o0 + 0x50]
                                                   >> 1117         std     %f22, [%o0 + 0x58]
                                                   >> 1118         std     %f24, [%o0 + 0x60]
                                                   >> 1119         std     %f26, [%o0 + 0x68]
                                                   >> 1120         std     %f28, [%o0 + 0x70]
                                                   >> 1121         retl
                                                   >> 1122          std    %f30, [%o0 + 0x78]
                                                   >> 1123 
                                                   >> 1124         /* Thanks for Theo Deraadt and the authors of the Sprite/netbsd/openbsd
                                                   >> 1125          * code for pointing out this possible deadlock, while we save state
                                                   >> 1126          * above we could trap on the fsr store so our low level fpu trap
                                                   >> 1127          * code has to know how to deal with this.
                                                   >> 1128          */
                                                   >> 1129 fpsave_catch:
                                                   >> 1130         b       fpsave_magic + 4
                                                   >> 1131          st     %fsr, [%o1]
                                                   >> 1132 
                                                   >> 1133 fpsave_catch2:
                                                   >> 1134         b       fpsave + 4
                                                   >> 1135          st     %fsr, [%o1]
                                                   >> 1136 
                                                   >> 1137         /* void fpload(unsigned long *fpregs, unsigned long *fsr); */
                                                   >> 1138 
                                                   >> 1139         .globl  fpload
                                                   >> 1140 fpload:
                                                   >> 1141         ldd     [%o0 + 0x00], %f0
                                                   >> 1142         ldd     [%o0 + 0x08], %f2
                                                   >> 1143         ldd     [%o0 + 0x10], %f4
                                                   >> 1144         ldd     [%o0 + 0x18], %f6
                                                   >> 1145         ldd     [%o0 + 0x20], %f8
                                                   >> 1146         ldd     [%o0 + 0x28], %f10
                                                   >> 1147         ldd     [%o0 + 0x30], %f12
                                                   >> 1148         ldd     [%o0 + 0x38], %f14
                                                   >> 1149         ldd     [%o0 + 0x40], %f16
                                                   >> 1150         ldd     [%o0 + 0x48], %f18
                                                   >> 1151         ldd     [%o0 + 0x50], %f20
                                                   >> 1152         ldd     [%o0 + 0x58], %f22
                                                   >> 1153         ldd     [%o0 + 0x60], %f24
                                                   >> 1154         ldd     [%o0 + 0x68], %f26
                                                   >> 1155         ldd     [%o0 + 0x70], %f28
                                                   >> 1156         ldd     [%o0 + 0x78], %f30
                                                   >> 1157         ld      [%o1], %fsr
                                                   >> 1158         retl
                                                   >> 1159          nop
                                                   >> 1160 
                                                   >> 1161         /* __ndelay and __udelay take two arguments:
                                                   >> 1162          * 0 - nsecs or usecs to delay
                                                   >> 1163          * 1 - per_cpu udelay_val (loops per jiffy)
                                                   >> 1164          *
                                                   >> 1165          * Note that ndelay gives HZ times higher resolution but has a 10ms
                                                   >> 1166          * limit.  udelay can handle up to 1s.
733          */                                       1167          */
734         __mitigate_spectre_bhb_fw              !! 1168         .globl  __ndelay
735         .endif // \bhb == BHB_MITIGATION_FW    !! 1169 __ndelay:
736                                                !! 1170         save    %sp, -STACKFRAME_SZ, %sp
737         add     x30, x30, #(1b - \vector_start !! 1171         mov     %i0, %o0                ! round multiplier up so large ns ok
738         ret                                    !! 1172         mov     0x1ae, %o1              ! 2**32 / (1 000 000 000 / HZ)
739 .org 1b + 128   // Did we overflow the ventry  !! 1173         umul    %o0, %o1, %o0
740         .endm                                  !! 1174         rd      %y, %o1
741                                                !! 1175         mov     %i1, %o1                ! udelay_val
742         .macro  generate_tramp_vector,  kpti,  !! 1176         umul    %o0, %o1, %o0
743 .Lvector_start\@:                              !! 1177         rd      %y, %o1
744         .space  0x400                          !! 1178         ba      delay_continue
745                                                !! 1179          mov    %o1, %o0                ! >>32 later for better resolution
746         .rept   4                              !! 1180 
747         tramp_ventry    .Lvector_start\@, 64,  !! 1181         .globl  __udelay
748         .endr                                  !! 1182 __udelay:
749         .rept   4                              !! 1183         save    %sp, -STACKFRAME_SZ, %sp
750         tramp_ventry    .Lvector_start\@, 32,  !! 1184         mov     %i0, %o0
751         .endr                                  !! 1185         sethi   %hi(0x10c7), %o1        ! round multiplier up so large us ok
752         .endm                                  !! 1186         or      %o1, %lo(0x10c7), %o1   ! 2**32 / 1 000 000
753                                                !! 1187         umul    %o0, %o1, %o0
754 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0              !! 1188         rd      %y, %o1
755 /*                                             !! 1189         mov     %i1, %o1                ! udelay_val
756  * Exception vectors trampoline.               !! 1190         umul    %o0, %o1, %o0
757  * The order must match __bp_harden_el1_vector !! 1191         rd      %y, %o1
758  * arm64_bp_harden_el1_vectors enum.           !! 1192         sethi   %hi(0x028f4b62), %l0    ! Add in rounding constant * 2**32,
759  */                                            !! 1193         or      %g0, %lo(0x028f4b62), %l0
760         .pushsection ".entry.tramp.text", "ax" !! 1194         addcc   %o0, %l0, %o0           ! 2**32 * 0.009 999
761         .align  11                             !! 1195         bcs,a   3f
762 SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors)    !! 1196          add    %o1, 0x01, %o1
763 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY  !! 1197 3:
764         generate_tramp_vector   kpti=1, bhb=BH !! 1198         mov     HZ, %o0                 ! >>32 earlier for wider range
765         generate_tramp_vector   kpti=1, bhb=BH !! 1199         umul    %o0, %o1, %o0
766         generate_tramp_vector   kpti=1, bhb=BH !! 1200         rd      %y, %o1
767 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTO << 
768         generate_tramp_vector   kpti=1, bhb=BH << 
769 SYM_CODE_END(tramp_vectors)                    << 
770                                                << 
771 SYM_CODE_START_LOCAL(tramp_exit)               << 
772         tramp_unmap_kernel      x29            << 
773         mrs             x29, far_el1           << 
774         eret                                   << 
775         sb                                     << 
776 SYM_CODE_END(tramp_exit)                       << 
777         .popsection                            << 
778 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */        << 
779                                                << 
780 /*                                             << 
781  * Exception vectors for spectre mitigations o << 
782  * kpti is not in use.                         << 
783  */                                            << 
784         .macro generate_el1_vector, bhb        << 
785 .Lvector_start\@:                              << 
786         kernel_ventry   1, t, 64, sync         << 
787         kernel_ventry   1, t, 64, irq          << 
788         kernel_ventry   1, t, 64, fiq          << 
789         kernel_ventry   1, t, 64, error        << 
790                                                << 
791         kernel_ventry   1, h, 64, sync         << 
792         kernel_ventry   1, h, 64, irq          << 
793         kernel_ventry   1, h, 64, fiq          << 
794         kernel_ventry   1, h, 64, error        << 
795                                                << 
796         .rept   4                              << 
797         tramp_ventry    .Lvector_start\@, 64,  << 
798         .endr                                  << 
799         .rept 4                                << 
800         tramp_ventry    .Lvector_start\@, 32,  << 
801         .endr                                  << 
802         .endm                                  << 
803                                                << 
804 /* The order must match tramp_vecs and the arm << 
805         .pushsection ".entry.text", "ax"       << 
806         .align  11                             << 
807 SYM_CODE_START(__bp_harden_el1_vectors)        << 
808 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY  << 
809         generate_el1_vector     bhb=BHB_MITIGA << 
810         generate_el1_vector     bhb=BHB_MITIGA << 
811         generate_el1_vector     bhb=BHB_MITIGA << 
812 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTO << 
813 SYM_CODE_END(__bp_harden_el1_vectors)          << 
814         .popsection                            << 
815                                                << 
816                                                << 
817 /*                                             << 
818  * Register switch for AArch64. The callee-sav << 
819  * and restored. On entry:                     << 
820  *   x0 = previous task_struct (must be preser << 
821  *   x1 = next task_struct                     << 
822  * Previous and next are guaranteed not to be  << 
823  *                                             << 
824  */                                            << 
825 SYM_FUNC_START(cpu_switch_to)                  << 
826         mov     x10, #THREAD_CPU_CONTEXT       << 
827         add     x8, x0, x10                    << 
828         mov     x9, sp                         << 
829         stp     x19, x20, [x8], #16            << 
830         stp     x21, x22, [x8], #16            << 
831         stp     x23, x24, [x8], #16            << 
832         stp     x25, x26, [x8], #16            << 
833         stp     x27, x28, [x8], #16            << 
834         stp     x29, x9, [x8], #16             << 
835         str     lr, [x8]                       << 
836         add     x8, x1, x10                    << 
837         ldp     x19, x20, [x8], #16            << 
838         ldp     x21, x22, [x8], #16            << 
839         ldp     x23, x24, [x8], #16            << 
840         ldp     x25, x26, [x8], #16            << 
841         ldp     x27, x28, [x8], #16            << 
842         ldp     x29, x9, [x8], #16             << 
843         ldr     lr, [x8]                       << 
844         mov     sp, x9                         << 
845         msr     sp_el0, x1                     << 
846         ptrauth_keys_install_kernel x1, x8, x9 << 
847         scs_save x0                            << 
848         scs_load_current                       << 
849         ret                                    << 
850 SYM_FUNC_END(cpu_switch_to)                    << 
851 NOKPROBE(cpu_switch_to)                        << 
852                                                << 
853 /*                                             << 
854  * This is how we return from a fork.          << 
855  */                                            << 
856 SYM_CODE_START(ret_from_fork)                  << 
857         bl      schedule_tail                  << 
858         cbz     x19, 1f                        << 
859         mov     x0, x20                        << 
860         blr     x19                            << 
861 1:      get_current_task tsk                   << 
862         mov     x0, sp                         << 
863         bl      asm_exit_to_user_mode          << 
864         b       ret_to_user                    << 
865 SYM_CODE_END(ret_from_fork)                    << 
866 NOKPROBE(ret_from_fork)                        << 
867                                                << 
868 /*                                             << 
869  * void call_on_irq_stack(struct pt_regs *regs << 
870  *                        void (*func)(struct  << 
871  *                                             << 
872  * Calls func(regs) using this CPU's irq stack << 
873  */                                            << 
874 SYM_FUNC_START(call_on_irq_stack)              << 
875 #ifdef CONFIG_SHADOW_CALL_STACK                << 
876         get_current_task x16                   << 
877         scs_save x16                           << 
878         ldr_this_cpu scs_sp, irq_shadow_call_s << 
879 #endif                                         << 
880                                                << 
881         /* Create a frame record to save our L << 
882         stp     x29, x30, [sp, #-16]!          << 
883         mov     x29, sp                        << 
884                                                << 
885         ldr_this_cpu x16, irq_stack_ptr, x17   << 
886                                                << 
887         /* Move to the new stack and call the  << 
888         add     sp, x16, #IRQ_STACK_SIZE       << 
889         blr     x1                             << 
890                                                   1201 
891         /*                                     !! 1202 delay_continue:
892          * Restore the SP from the FP, and res !! 1203         cmp     %o0, 0x0
893          * record.                             !! 1204 1:
894          */                                    !! 1205         bne     1b
895         mov     sp, x29                        !! 1206          subcc  %o0, 1, %o0
896         ldp     x29, x30, [sp], #16            !! 1207         
897         scs_load_current                       << 
898         ret                                       1208         ret
899 SYM_FUNC_END(call_on_irq_stack)                !! 1209         restore
900 NOKPROBE(call_on_irq_stack)                    << 
901                                                << 
902 #ifdef CONFIG_ARM_SDE_INTERFACE                << 
903                                                << 
904 #include <asm/sdei.h>                          << 
905 #include <uapi/linux/arm_sdei.h>               << 
906                                                << 
907 .macro sdei_handler_exit exit_mode             << 
908         /* On success, this call never returns << 
909         cmp     \exit_mode, #SDEI_EXIT_SMC     << 
910         b.ne    99f                            << 
911         smc     #0                             << 
912         b       .                              << 
913 99:     hvc     #0                             << 
914         b       .                              << 
915 .endm                                          << 
916                                                << 
917 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0              << 
918 /*                                             << 
919  * The regular SDEI entry point may have been  << 
920  * the kernel. This trampoline restores the ke << 
921  * argument accessible.                        << 
922  *                                             << 
923  * This clobbers x4, __sdei_handler() will res << 
924  * copy.                                       << 
925  */                                            << 
926 .pushsection ".entry.tramp.text", "ax"         << 
927 SYM_CODE_START(__sdei_asm_entry_trampoline)    << 
928         mrs     x4, ttbr1_el1                  << 
929         tbz     x4, #USER_ASID_BIT, 1f         << 
930                                                << 
931         tramp_map_kernel tmp=x4                << 
932         isb                                    << 
933         mov     x4, xzr                        << 
934                                                << 
935         /*                                     << 
936          * Remember whether to unmap the kerne << 
937          */                                    << 
938 1:      str     x4, [x1, #(SDEI_EVENT_INTREGS  << 
939         tramp_data_read_var     x4, __sdei_asm << 
940         br      x4                             << 
941 SYM_CODE_END(__sdei_asm_entry_trampoline)      << 
942 NOKPROBE(__sdei_asm_entry_trampoline)          << 
943                                                << 
944 /*                                             << 
945  * Make the exit call and restore the original << 
946  *                                             << 
947  * x0 & x1: setup for the exit API call        << 
948  * x2: exit_mode                               << 
949  * x4: struct sdei_registered_event argument f << 
950  */                                            << 
951 SYM_CODE_START(__sdei_asm_exit_trampoline)     << 
952         ldr     x4, [x4, #(SDEI_EVENT_INTREGS  << 
953         cbnz    x4, 1f                         << 
954                                                << 
955         tramp_unmap_kernel      tmp=x4         << 
956                                                << 
957 1:      sdei_handler_exit exit_mode=x2         << 
958 SYM_CODE_END(__sdei_asm_exit_trampoline)       << 
959 NOKPROBE(__sdei_asm_exit_trampoline)           << 
960 .popsection             // .entry.tramp.text   << 
961 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */        << 
962                                                   1210 
963 /*                                             !! 1211         /* Handle a software breakpoint */
964  * Software Delegated Exception entry point.   !! 1212         /* We have to inform parent that child has stopped */
965  *                                             !! 1213         .align 4
966  * x0: Event number                            !! 1214         .globl breakpoint_trap
967  * x1: struct sdei_registered_event argument f !! 1215 breakpoint_trap:
968  * x2: interrupted PC                          !! 1216         rd      %wim,%l3
969  * x3: interrupted PSTATE                      !! 1217         SAVE_ALL
970  * x4: maybe clobbered by the trampoline       !! 1218         wr      %l0, PSR_ET, %psr
971  *                                             !! 1219         WRITE_PAUSE
972  * Firmware has preserved x0->x17 for us, we m !! 1220 
973  * follow SMC-CC. We save (or retrieve) all th !! 1221         st      %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
974  * want them.                                  !! 1222         call    sparc_breakpoint
975  */                                            !! 1223          add    %sp, STACKFRAME_SZ, %o0
976 SYM_CODE_START(__sdei_asm_handler)             !! 1224 
977         stp     x2, x3, [x1, #SDEI_EVENT_INTRE !! 1225         RESTORE_ALL
978         stp     x4, x5, [x1, #SDEI_EVENT_INTRE !! 1226 
979         stp     x6, x7, [x1, #SDEI_EVENT_INTRE !! 1227 #ifdef CONFIG_KGDB
980         stp     x8, x9, [x1, #SDEI_EVENT_INTRE !! 1228         .align  4
981         stp     x10, x11, [x1, #SDEI_EVENT_INT !! 1229         .globl  kgdb_trap_low
982         stp     x12, x13, [x1, #SDEI_EVENT_INT !! 1230         .type   kgdb_trap_low,#function
983         stp     x14, x15, [x1, #SDEI_EVENT_INT !! 1231 kgdb_trap_low:
984         stp     x16, x17, [x1, #SDEI_EVENT_INT !! 1232         rd      %wim,%l3
985         stp     x18, x19, [x1, #SDEI_EVENT_INT !! 1233         SAVE_ALL
986         stp     x20, x21, [x1, #SDEI_EVENT_INT !! 1234         wr      %l0, PSR_ET, %psr
987         stp     x22, x23, [x1, #SDEI_EVENT_INT !! 1235         WRITE_PAUSE
988         stp     x24, x25, [x1, #SDEI_EVENT_INT !! 1236 
989         stp     x26, x27, [x1, #SDEI_EVENT_INT !! 1237         call    kgdb_trap
990         stp     x28, x29, [x1, #SDEI_EVENT_INT !! 1238          add    %sp, STACKFRAME_SZ, %o0
991         mov     x4, sp                         !! 1239 
992         stp     lr, x4, [x1, #SDEI_EVENT_INTRE !! 1240         RESTORE_ALL
993                                                !! 1241         .size   kgdb_trap_low,.-kgdb_trap_low
994         mov     x19, x1                        !! 1242 #endif
995                                                !! 1243 
996         /* Store the registered-event for cras !! 1244         .align  4
997         ldrb    w4, [x19, #SDEI_EVENT_PRIORITY !! 1245         .globl  flush_patch_exception
998         cbnz    w4, 1f                         !! 1246 flush_patch_exception:
999         adr_this_cpu dst=x5, sym=sdei_active_n !! 1247         FLUSH_ALL_KERNEL_WINDOWS;
1000         b       2f                            !! 1248         ldd     [%o0], %o6
1001 1:      adr_this_cpu dst=x5, sym=sdei_active_ !! 1249         jmpl    %o7 + 0xc, %g0                  ! see asm-sparc/processor.h
1002 2:      str     x19, [x5]                     !! 1250          mov    1, %g1                          ! signal EFAULT condition
                                                   >> 1251 
                                                   >> 1252         .align  4
                                                   >> 1253         .globl  kill_user_windows, kuw_patch1_7win
                                                   >> 1254         .globl  kuw_patch1
                                                   >> 1255 kuw_patch1_7win:        sll     %o3, 6, %o3
                                                   >> 1256 
                                                   >> 1257         /* No matter how much overhead this routine has in the worst
                                                   >> 1258          * case scenario, it is several times better than taking the
                                                   >> 1259          * traps with the old method of just doing flush_user_windows().
                                                   >> 1260          */
                                                   >> 1261 kill_user_windows:
                                                   >> 1262         ld      [%g6 + TI_UWINMASK], %o0        ! get current umask
                                                   >> 1263         orcc    %g0, %o0, %g0                   ! if no bits set, we are done
                                                   >> 1264         be      3f                              ! nothing to do
                                                   >> 1265          rd     %psr, %o5                       ! must clear interrupts
                                                   >> 1266         or      %o5, PSR_PIL, %o4               ! or else that could change
                                                   >> 1267         wr      %o4, 0x0, %psr                  ! the uwinmask state
                                                   >> 1268         WRITE_PAUSE                             ! burn them cycles
                                                   >> 1269 1:
                                                   >> 1270         ld      [%g6 + TI_UWINMASK], %o0        ! get consistent state
                                                   >> 1271         orcc    %g0, %o0, %g0                   ! did an interrupt come in?
                                                   >> 1272         be      4f                              ! yep, we are done
                                                   >> 1273          rd     %wim, %o3                       ! get current wim
                                                   >> 1274         srl     %o3, 1, %o4                     ! simulate a save
                                                   >> 1275 kuw_patch1:
                                                   >> 1276         sll     %o3, 7, %o3                     ! compute next wim
                                                   >> 1277         or      %o4, %o3, %o3                   ! result
                                                   >> 1278         andncc  %o0, %o3, %o0                   ! clean this bit in umask
                                                   >> 1279         bne     kuw_patch1                      ! not done yet
                                                   >> 1280          srl    %o3, 1, %o4                     ! begin another save simulation
                                                   >> 1281         wr      %o3, 0x0, %wim                  ! set the new wim
                                                   >> 1282         st      %g0, [%g6 + TI_UWINMASK]        ! clear uwinmask
                                                   >> 1283 4:
                                                   >> 1284         wr      %o5, 0x0, %psr                  ! re-enable interrupts
                                                   >> 1285         WRITE_PAUSE                             ! burn baby burn
                                                   >> 1286 3:
                                                   >> 1287         retl                                    ! return
                                                   >> 1288          st     %g0, [%g6 + TI_W_SAVED]         ! no windows saved
1003                                                  1289 
1004 #ifdef CONFIG_VMAP_STACK                      !! 1290         .align  4
1005         /*                                    !! 1291         .globl  restore_current
1006          * entry.S may have been using sp as  !! 1292 restore_current:
1007          * this is a normal or critical event !! 1293         LOAD_CURRENT(g6, o0)
1008          * stack for this CPU.                !! 1294         retl
                                                   >> 1295          nop
                                                   >> 1296 
                                                   >> 1297 #ifdef CONFIG_PCIC_PCI
                                                   >> 1298 #include <asm/pcic.h>
                                                   >> 1299 
                                                   >> 1300         .align  4
                                                   >> 1301         .globl  linux_trap_ipi15_pcic
                                                   >> 1302 linux_trap_ipi15_pcic:
                                                   >> 1303         rd      %wim, %l3
                                                   >> 1304         SAVE_ALL
                                                   >> 1305 
                                                   >> 1306         /*
                                                   >> 1307          * First deactivate NMI
                                                   >> 1308          * or we cannot drop ET, cannot get window spill traps.
                                                   >> 1309          * The busy loop is necessary because the PIO error
                                                   >> 1310          * sometimes does not go away quickly and we trap again.
1009          */                                      1311          */
1010         cbnz    w4, 1f                        !! 1312         sethi   %hi(pcic_regs), %o1
1011         ldr_this_cpu dst=x5, sym=sdei_stack_n !! 1313         ld      [%o1 + %lo(pcic_regs)], %o2
1012         b       2f                            << 
1013 1:      ldr_this_cpu dst=x5, sym=sdei_stack_c << 
1014 2:      mov     x6, #SDEI_STACK_SIZE          << 
1015         add     x5, x5, x6                    << 
1016         mov     sp, x5                        << 
1017 #endif                                        << 
1018                                                  1314 
1019 #ifdef CONFIG_SHADOW_CALL_STACK               !! 1315         ! Get pending status for printouts later.
1020         /* Use a separate shadow call stack f !! 1316         ld      [%o2 + PCI_SYS_INT_PENDING], %o0
1021         cbnz    w4, 3f                        << 
1022         ldr_this_cpu dst=scs_sp, sym=sdei_sha << 
1023         b       4f                            << 
1024 3:      ldr_this_cpu dst=scs_sp, sym=sdei_sha << 
1025 4:                                            << 
1026 #endif                                        << 
1027                                                  1317 
1028         /*                                    !! 1318         mov     PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
1029          * We may have interrupted userspace, !! 1319         stb     %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
1030          * return-to either of these. We can' !! 1320 1:
1031          */                                   !! 1321         ld      [%o2 + PCI_SYS_INT_PENDING], %o1
1032         mrs     x28, sp_el0                   !! 1322         andcc   %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
1033         ldr_this_cpu    dst=x0, sym=__entry_t !! 1323         bne     1b
1034         msr     sp_el0, x0                    !! 1324          nop
1035                                               !! 1325 
1036         /* If we interrupted the kernel point !! 1326         or      %l0, PSR_PIL, %l4
1037         and     x0, x3, #0xc                  !! 1327         wr      %l4, 0x0, %psr
1038         mrs     x1, CurrentEL                 !! 1328         WRITE_PAUSE
1039         cmp     x0, x1                        !! 1329         wr      %l4, PSR_ET, %psr
1040         csel    x29, x29, xzr, eq       // fp !! 1330         WRITE_PAUSE
1041         csel    x4, x2, xzr, eq         // el !! 1331 
1042                                               !! 1332         call    pcic_nmi
1043         stp     x29, x4, [sp, #-16]!          !! 1333          add    %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
1044         mov     x29, sp                       !! 1334         RESTORE_ALL
1045                                               !! 1335 
1046         add     x0, x19, #SDEI_EVENT_INTREGS  !! 1336         .globl  pcic_nmi_trap_patch
1047         mov     x1, x19                       !! 1337 pcic_nmi_trap_patch:
1048         bl      __sdei_handler                !! 1338         sethi   %hi(linux_trap_ipi15_pcic), %l3
1049                                               !! 1339         jmpl    %l3 + %lo(linux_trap_ipi15_pcic), %g0
1050         msr     sp_el0, x28                   !! 1340          rd     %psr, %l0
1051         /* restore regs >x17 that we clobbere !! 1341         .word   0
1052         mov     x4, x19         // keep x4 fo !! 1342 
1053         ldp     x28, x29, [x4, #SDEI_EVENT_IN !! 1343 #endif /* CONFIG_PCIC_PCI */
1054         ldp     x18, x19, [x4, #SDEI_EVENT_IN !! 1344 
1055         ldp     lr, x1, [x4, #SDEI_EVENT_INTR !! 1345         .globl  flushw_all
1056         mov     sp, x1                        !! 1346 flushw_all:
1057                                               !! 1347         save    %sp, -0x40, %sp
1058         mov     x1, x0                  // ad !! 1348         save    %sp, -0x40, %sp
1059         /* x0 = (x0 <= SDEI_EV_FAILED) ?      !! 1349         save    %sp, -0x40, %sp
1060          * EVENT_COMPLETE:EVENT_COMPLETE_AND_ !! 1350         save    %sp, -0x40, %sp
1061          */                                   !! 1351         save    %sp, -0x40, %sp
1062         cmp     x0, #SDEI_EV_FAILED           !! 1352         save    %sp, -0x40, %sp
1063         mov_q   x2, SDEI_1_0_FN_SDEI_EVENT_CO !! 1353         save    %sp, -0x40, %sp
1064         mov_q   x3, SDEI_1_0_FN_SDEI_EVENT_CO !! 1354         restore
1065         csel    x0, x2, x3, ls                !! 1355         restore
1066                                               !! 1356         restore
1067         ldr_l   x2, sdei_exit_mode            !! 1357         restore
1068                                               !! 1358         restore
1069         /* Clear the registered-event seen by !! 1359         restore
1070         ldrb    w3, [x4, #SDEI_EVENT_PRIORITY !! 1360         ret
1071         cbnz    w3, 1f                        !! 1361          restore
1072         adr_this_cpu dst=x5, sym=sdei_active_ << 
1073         b       2f                            << 
1074 1:      adr_this_cpu dst=x5, sym=sdei_active_ << 
1075 2:      str     xzr, [x5]                     << 
1076                                                  1362 
1077 alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0  !! 1363 #ifdef CONFIG_SMP
1078         sdei_handler_exit exit_mode=x2        !! 1364 ENTRY(hard_smp_processor_id)
1079 alternative_else_nop_endif                    !! 1365 661:    rd              %tbr, %g1
1080                                               !! 1366         srl             %g1, 12, %o0
1081 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0             !! 1367         and             %o0, 3, %o0
1082         tramp_alias     dst=x5, sym=__sdei_as !! 1368         .section        .cpuid_patch, "ax"
1083         br      x5                            !! 1369         /* Instruction location. */
                                                   >> 1370         .word           661b
                                                   >> 1371         /* SUN4D implementation. */
                                                   >> 1372         lda             [%g0] ASI_M_VIKING_TMP1, %o0
                                                   >> 1373         nop
                                                   >> 1374         nop
                                                   >> 1375         /* LEON implementation. */
                                                   >> 1376         rd              %asr17, %o0
                                                   >> 1377         srl             %o0, 0x1c, %o0
                                                   >> 1378         nop
                                                   >> 1379         .previous
                                                   >> 1380         retl
                                                   >> 1381          nop
                                                   >> 1382 ENDPROC(hard_smp_processor_id)
1084 #endif                                           1383 #endif
1085 SYM_CODE_END(__sdei_asm_handler)              << 
1086 NOKPROBE(__sdei_asm_handler)                  << 
1087                                                  1384 
1088 SYM_CODE_START(__sdei_handler_abort)          !! 1385 /* End of entry.S */
1089         mov_q   x0, SDEI_1_0_FN_SDEI_EVENT_CO << 
1090         adr     x1, 1f                        << 
1091         ldr_l   x2, sdei_exit_mode            << 
1092         sdei_handler_exit exit_mode=x2        << 
1093         // exit the handler and jump to the n << 
1094         // Exit will stomp x0-x17, PSTATE, EL << 
1095 1:      ret                                   << 
1096 SYM_CODE_END(__sdei_handler_abort)            << 
1097 NOKPROBE(__sdei_handler_abort)                << 
1098 #endif /* CONFIG_ARM_SDE_INTERFACE */         << 
                                                      

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php