
TOMOYO Linux Cross Reference
Linux/arch/riscv/kernel/entry.S


Diff markup

Differences between /arch/riscv/kernel/entry.S (Version linux-6.12-rc7) and /arch/sparc/kernel/entry.S (Version linux-4.14.336)


  1 /* SPDX-License-Identifier: GPL-2.0-only */    !!   1 /* SPDX-License-Identifier: GPL-2.0 */
  2 /*                                             !!   2 /* arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
  3  * Copyright (C) 2012 Regents of the University of California !!   3  *
  4  * Copyright (C) 2017 SiFive                   !!   4  * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
                                                   >>   5  * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
                                                   >>   6  * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
                                                   >>   7  * Copyright (C) 1996-1999 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
                                                   >>   8  * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
  5  */                                                 9  */
  6                                                    10 
  7 #include <linux/init.h>                        << 
  8 #include <linux/linkage.h>                         11 #include <linux/linkage.h>
                                                   >>  12 #include <linux/errno.h>
  9                                                    13 
 10 #include <asm/asm.h>                           !!  14 #include <asm/head.h>
 11 #include <asm/csr.h>                           !!  15 #include <asm/asi.h>
 12 #include <asm/scs.h>                           !!  16 #include <asm/smp.h>
 13 #include <asm/unistd.h>                        !!  17 #include <asm/contregs.h>
                                                   >>  18 #include <asm/ptrace.h>
                                                   >>  19 #include <asm/asm-offsets.h>
                                                   >>  20 #include <asm/psr.h>
                                                   >>  21 #include <asm/vaddrs.h>
 14 #include <asm/page.h>                              22 #include <asm/page.h>
                                                   >>  23 #include <asm/pgtable.h>
                                                   >>  24 #include <asm/winmacro.h>
                                                   >>  25 #include <asm/signal.h>
                                                   >>  26 #include <asm/obio.h>
                                                   >>  27 #include <asm/mxcc.h>
 15 #include <asm/thread_info.h>                       28 #include <asm/thread_info.h>
 16 #include <asm/asm-offsets.h>                   !!  29 #include <asm/param.h>
 17 #include <asm/errata_list.h>                   !!  30 #include <asm/unistd.h>
 18 #include <linux/sizes.h>                       !!  31 
                                                   >>  32 #include <asm/asmmacro.h>
                                                   >>  33 #include <asm/export.h>
 19                                                    34 
 20         .section .irqentry.text, "ax"          !!  35 #define curptr      g6
 21                                                    36 
 22 .macro new_vmalloc_check                       !!  37 /* These are just handy. */
 23         REG_S   a0, TASK_TI_A0(tp)             !!  38 #define _SV     save    %sp, -STACKFRAME_SZ, %sp
 24         csrr    a0, CSR_CAUSE                  !!  39 #define _RS     restore 
 25         /* Exclude IRQs */                     !!  40 
 26         blt     a0, zero, _new_vmalloc_restore_context_a0 !!  41 #define FLUSH_ALL_KERNEL_WINDOWS \
 27                                                !!  42         _SV; _SV; _SV; _SV; _SV; _SV; _SV; \
 28         REG_S   a1, TASK_TI_A1(tp)             !!  43         _RS; _RS; _RS; _RS; _RS; _RS; _RS;
 29         /* Only check new_vmalloc if we are in page/protection fault */ !!  44 
 30         li      a1, EXC_LOAD_PAGE_FAULT        !!  45         .text
 31         beq     a0, a1, _new_vmalloc_kernel_address !!  46 
 32         li      a1, EXC_STORE_PAGE_FAULT       !!  47 #ifdef CONFIG_KGDB
 33         beq     a0, a1, _new_vmalloc_kernel_address !!  48         .align  4
 34         li      a1, EXC_INST_PAGE_FAULT        !!  49         .globl          arch_kgdb_breakpoint
 35         bne     a0, a1, _new_vmalloc_restore_context_a1 !!  50         .type           arch_kgdb_breakpoint,#function
 36                                                !!  51 arch_kgdb_breakpoint:
 37 _new_vmalloc_kernel_address:                   !!  52         ta              0x7d
 38         /* Is it a kernel address? */          !!  53         retl
 39         csrr    a0, CSR_TVAL                   !!  54          nop
 40         bge     a0, zero, _new_vmalloc_restore_context_a1 !!  55         .size           arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
                                                   >>  56 #endif
 41                                                    57 
 42         /* Check if a new vmalloc mapping appeared that could explain the trap */ !!  58 #if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
 43         REG_S   a2, TASK_TI_A2(tp)             !!  59         .align  4
                                                   >>  60         .globl  floppy_hardint
                                                   >>  61 floppy_hardint:
 44         /*                                         62         /*
 45          * Computes:                           !!  63          * This code cannot touch registers %l0 %l1 and %l2
 46          * a0 = &new_vmalloc[BIT_WORD(cpu)]    !!  64          * because SAVE_ALL depends on their values. It depends
 47          * a1 = BIT_MASK(cpu)                  !!  65          * on %l3 also, but we regenerate it before a call.
                                                   >>  66          * Other registers are:
                                                   >>  67          * %l3 -- base address of fdc registers
                                                   >>  68          * %l4 -- pdma_vaddr
                                                   >>  69          * %l5 -- scratch for ld/st address
                                                   >>  70          * %l6 -- pdma_size
                                                   >>  71          * %l7 -- scratch [floppy byte, ld/st address, aux. data]
 48          */                                        72          */
 49         REG_L   a2, TASK_TI_CPU(tp)            !!  73 
 50         /*                                     !!  74         /* Do we have work to do? */
 51          * Compute the new_vmalloc element position: !!  75         sethi   %hi(doing_pdma), %l7
 52          * (cpu / 64) * 8 = (cpu >> 6) << 3    !!  76         ld      [%l7 + %lo(doing_pdma)], %l7
                                                   >>  77         cmp     %l7, 0
                                                   >>  78         be      floppy_dosoftint
                                                   >>  79          nop
                                                   >>  80 
                                                   >>  81         /* Load fdc register base */
                                                   >>  82         sethi   %hi(fdc_status), %l3
                                                   >>  83         ld      [%l3 + %lo(fdc_status)], %l3
                                                   >>  84 
                                                   >>  85         /* Setup register addresses */
                                                   >>  86         sethi   %hi(pdma_vaddr), %l5    ! transfer buffer
                                                   >>  87         ld      [%l5 + %lo(pdma_vaddr)], %l4
                                                   >>  88         sethi   %hi(pdma_size), %l5     ! bytes to go
                                                   >>  89         ld      [%l5 + %lo(pdma_size)], %l6
                                                   >>  90 next_byte:
                                                   >>  91         ldub    [%l3], %l7
                                                   >>  92 
                                                   >>  93         andcc   %l7, 0x80, %g0          ! Does fifo still have data
                                                   >>  94         bz      floppy_fifo_emptied     ! fifo has been emptied...
                                                   >>  95          andcc  %l7, 0x20, %g0          ! in non-dma mode still?
                                                   >>  96         bz      floppy_overrun          ! nope, overrun
                                                   >>  97          andcc  %l7, 0x40, %g0          ! 0=write 1=read
                                                   >>  98         bz      floppy_write
                                                   >>  99          sub    %l6, 0x1, %l6
                                                   >> 100 
                                                   >> 101         /* Ok, actually read this byte */
                                                   >> 102         ldub    [%l3 + 1], %l7
                                                   >> 103         orcc    %g0, %l6, %g0
                                                   >> 104         stb     %l7, [%l4]
                                                   >> 105         bne     next_byte
                                                   >> 106          add    %l4, 0x1, %l4
                                                   >> 107 
                                                   >> 108         b       floppy_tdone
                                                   >> 109          nop
                                                   >> 110 
                                                   >> 111 floppy_write:
                                                   >> 112         /* Ok, actually write this byte */
                                                   >> 113         ldub    [%l4], %l7
                                                   >> 114         orcc    %g0, %l6, %g0
                                                   >> 115         stb     %l7, [%l3 + 1]
                                                   >> 116         bne     next_byte
                                                   >> 117          add    %l4, 0x1, %l4
                                                   >> 118 
                                                   >> 119         /* fall through... */
                                                   >> 120 floppy_tdone:
                                                   >> 121         sethi   %hi(pdma_vaddr), %l5
                                                   >> 122         st      %l4, [%l5 + %lo(pdma_vaddr)]
                                                   >> 123         sethi   %hi(pdma_size), %l5
                                                   >> 124         st      %l6, [%l5 + %lo(pdma_size)]
                                                   >> 125         /* Flip terminal count pin */
                                                   >> 126         set     auxio_register, %l7
                                                   >> 127         ld      [%l7], %l7
                                                   >> 128 
                                                   >> 129         ldub    [%l7], %l5
                                                   >> 130 
                                                   >> 131         or      %l5, 0xc2, %l5
                                                   >> 132         stb     %l5, [%l7]
                                                   >> 133         andn    %l5, 0x02, %l5
                                                   >> 134 
                                                   >> 135 2:
                                                   >> 136         /* Kill some time so the bits set */
                                                   >> 137         WRITE_PAUSE
                                                   >> 138         WRITE_PAUSE
                                                   >> 139 
                                                   >> 140         stb     %l5, [%l7]
                                                   >> 141 
                                                   >> 142         /* Prevent recursion */
                                                   >> 143         sethi   %hi(doing_pdma), %l7
                                                   >> 144         b       floppy_dosoftint
                                                   >> 145          st     %g0, [%l7 + %lo(doing_pdma)]
                                                   >> 146 
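The next_byte read/write loop above is the sparc32 floppy pseudo-DMA path: each fast IRQ polls the FDC main status register and moves one byte at a time between the data register and the pdma buffer, using the status bits noted in the assembly (0x80 = data ready, 0x20 = still in non-DMA mode, 0x40 = transfer direction). A hedged C sketch of that loop; the parameter names are illustrative, not a real driver API:

    /* Illustrative pseudo-DMA loop, mirroring the status-bit tests above. */
    #include <stdint.h>

    void pdma_transfer(volatile uint8_t *fdc_status, volatile uint8_t *fdc_data,
                       uint8_t **pdma_vaddr, unsigned long *pdma_size)
    {
            while (*pdma_size) {
                    uint8_t st = *fdc_status;

                    if (!(st & 0x80))
                            return;             /* FIFO emptied: resume on the next fast IRQ */
                    if (!(st & 0x20))
                            return;             /* dropped out of non-DMA mode: overrun */

                    (*pdma_size)--;
                    if (st & 0x40)              /* 1 = read from the controller */
                            *(*pdma_vaddr)++ = *fdc_data;
                    else                        /* 0 = write to the controller */
                            *fdc_data = *(*pdma_vaddr)++;
            }
            /* transfer complete: floppy_tdone flips the terminal-count pin */
    }
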
                                                   >> 147         /* We emptied the FIFO, but we haven't read everything
                                                   >> 148          * as of yet.  Store the current transfer address and
                                                   >> 149          * bytes left to read so we can continue when the next
                                                   >> 150          * fast IRQ comes in.
 53          */                                       151          */
 54         srli    a1, a2, 6                      !! 152 floppy_fifo_emptied:
 55         slli    a1, a1, 3                      !! 153         sethi   %hi(pdma_vaddr), %l5
 56         la      a0, new_vmalloc                !! 154         st      %l4, [%l5 + %lo(pdma_vaddr)]
 57         add     a0, a0, a1                     !! 155         sethi   %hi(pdma_size), %l7
 58         /*                                     !! 156         st      %l6, [%l7 + %lo(pdma_size)]
 59          * Compute the bit position in the new_vmalloc element: !! 157 
 60          * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 !! 158         /* Restore condition codes */
 61          *         = cpu - ((cpu >> 6) << 3) << 3 !! 159         wr      %l0, 0x0, %psr
 62          */                                    !! 160         WRITE_PAUSE
 63         slli    a1, a1, 3                      !! 161 
 64         sub     a1, a2, a1                     !! 162         jmp     %l1
 65         /* Compute the "get mask": 1 << bit_pos */ !! 163         rett    %l2
 66         li      a2, 1                          !! 164 
 67         sll     a1, a2, a1                     !! 165 floppy_overrun:
 68                                                !! 166         sethi   %hi(pdma_vaddr), %l5
 69         /* Check the value of new_vmalloc for the current cpu */ !! 167         st      %l4, [%l5 + %lo(pdma_vaddr)]
 70         REG_L   a2, 0(a0)                      !! 168         sethi   %hi(pdma_size), %l5
 71         and     a2, a2, a1                     !! 169         st      %l6, [%l5 + %lo(pdma_size)]
 72         beq     a2, zero, _new_vmalloc_restore_context !! 170         /* Prevent recursion */
 73                                                !! 171         sethi   %hi(doing_pdma), %l7
 74         /* Atomically reset the current cpu bit in new_vmalloc */ !! 172         st      %g0, [%l7 + %lo(doing_pdma)]
 75         amoxor.d        a0, a1, (a0)           !! 173 
 76                                                !! 174         /* fall through... */
 77         /* Only emit a sfence.vma if the uarch caches invalid entries */ !! 175 floppy_dosoftint:
 78         ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1) !! 176         rd      %wim, %l3
 79                                                !! 177         SAVE_ALL
 80         REG_L   a0, TASK_TI_A0(tp)             !! 178 
 81         REG_L   a1, TASK_TI_A1(tp)             !! 179         /* Set all IRQs off. */
 82         REG_L   a2, TASK_TI_A2(tp)             !! 180         or      %l0, PSR_PIL, %l4
 83         csrw    CSR_SCRATCH, x0                !! 181         wr      %l4, 0x0, %psr
 84         sret                                   !! 182         WRITE_PAUSE
 85                                                !! 183         wr      %l4, PSR_ET, %psr
 86 _new_vmalloc_restore_context:                  !! 184         WRITE_PAUSE
 87         REG_L   a2, TASK_TI_A2(tp)             !! 185 
 88 _new_vmalloc_restore_context_a1:               !! 186         mov     11, %o0                 ! floppy irq level (unused anyway)
 89         REG_L   a1, TASK_TI_A1(tp)             !! 187         mov     %g0, %o1                ! devid is not used in fast interrupts
 90 _new_vmalloc_restore_context_a0:               !! 188         call    sparc_floppy_irq
 91         REG_L   a0, TASK_TI_A0(tp)             !! 189          add    %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs
 92 .endm                                          !! 190 
                                                   >> 191         RESTORE_ALL
                                                   >> 192         
                                                   >> 193 #endif /* (CONFIG_BLK_DEV_FD) */
                                                   >> 194 
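The new_vmalloc_check macro above (RISC-V, left column) is a per-CPU bitmap probe: it locates the faulting CPU's bit in the new_vmalloc array with the usual BIT_WORD/BIT_MASK arithmetic, atomically clears it if set, optionally emits a local sfence.vma, and retries the faulting access with sret. A rough C equivalent of the bit test, offered only as an illustrative sketch (it assumes 64-bit words, as the amoxor.d does; it is not the kernel's actual C code):

    /* Illustrative sketch of the new_vmalloc_check bit test (not kernel code). */
    #include <stdbool.h>
    #include <stdint.h>

    extern uint64_t new_vmalloc[];                     /* one bit per possible CPU */

    bool new_vmalloc_test_and_clear(unsigned int cpu)
    {
            uint64_t *word = &new_vmalloc[cpu / 64];   /* byte offset (cpu >> 6) << 3 */
            uint64_t mask  = 1ULL << (cpu % 64);       /* the "get mask" */

            if (!(*word & mask))
                    return false;                      /* no new vmalloc mapping recorded */

            /* amoxor.d: the bit is known to be set, so the xor clears it atomically */
            __atomic_fetch_xor(word, mask, __ATOMIC_RELAXED);
            return true;                               /* flush if needed, then retry (sret) */
    }
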
                                                   >> 195         /* Bad trap handler */
                                                   >> 196         .globl  bad_trap_handler
                                                   >> 197 bad_trap_handler:
                                                   >> 198         SAVE_ALL
                                                   >> 199 
                                                   >> 200         wr      %l0, PSR_ET, %psr
                                                   >> 201         WRITE_PAUSE
                                                   >> 202 
                                                   >> 203         add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
                                                   >> 204         call    do_hw_interrupt
                                                   >> 205          mov    %l7, %o1                ! trap number
                                                   >> 206 
                                                   >> 207         RESTORE_ALL
                                                   >> 208         
                                                   >> 209 /* For now all IRQ's not registered get sent here. handler_irq() will
                                                   >> 210  * see if a routine is registered to handle this interrupt and if not
                                                   >> 211  * it will say so on the console.
                                                   >> 212  */
 93                                                   213 
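The comment above summarizes the dispatch that handler_irq() performs in C: look up a handler registered for this interrupt level and complain on the console if there is none. A hedged sketch of that behavior follows; the table name and its shape are hypothetical, only the described behavior comes from the comment:

    /* Hypothetical sketch of the dispatch described above (not the real handler_irq). */
    struct pt_regs;
    extern int printk(const char *fmt, ...);

    typedef void (*irq_routine_t)(unsigned int level, struct pt_regs *regs);
    extern irq_routine_t registered_irq[16];           /* hypothetical per-level table */

    void handler_irq_sketch(unsigned int level, struct pt_regs *regs)
    {
            if (registered_irq[level])
                    registered_irq[level](level, regs);
            else
                    printk("unexpected IRQ at level %u\n", level);  /* "say so on the console" */
    }
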
                                                   >> 214         .align  4
                                                   >> 215         .globl  real_irq_entry, patch_handler_irq
                                                   >> 216 real_irq_entry:
                                                   >> 217         SAVE_ALL
                                                   >> 218 
                                                   >> 219 #ifdef CONFIG_SMP
                                                   >> 220         .globl  patchme_maybe_smp_msg
                                                   >> 221 
                                                   >> 222         cmp     %l7, 11
                                                   >> 223 patchme_maybe_smp_msg:
                                                   >> 224         bgu     maybe_smp4m_msg
                                                   >> 225          nop
                                                   >> 226 #endif
 94                                                   227 
 95 SYM_CODE_START(handle_exception)               !! 228 real_irq_continue:
 96         /*                                     !! 229         or      %l0, PSR_PIL, %g2
 97          * If coming from userspace, preserve the user thread pointer and load !! 229         or      %l0, PSR_PIL, %g2
 98          * the kernel thread pointer.  If we came from the kernel, the scratch !! 230         wr      %g2, 0x0, %psr
 99          * register will contain 0, and we should continue on the current TP. !! 231         WRITE_PAUSE
                                                   >> 233         WRITE_PAUSE
                                                   >> 234         mov     %l7, %o0                ! irq level
                                                   >> 235 patch_handler_irq:
                                                   >> 236         call    handler_irq
                                                   >> 237          add    %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
                                                   >> 238         or      %l0, PSR_PIL, %g2       ! restore PIL after handler_irq
                                                   >> 239         wr      %g2, PSR_ET, %psr       ! keep ET up
                                                   >> 240         WRITE_PAUSE
                                                   >> 241 
                                                   >> 242         RESTORE_ALL
                                                   >> 243 
                                                   >> 244 #ifdef CONFIG_SMP
                                                   >> 245         /* SMP per-cpu ticker interrupts are handled specially. */
                                                   >> 246 smp4m_ticker:
                                                   >> 247         bne     real_irq_continue+4
                                                   >> 248          or     %l0, PSR_PIL, %g2
                                                   >> 249         wr      %g2, 0x0, %psr
                                                   >> 250         WRITE_PAUSE
                                                   >> 251         wr      %g2, PSR_ET, %psr
                                                   >> 252         WRITE_PAUSE
                                                   >> 253         call    smp4m_percpu_timer_interrupt
                                                   >> 254          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 255         wr      %l0, PSR_ET, %psr
                                                   >> 256         WRITE_PAUSE
                                                   >> 257         RESTORE_ALL
                                                   >> 258 
                                                   >> 259 #define GET_PROCESSOR4M_ID(reg) \
                                                   >> 260         rd      %tbr, %reg;     \
                                                   >> 261         srl     %reg, 12, %reg; \
                                                   >> 262         and     %reg, 3, %reg;
                                                   >> 263 
                                                   >> 264         /* Here is where we check for possible SMP IPI passed to us
                                                   >> 265          * on some level other than 15 which is the NMI and only used
                                                   >> 266          * for cross calls.  That has a separate entry point below.
                                                   >> 267          *
                                                   >> 268          * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
100          */                                       269          */
101         csrrw tp, CSR_SCRATCH, tp              !! 270 maybe_smp4m_msg:
102         bnez tp, .Lsave_context                !! 271         GET_PROCESSOR4M_ID(o3)
                                                   >> 272         sethi   %hi(sun4m_irq_percpu), %l5
                                                   >> 273         sll     %o3, 2, %o3
                                                   >> 274         or      %l5, %lo(sun4m_irq_percpu), %o5
                                                   >> 275         sethi   %hi(0x70000000), %o2    ! Check all soft-IRQs
                                                   >> 276         ld      [%o5 + %o3], %o1
                                                   >> 277         ld      [%o1 + 0x00], %o3       ! sun4m_irq_percpu[cpu]->pending
                                                   >> 278         andcc   %o3, %o2, %g0
                                                   >> 279         be,a    smp4m_ticker
                                                   >> 280          cmp    %l7, 14
                                                   >> 281         /* Soft-IRQ IPI */
                                                   >> 282         st      %o2, [%o1 + 0x04]       ! sun4m_irq_percpu[cpu]->clear=0x70000000
                                                   >> 283         WRITE_PAUSE
                                                   >> 284         ld      [%o1 + 0x00], %g0       ! sun4m_irq_percpu[cpu]->pending
                                                   >> 285         WRITE_PAUSE
                                                   >> 286         or      %l0, PSR_PIL, %l4
                                                   >> 287         wr      %l4, 0x0, %psr
                                                   >> 288         WRITE_PAUSE
                                                   >> 289         wr      %l4, PSR_ET, %psr
                                                   >> 290         WRITE_PAUSE
                                                   >> 291         srl     %o3, 28, %o2            ! shift for simpler checks below
                                                   >> 292 maybe_smp4m_msg_check_single:
                                                   >> 293         andcc   %o2, 0x1, %g0
                                                   >> 294         beq,a   maybe_smp4m_msg_check_mask
                                                   >> 295          andcc  %o2, 0x2, %g0
                                                   >> 296         call    smp_call_function_single_interrupt
                                                   >> 297          nop
                                                   >> 298         andcc   %o2, 0x2, %g0
                                                   >> 299 maybe_smp4m_msg_check_mask:
                                                   >> 300         beq,a   maybe_smp4m_msg_check_resched
                                                   >> 301          andcc  %o2, 0x4, %g0
                                                   >> 302         call    smp_call_function_interrupt
                                                   >> 303          nop
                                                   >> 304         andcc   %o2, 0x4, %g0
                                                   >> 305 maybe_smp4m_msg_check_resched:
                                                   >> 306         /* rescheduling is done in RESTORE_ALL regardless, but incr stats */
                                                   >> 307         beq,a   maybe_smp4m_msg_out
                                                   >> 308          nop
                                                   >> 309         call    smp_resched_interrupt
                                                   >> 310          nop
                                                   >> 311 maybe_smp4m_msg_out:
                                                   >> 312         RESTORE_ALL
                                                   >> 313 
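maybe_smp4m_msg above decodes the per-CPU pending register: the soft-IRQ IPI bits sit in the 0x70000000 mask, and after the srl by 28 the code dispatches on bits 0x1, 0x2 and 0x4 in turn. The same decode in C, as a sketch (the handler prototypes are inferred from the no-argument call sites above):

    /* Sketch of the soft-IRQ IPI decode performed by maybe_smp4m_msg. */
    void smp_call_function_single_interrupt(void);
    void smp_call_function_interrupt(void);
    void smp_resched_interrupt(void);

    void sun4m_ipi_dispatch_sketch(unsigned int pending)
    {
            unsigned int ipi = (pending & 0x70000000u) >> 28;   /* srl %o3, 28, %o2 */

            if (ipi & 0x1)
                    smp_call_function_single_interrupt();
            if (ipi & 0x2)
                    smp_call_function_interrupt();
            if (ipi & 0x4)
                    smp_resched_interrupt();    /* stats only; resched happens in RESTORE_ALL */
    }
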
                                                   >> 314         .align  4
                                                   >> 315         .globl  linux_trap_ipi15_sun4m
                                                   >> 316 linux_trap_ipi15_sun4m:
                                                   >> 317         SAVE_ALL
                                                   >> 318         sethi   %hi(0x80000000), %o2
                                                   >> 319         GET_PROCESSOR4M_ID(o0)
                                                   >> 320         sethi   %hi(sun4m_irq_percpu), %l5
                                                   >> 321         or      %l5, %lo(sun4m_irq_percpu), %o5
                                                   >> 322         sll     %o0, 2, %o0
                                                   >> 323         ld      [%o5 + %o0], %o5
                                                   >> 324         ld      [%o5 + 0x00], %o3       ! sun4m_irq_percpu[cpu]->pending
                                                   >> 325         andcc   %o3, %o2, %g0
                                                   >> 326         be      sun4m_nmi_error         ! Must be an NMI async memory error
                                                   >> 327          st     %o2, [%o5 + 0x04]       ! sun4m_irq_percpu[cpu]->clear=0x80000000
                                                   >> 328         WRITE_PAUSE
                                                   >> 329         ld      [%o5 + 0x00], %g0       ! sun4m_irq_percpu[cpu]->pending
                                                   >> 330         WRITE_PAUSE
                                                   >> 331         or      %l0, PSR_PIL, %l4
                                                   >> 332         wr      %l4, 0x0, %psr
                                                   >> 333         WRITE_PAUSE
                                                   >> 334         wr      %l4, PSR_ET, %psr
                                                   >> 335         WRITE_PAUSE
                                                   >> 336         call    smp4m_cross_call_irq
                                                   >> 337          nop
                                                   >> 338         b       ret_trap_lockless_ipi
                                                   >> 339          clr    %l6
                                                   >> 340 
                                                   >> 341         .globl  smp4d_ticker
                                                   >> 342         /* SMP per-cpu ticker interrupts are handled specially. */
                                                   >> 343 smp4d_ticker:
                                                   >> 344         SAVE_ALL
                                                   >> 345         or      %l0, PSR_PIL, %g2
                                                   >> 346         sethi   %hi(CC_ICLR), %o0
                                                   >> 347         sethi   %hi(1 << 14), %o1
                                                   >> 348         or      %o0, %lo(CC_ICLR), %o0
                                                   >> 349         stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 14 in MXCC's ICLR */
                                                   >> 350         wr      %g2, 0x0, %psr
                                                   >> 351         WRITE_PAUSE
                                                   >> 352         wr      %g2, PSR_ET, %psr
                                                   >> 353         WRITE_PAUSE
                                                   >> 354         call    smp4d_percpu_timer_interrupt
                                                   >> 355          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 356         wr      %l0, PSR_ET, %psr
                                                   >> 357         WRITE_PAUSE
                                                   >> 358         RESTORE_ALL
                                                   >> 359 
                                                   >> 360         .align  4
                                                   >> 361         .globl  linux_trap_ipi15_sun4d
                                                   >> 362 linux_trap_ipi15_sun4d:
                                                   >> 363         SAVE_ALL
                                                   >> 364         sethi   %hi(CC_BASE), %o4
                                                   >> 365         sethi   %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
                                                   >> 366         or      %o4, (CC_EREG - CC_BASE), %o0
                                                   >> 367         ldda    [%o0] ASI_M_MXCC, %o0
                                                   >> 368         andcc   %o0, %o2, %g0
                                                   >> 369         bne     1f
                                                   >> 370          sethi  %hi(BB_STAT2), %o2
                                                   >> 371         lduba   [%o2] ASI_M_CTL, %o2
                                                   >> 372         andcc   %o2, BB_STAT2_MASK, %g0
                                                   >> 373         bne     2f
                                                   >> 374          or     %o4, (CC_ICLR - CC_BASE), %o0
                                                   >> 375         sethi   %hi(1 << 15), %o1
                                                   >> 376         stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 15 in MXCC's ICLR */
                                                   >> 377         or      %l0, PSR_PIL, %l4
                                                   >> 378         wr      %l4, 0x0, %psr
                                                   >> 379         WRITE_PAUSE
                                                   >> 380         wr      %l4, PSR_ET, %psr
                                                   >> 381         WRITE_PAUSE
                                                   >> 382         call    smp4d_cross_call_irq
                                                   >> 383          nop
                                                   >> 384         b       ret_trap_lockless_ipi
                                                   >> 385          clr    %l6
                                                   >> 386 
                                                   >> 387 1:      /* MXCC error */
                                                   >> 388 2:      /* BB error */
                                                   >> 389         /* Disable PIL 15 */
                                                   >> 390         set     CC_IMSK, %l4
                                                   >> 391         lduha   [%l4] ASI_M_MXCC, %l5
                                                   >> 392         sethi   %hi(1 << 15), %l7
                                                   >> 393         or      %l5, %l7, %l5
                                                   >> 394         stha    %l5, [%l4] ASI_M_MXCC
                                                   >> 395         /* FIXME */
                                                   >> 396 1:      b,a     1b
                                                   >> 397 
                                                   >> 398         .globl  smpleon_ipi
                                                   >> 399         .extern leon_ipi_interrupt
                                                   >> 400         /* SMP per-cpu IPI interrupts are handled specially. */
                                                   >> 401 smpleon_ipi:
                                                   >> 402         SAVE_ALL
                                                   >> 403         or      %l0, PSR_PIL, %g2
                                                   >> 404         wr      %g2, 0x0, %psr
                                                   >> 405         WRITE_PAUSE
                                                   >> 406         wr      %g2, PSR_ET, %psr
                                                   >> 407         WRITE_PAUSE
                                                   >> 408         call    leonsmp_ipi_interrupt
                                                   >> 409          add    %sp, STACKFRAME_SZ, %o1 ! pt_regs
                                                   >> 410         wr      %l0, PSR_ET, %psr
                                                   >> 411         WRITE_PAUSE
                                                   >> 412         RESTORE_ALL
                                                   >> 413 
                                                   >> 414         .align  4
                                                   >> 415         .globl  linux_trap_ipi15_leon
                                                   >> 416 linux_trap_ipi15_leon:
                                                   >> 417         SAVE_ALL
                                                   >> 418         or      %l0, PSR_PIL, %l4
                                                   >> 419         wr      %l4, 0x0, %psr
                                                   >> 420         WRITE_PAUSE
                                                   >> 421         wr      %l4, PSR_ET, %psr
                                                   >> 422         WRITE_PAUSE
                                                   >> 423         call    leon_cross_call_irq
                                                   >> 424          nop
                                                   >> 425         b       ret_trap_lockless_ipi
                                                   >> 426          clr    %l6
103                                                   427 
104 .Lrestore_kernel_tpsp:                         !! 428 #endif /* CONFIG_SMP */
105         csrr tp, CSR_SCRATCH                   << 
106                                                   429 
107 #ifdef CONFIG_64BIT                            !! 430         /* This routine handles illegal instructions and privileged
108         /*                                     !! 431          * instruction attempts from user code.
109          * The RISC-V kernel does not eagerly emit a sfence.vma after each << 
110          * new vmalloc mapping, which may result in exceptions: << 
111          * - if the uarch caches invalid entries, the new mapping would not be << 
112          *   observed by the page table walker and an invalidation is needed. << 
113          * - if the uarch does not cache invalid entries, a reordered access << 
114          *   could "miss" the new mapping and traps: in that case, we only need << 
115          *   to retry the access, no sfence.vma is required. << 
116          */                                       432          */
117         new_vmalloc_check                      !! 433         .align  4
118 #endif                                         !! 434         .globl  bad_instruction
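
The comment above implies a producer side that this file does not contain: whenever a new vmalloc mapping is created, every CPU's bit in new_vmalloc must be set so that a later kernel-address fault on that CPU knows that a flush-and-retry may be all that is needed. A purely hypothetical sketch of such a producer (the helper name and the bitmap sizing are assumptions, not taken from this file):

    /* Hypothetical producer for the new_vmalloc bitmap (illustrative only). */
    #include <stdint.h>

    #define NR_CPUS_SKETCH 64                   /* assumption for the sketch */
    extern uint64_t new_vmalloc[(NR_CPUS_SKETCH + 63) / 64];

    void note_new_vmalloc_mapping(void)
    {
            /* Mark every CPU; each one clears its own bit in new_vmalloc_check. */
            for (unsigned int cpu = 0; cpu < NR_CPUS_SKETCH; cpu++)
                    __atomic_fetch_or(&new_vmalloc[cpu / 64],
                                      1ULL << (cpu % 64), __ATOMIC_RELAXED);
    }
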
                                                   >> 435 bad_instruction:
                                                   >> 436         sethi   %hi(0xc1f80000), %l4
                                                   >> 437         ld      [%l1], %l5
                                                   >> 438         sethi   %hi(0x81d80000), %l7
                                                   >> 439         and     %l5, %l4, %l5
                                                   >> 440         cmp     %l5, %l7
                                                   >> 441         be      1f
                                                   >> 442         SAVE_ALL
                                                   >> 443 
                                                   >> 444         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 445         WRITE_PAUSE
                                                   >> 446 
                                                   >> 447         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 448         mov     %l1, %o1
                                                   >> 449         mov     %l2, %o2
                                                   >> 450         call    do_illegal_instruction
                                                   >> 451          mov    %l0, %o3
                                                   >> 452 
                                                   >> 453         RESTORE_ALL
                                                   >> 454 
                                                   >> 455 1:      /* unimplemented flush - just skip */
                                                   >> 456         jmpl    %l2, %g0
                                                   >> 457          rett   %l2 + 4
                                                   >> 458 
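bad_instruction above, like most of the trap stubs that follow (priv_instruction, fpd_trap_handler, do_tag_overflow, ...), uses one calling convention: %o0 = pt_regs on the trap frame, %o1 = %l1 (pc), %o2 = %l2 (npc), %o3 = %l0 (psr). Seen from C, the handlers therefore take a shape like the prototype below, reconstructed from those call sites and so to be read as an assumption:

    /* Handler shape implied by the register setup in the stubs above. */
    struct pt_regs;

    void do_illegal_instruction(struct pt_regs *regs,  /* %o0: %sp + STACKFRAME_SZ */
                                unsigned long pc,      /* %o1: %l1 */
                                unsigned long npc,     /* %o2: %l2 */
                                unsigned long psr);    /* %o3: %l0 */
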
                                                   >> 459         .align  4
                                                   >> 460         .globl  priv_instruction
                                                   >> 461 priv_instruction:
                                                   >> 462         SAVE_ALL
                                                   >> 463 
                                                   >> 464         wr      %l0, PSR_ET, %psr
                                                   >> 465         WRITE_PAUSE
                                                   >> 466 
                                                   >> 467         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 468         mov     %l1, %o1
                                                   >> 469         mov     %l2, %o2
                                                   >> 470         call    do_priv_instruction
                                                   >> 471          mov    %l0, %o3
                                                   >> 472 
                                                   >> 473         RESTORE_ALL
                                                   >> 474 
                                                   >> 475         /* This routine handles unaligned data accesses. */
                                                   >> 476         .align  4
                                                   >> 477         .globl  mna_handler
                                                   >> 478 mna_handler:
                                                   >> 479         andcc   %l0, PSR_PS, %g0
                                                   >> 480         be      mna_fromuser
                                                   >> 481          nop
                                                   >> 482 
                                                   >> 483         SAVE_ALL
                                                   >> 484 
                                                   >> 485         wr      %l0, PSR_ET, %psr
                                                   >> 486         WRITE_PAUSE
                                                   >> 487 
                                                   >> 488         ld      [%l1], %o1
                                                   >> 489         call    kernel_unaligned_trap
                                                   >> 490          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 491 
                                                   >> 492         RESTORE_ALL
                                                   >> 493 
                                                   >> 494 mna_fromuser:
                                                   >> 495         SAVE_ALL
                                                   >> 496 
                                                   >> 497         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 498         WRITE_PAUSE
                                                   >> 499 
                                                   >> 500         ld      [%l1], %o1
                                                   >> 501         call    user_unaligned_trap
                                                   >> 502          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 503 
                                                   >> 504         RESTORE_ALL
                                                   >> 505 
                                                   >> 506         /* This routine handles floating point disabled traps. */
                                                   >> 507         .align  4
                                                   >> 508         .globl  fpd_trap_handler
                                                   >> 509 fpd_trap_handler:
                                                   >> 510         SAVE_ALL
                                                   >> 511 
                                                   >> 512         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 513         WRITE_PAUSE
                                                   >> 514 
                                                   >> 515         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 516         mov     %l1, %o1
                                                   >> 517         mov     %l2, %o2
                                                   >> 518         call    do_fpd_trap
                                                   >> 519          mov    %l0, %o3
                                                   >> 520 
                                                   >> 521         RESTORE_ALL
                                                   >> 522 
                                                   >> 523         /* This routine handles Floating Point Exceptions. */
                                                   >> 524         .align  4
                                                   >> 525         .globl  fpe_trap_handler
                                                   >> 526 fpe_trap_handler:
                                                   >> 527         set     fpsave_magic, %l5
                                                   >> 528         cmp     %l1, %l5
                                                   >> 529         be      1f
                                                   >> 530          sethi  %hi(fpsave), %l5
                                                   >> 531         or      %l5, %lo(fpsave), %l5
                                                   >> 532         cmp     %l1, %l5
                                                   >> 533         bne     2f
                                                   >> 534          sethi  %hi(fpsave_catch2), %l5
                                                   >> 535         or      %l5, %lo(fpsave_catch2), %l5
                                                   >> 536         wr      %l0, 0x0, %psr
                                                   >> 537         WRITE_PAUSE
                                                   >> 538         jmp     %l5
                                                   >> 539          rett   %l5 + 4
                                                   >> 540 1:      
                                                   >> 541         sethi   %hi(fpsave_catch), %l5
                                                   >> 542         or      %l5, %lo(fpsave_catch), %l5
                                                   >> 543         wr      %l0, 0x0, %psr
                                                   >> 544         WRITE_PAUSE
                                                   >> 545         jmp     %l5
                                                   >> 546          rett   %l5 + 4
                                                   >> 547 
                                                   >> 548 2:
                                                   >> 549         SAVE_ALL
                                                   >> 550 
                                                   >> 551         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 552         WRITE_PAUSE
                                                   >> 553 
                                                   >> 554         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 555         mov     %l1, %o1
                                                   >> 556         mov     %l2, %o2
                                                   >> 557         call    do_fpe_trap
                                                   >> 558          mov    %l0, %o3
                                                   >> 559 
                                                   >> 560         RESTORE_ALL
                                                   >> 561 
                                                   >> 562         /* This routine handles Tag Overflow Exceptions. */
                                                   >> 563         .align  4
                                                   >> 564         .globl  do_tag_overflow
                                                   >> 565 do_tag_overflow:
                                                   >> 566         SAVE_ALL
                                                   >> 567 
                                                   >> 568         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 569         WRITE_PAUSE
                                                   >> 570 
                                                   >> 571         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 572         mov     %l1, %o1
                                                   >> 573         mov     %l2, %o2
                                                   >> 574         call    handle_tag_overflow
                                                   >> 575          mov    %l0, %o3
                                                   >> 576 
                                                   >> 577         RESTORE_ALL
                                                   >> 578 
                                                   >> 579         /* This routine handles Watchpoint Exceptions. */
                                                   >> 580         .align  4
                                                   >> 581         .globl  do_watchpoint
                                                   >> 582 do_watchpoint:
                                                   >> 583         SAVE_ALL
                                                   >> 584 
                                                   >> 585         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 586         WRITE_PAUSE
                                                   >> 587 
                                                   >> 588         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 589         mov     %l1, %o1
                                                   >> 590         mov     %l2, %o2
                                                   >> 591         call    handle_watchpoint
                                                   >> 592          mov    %l0, %o3
                                                   >> 593 
                                                   >> 594         RESTORE_ALL
                                                   >> 595 
                                                   >> 596         /* This routine handles Register Access Exceptions. */
                                                   >> 597         .align  4
                                                   >> 598         .globl  do_reg_access
                                                   >> 599 do_reg_access:
                                                   >> 600         SAVE_ALL
                                                   >> 601 
                                                   >> 602         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 603         WRITE_PAUSE
                                                   >> 604 
                                                   >> 605         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 606         mov     %l1, %o1
                                                   >> 607         mov     %l2, %o2
                                                   >> 608         call    handle_reg_access
                                                   >> 609          mov    %l0, %o3
                                                   >> 610 
                                                   >> 611         RESTORE_ALL
                                                   >> 612 
                                                   >> 613         /* This routine handles Co-Processor Disabled Exceptions. */
                                                   >> 614         .align  4
                                                   >> 615         .globl  do_cp_disabled
                                                   >> 616 do_cp_disabled:
                                                   >> 617         SAVE_ALL
                                                   >> 618 
                                                   >> 619         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 620         WRITE_PAUSE
                                                   >> 621 
                                                   >> 622         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 623         mov     %l1, %o1
                                                   >> 624         mov     %l2, %o2
                                                   >> 625         call    handle_cp_disabled
                                                   >> 626          mov    %l0, %o3
                                                   >> 627 
                                                   >> 628         RESTORE_ALL
                                                   >> 629 
                                                   >> 630         /* This routine handles Co-Processor Exceptions. */
                                                   >> 631         .align  4
                                                   >> 632         .globl  do_cp_exception
                                                   >> 633 do_cp_exception:
                                                   >> 634         SAVE_ALL
                                                   >> 635 
                                                   >> 636         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 637         WRITE_PAUSE
                                                   >> 638 
                                                   >> 639         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 640         mov     %l1, %o1
                                                   >> 641         mov     %l2, %o2
                                                   >> 642         call    handle_cp_exception
                                                   >> 643          mov    %l0, %o3
                                                   >> 644 
                                                   >> 645         RESTORE_ALL
                                                   >> 646 
                                                   >> 647         /* This routine handles Hardware Divide By Zero Exceptions. */
                                                   >> 648         .align  4
                                                   >> 649         .globl  do_hw_divzero
                                                   >> 650 do_hw_divzero:
                                                   >> 651         SAVE_ALL
                                                   >> 652 
                                                   >> 653         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 654         WRITE_PAUSE
                                                   >> 655 
                                                   >> 656         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 657         mov     %l1, %o1
                                                   >> 658         mov     %l2, %o2
                                                   >> 659         call    handle_hw_divzero
                                                   >> 660          mov    %l0, %o3
                                                   >> 661 
                                                   >> 662         RESTORE_ALL
                                                   >> 663 
                                                   >> 664         .align  4
                                                   >> 665         .globl  do_flush_windows
                                                   >> 666 do_flush_windows:
                                                   >> 667         SAVE_ALL
                                                   >> 668 
                                                   >> 669         wr      %l0, PSR_ET, %psr
                                                   >> 670         WRITE_PAUSE
                                                   >> 671 
                                                   >> 672         andcc   %l0, PSR_PS, %g0
                                                   >> 673         bne     dfw_kernel
                                                   >> 674          nop
                                                   >> 675 
                                                   >> 676         call    flush_user_windows
                                                   >> 677          nop
                                                   >> 678 
                                                   >> 679         /* Advance over the trap instruction. */
                                                   >> 680         ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1
                                                   >> 681         add     %l1, 0x4, %l2
                                                   >> 682         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 683         st      %l2, [%sp + STACKFRAME_SZ + PT_NPC]
                                                   >> 684 
                                                   >> 685         RESTORE_ALL
                                                   >> 686 
                                                   >> 687         .globl  flush_patch_one
                                                   >> 688 
                                                   >> 689         /* We get these for debugging routines using __builtin_return_address() */
                                                   >> 690 dfw_kernel:
                                                   >> 691 flush_patch_one:
                                                   >> 692         FLUSH_ALL_KERNEL_WINDOWS
                                                   >> 693 
                                                   >> 694         /* Advance over the trap instruction. */
                                                   >> 695         ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1
                                                   >> 696         add     %l1, 0x4, %l2
                                                   >> 697         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 698         st      %l2, [%sp + STACKFRAME_SZ + PT_NPC]
119                                                   699 
120         REG_S sp, TASK_TI_KERNEL_SP(tp)        !! 700         RESTORE_ALL
121                                                   701 
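/*
 * A minimal C sketch (illustrative only) of the "advance over the trap
 * instruction" step used above and in several other handlers in this file:
 * the saved pc/npc pair in the trap frame is stepped past the trapping
 * instruction before RESTORE_ALL.  The struct is a hypothetical subset of
 * pt_regs, not kernel code.
 */
struct trap_frame_sketch { unsigned long pc, npc; };

static void advance_over_trap(struct trap_frame_sketch *tf)
{
        tf->pc  = tf->npc;        /* resume at what was the "next" pc */
        tf->npc = tf->npc + 4;    /* and queue the instruction after it */
}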
122 #ifdef CONFIG_VMAP_STACK                       !! 702         /* The getcc software trap.  The user wants the condition codes from
123         addi sp, sp, -(PT_SIZE_ON_STACK)       !! 703          * the %psr in register %g1.
124         srli sp, sp, THREAD_SHIFT              !! 704          */
125         andi sp, sp, 0x1                       << 
126         bnez sp, handle_kernel_stack_overflow  << 
127         REG_L sp, TASK_TI_KERNEL_SP(tp)        << 
128 #endif                                         << 
129                                                << 
130 .Lsave_context:                                << 
131         REG_S sp, TASK_TI_USER_SP(tp)          << 
132         REG_L sp, TASK_TI_KERNEL_SP(tp)        << 
133         addi sp, sp, -(PT_SIZE_ON_STACK)       << 
134         REG_S x1,  PT_RA(sp)                   << 
135         REG_S x3,  PT_GP(sp)                   << 
136         REG_S x5,  PT_T0(sp)                   << 
137         save_from_x6_to_x31                    << 
138                                                   705 
139         /*                                     !! 706         .align  4
140          * Disable user-mode memory access as  !! 707         .globl  getcc_trap_handler
141          * actual user copy routines.          !! 708 getcc_trap_handler:
142          *                                     !! 709         srl     %l0, 20, %g1    ! give user
143          * Disable the FPU/Vector to detect il !! 710         and     %g1, 0xf, %g1   ! only ICC bits in %psr
144          * or vector in kernel space.          !! 711         jmp     %l2             ! advance over trap instruction
                                                   >> 712         rett    %l2 + 0x4       ! like this...
                                                   >> 713 
                                                   >> 714         /* The setcc software trap.  The user has condition codes in %g1
                                                   >> 715          * that it would like placed in the %psr.  Be careful not to flip
                                                   >> 716          * any unintentional bits!
145          */                                       717          */
146         li t0, SR_SUM | SR_FS_VS               << 
147                                                   718 
148         REG_L s0, TASK_TI_USER_SP(tp)          !! 719         .align  4
149         csrrc s1, CSR_STATUS, t0               !! 720         .globl  setcc_trap_handler
150         csrr s2, CSR_EPC                       !! 721 setcc_trap_handler:
151         csrr s3, CSR_TVAL                      !! 722         sll     %g1, 0x14, %l4
152         csrr s4, CSR_CAUSE                     !! 723         set     PSR_ICC, %l5
153         csrr s5, CSR_SCRATCH                   !! 724         andn    %l0, %l5, %l0   ! clear ICC bits in %psr
154         REG_S s0, PT_SP(sp)                    !! 725         and     %l4, %l5, %l4   ! clear non-ICC bits in user value
155         REG_S s1, PT_STATUS(sp)                !! 726         or      %l4, %l0, %l4   ! or them in... mix mix mix
156         REG_S s2, PT_EPC(sp)                   !! 727 
157         REG_S s3, PT_BADADDR(sp)               !! 728         wr      %l4, 0x0, %psr  ! set new %psr
158         REG_S s4, PT_CAUSE(sp)                 !! 729         WRITE_PAUSE             ! TI scumbags...
159         REG_S s5, PT_TP(sp)                    !! 730 
                                                   >> 731         jmp     %l2             ! advance over trap instruction
                                                   >> 732         rett    %l2 + 0x4       ! like this...
                                                   >> 733 
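/*
 * A minimal C sketch of the condition-code handling done by
 * getcc_trap_handler and setcc_trap_handler above.  PSR_ICC_SKETCH is
 * assumed to be the 4-bit icc field at bits 20-23 of %psr (matching the
 * shift by 20 / 0x14 in the asm); the function names are illustrative.
 */
#define PSR_ICC_SKETCH 0x00f00000u

static unsigned int get_icc(unsigned int psr)
{
        return (psr >> 20) & 0xf;                  /* give user only the ICC bits */
}

static unsigned int set_icc(unsigned int psr, unsigned int user_g1)
{
        unsigned int icc = (user_g1 << 20) & PSR_ICC_SKETCH;  /* drop non-ICC bits */
        return (psr & ~PSR_ICC_SKETCH) | icc;                 /* splice into %psr */
}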
                                                   >> 734 sun4m_nmi_error:
                                                   >> 735         /* NMI async memory error handling. */
                                                   >> 736         sethi   %hi(0x80000000), %l4
                                                   >> 737         sethi   %hi(sun4m_irq_global), %o5
                                                   >> 738         ld      [%o5 + %lo(sun4m_irq_global)], %l5
                                                   >> 739         st      %l4, [%l5 + 0x0c]       ! sun4m_irq_global->mask_set=0x80000000
                                                   >> 740         WRITE_PAUSE
                                                   >> 741         ld      [%l5 + 0x00], %g0       ! sun4m_irq_global->pending
                                                   >> 742         WRITE_PAUSE
                                                   >> 743         or      %l0, PSR_PIL, %l4
                                                   >> 744         wr      %l4, 0x0, %psr
                                                   >> 745         WRITE_PAUSE
                                                   >> 746         wr      %l4, PSR_ET, %psr
                                                   >> 747         WRITE_PAUSE
                                                   >> 748         call    sun4m_nmi
                                                   >> 749          nop
                                                   >> 750         st      %l4, [%l5 + 0x08]       ! sun4m_irq_global->mask_clear=0x80000000
                                                   >> 751         WRITE_PAUSE
                                                   >> 752         ld      [%l5 + 0x00], %g0       ! sun4m_irq_global->pending
                                                   >> 753         WRITE_PAUSE
                                                   >> 754         RESTORE_ALL
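/*
 * A hedged sketch of the register block implied by the offsets used in
 * sun4m_nmi_error above: pending at +0x00, mask_clear at +0x08, mask_set
 * at +0x0c.  The struct name and the +0x04 slot are assumptions for
 * illustration, not the kernel's definition of sun4m_irq_global.
 */
struct sun4m_irq_global_sketch {
        volatile unsigned int pending;        /* +0x00: read back to flush writes */
        volatile unsigned int reserved_04;    /* +0x04: not touched by this path */
        volatile unsigned int mask_clear;     /* +0x08: write 1s to unmask */
        volatile unsigned int mask_set;       /* +0x0c: write 1s to mask */
};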
                                                   >> 755 
                                                   >> 756 #ifndef CONFIG_SMP
                                                   >> 757         .align  4
                                                   >> 758         .globl  linux_trap_ipi15_sun4m
                                                   >> 759 linux_trap_ipi15_sun4m:
                                                   >> 760         SAVE_ALL
                                                   >> 761 
                                                   >> 762         ba      sun4m_nmi_error
                                                   >> 763          nop
                                                   >> 764 #endif /* CONFIG_SMP */
                                                   >> 765 
                                                   >> 766         .align  4
                                                   >> 767         .globl  srmmu_fault
                                                   >> 768 srmmu_fault:
                                                   >> 769         mov     0x400, %l5
                                                   >> 770         mov     0x300, %l4
                                                   >> 771 
                                                   >> 772 LEON_PI(lda     [%l5] ASI_LEON_MMUREGS, %l6)    ! read sfar first
                                                   >> 773 SUN_PI_(lda     [%l5] ASI_M_MMUREGS, %l6)       ! read sfar first
                                                   >> 774 
                                                   >> 775 LEON_PI(lda     [%l4] ASI_LEON_MMUREGS, %l5)    ! read sfsr last
                                                   >> 776 SUN_PI_(lda     [%l4] ASI_M_MMUREGS, %l5)       ! read sfsr last
                                                   >> 777 
                                                   >> 778         andn    %l6, 0xfff, %l6
                                                   >> 779         srl     %l5, 6, %l5                     ! and encode all info into l7
                                                   >> 780 
                                                   >> 781         and     %l5, 2, %l5
                                                   >> 782         or      %l5, %l6, %l6
                                                   >> 783 
                                                   >> 784         or      %l6, %l7, %l7                   ! l7 = [addr,write,txtfault]
                                                   >> 785 
                                                   >> 786         SAVE_ALL
                                                   >> 787 
                                                   >> 788         mov     %l7, %o1
                                                   >> 789         mov     %l7, %o2
                                                   >> 790         and     %o1, 1, %o1             ! arg2 = text_faultp
                                                   >> 791         mov     %l7, %o3
                                                   >> 792         and     %o2, 2, %o2             ! arg3 = writep
                                                   >> 793         andn    %o3, 0xfff, %o3         ! arg4 = faulting address
                                                   >> 794 
                                                   >> 795         wr      %l0, PSR_ET, %psr
                                                   >> 796         WRITE_PAUSE
                                                   >> 797 
                                                   >> 798         call    do_sparc_fault
                                                   >> 799          add    %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
                                                   >> 800 
                                                   >> 801         RESTORE_ALL
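/*
 * A hedged C sketch of how the packed word built in %l7 above
 * ([addr | write | txtfault]) splits back into the arguments named in the
 * comments.  The do_sparc_fault() prototype is assumed here for
 * illustration.
 */
struct pt_regs;
void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
                    unsigned long address);

static void dispatch_srmmu_fault(struct pt_regs *regs, unsigned long packed)
{
        int text_fault        = packed & 1;          /* arg2 = text_faultp */
        int write             = packed & 2;          /* arg3 = writep */
        unsigned long address = packed & ~0xfffUL;   /* arg4 = faulting address */

        do_sparc_fault(regs, text_fault, write, address);
}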
                                                   >> 802 
                                                   >> 803         .align  4
                                                   >> 804         .globl  sys_nis_syscall
                                                   >> 805 sys_nis_syscall:
                                                   >> 806         mov     %o7, %l5
                                                   >> 807         add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
                                                   >> 808         call    c_sys_nis_syscall
                                                   >> 809          mov    %l5, %o7
                                                   >> 810 
                                                   >> 811 sunos_execv:
                                                   >> 812         .globl  sunos_execv
                                                   >> 813         b       sys_execve
                                                   >> 814          clr    %i2
                                                   >> 815 
                                                   >> 816         .align  4
                                                   >> 817         .globl  sys_sparc_pipe
                                                   >> 818 sys_sparc_pipe:
                                                   >> 819         mov     %o7, %l5
                                                   >> 820         add     %sp, STACKFRAME_SZ, %o0         ! pt_regs *regs arg
                                                   >> 821         call    sparc_pipe
                                                   >> 822          mov    %l5, %o7
                                                   >> 823 
                                                   >> 824         .align  4
                                                   >> 825         .globl  sys_sigstack
                                                   >> 826 sys_sigstack:
                                                   >> 827         mov     %o7, %l5
                                                   >> 828         mov     %fp, %o2
                                                   >> 829         call    do_sys_sigstack
                                                   >> 830          mov    %l5, %o7
                                                   >> 831 
                                                   >> 832         .align  4
                                                   >> 833         .globl  sys_sigreturn
                                                   >> 834 sys_sigreturn:
                                                   >> 835         call    do_sigreturn
                                                   >> 836          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 837 
                                                   >> 838         ld      [%curptr + TI_FLAGS], %l5
                                                   >> 839         andcc   %l5, _TIF_SYSCALL_TRACE, %g0
                                                   >> 840         be      1f
                                                   >> 841          nop
160                                                   842 
161         /*                                     !! 843         call    syscall_trace
162          * Set the scratch register to 0, so t !! 844          mov    1, %o1
163          * occurs, the exception vector knows  << 
164          */                                    << 
165         csrw CSR_SCRATCH, x0                   << 
166                                                   845 
167         /* Load the global pointer */          !! 846 1:
168         load_global_pointer                    !! 847         /* We don't want to muck with user registers like a
                                                   >> 848          * normal syscall, just return.
                                                   >> 849          */
                                                   >> 850         RESTORE_ALL
169                                                   851 
170         /* Load the kernel shadow call stack p !! 852         .align  4
171         scs_load_current_if_task_changed s5    !! 853         .globl  sys_rt_sigreturn
                                                   >> 854 sys_rt_sigreturn:
                                                   >> 855         call    do_rt_sigreturn
                                                   >> 856          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 857 
                                                   >> 858         ld      [%curptr + TI_FLAGS], %l5
                                                   >> 859         andcc   %l5, _TIF_SYSCALL_TRACE, %g0
                                                   >> 860         be      1f
                                                   >> 861          nop
                                                   >> 862 
                                                   >> 863         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 864         call    syscall_trace
                                                   >> 865          mov    1, %o1
172                                                   866 
173 #ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE           !! 867 1:
174         move a0, sp                            !! 868         /* We are returning to a signal handler. */
175         call riscv_v_context_nesting_start     !! 869         RESTORE_ALL
176 #endif                                         << 
177         move a0, sp /* pt_regs */              << 
178                                                   870 
179         /*                                     !! 871         /* Now that we have a real sys_clone, sys_fork() is
180          * MSB of cause differentiates between !! 872          * implemented in terms of it.  Our _real_ implementation
181          * interrupts and exceptions           !! 873          * of SunOS vfork() will use sys_vfork().
                                                   >> 874          *
                                                   >> 875          * XXX These three should be consolidated into mostly shared
                                                   >> 876          * XXX code just like on sparc64... -DaveM
182          */                                       877          */
183         bge s4, zero, 1f                       !! 878         .align  4
184                                                !! 879         .globl  sys_fork, flush_patch_two
185         /* Handle interrupts */                !! 880 sys_fork:
186         call do_irq                            !! 881         mov     %o7, %l5
187         j ret_from_exception                   !! 882 flush_patch_two:
                                                   >> 883         FLUSH_ALL_KERNEL_WINDOWS;
                                                   >> 884         ld      [%curptr + TI_TASK], %o4
                                                   >> 885         rd      %psr, %g4
                                                   >> 886         WRITE_PAUSE
                                                   >> 887         mov     SIGCHLD, %o0                    ! arg0: clone flags
                                                   >> 888         rd      %wim, %g5
                                                   >> 889         WRITE_PAUSE
                                                   >> 890         mov     %fp, %o1                        ! arg1: usp
                                                   >> 891         std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
                                                   >> 892         add     %sp, STACKFRAME_SZ, %o2         ! arg2: pt_regs ptr
                                                   >> 893         mov     0, %o3
                                                   >> 894         call    sparc_do_fork
                                                   >> 895          mov    %l5, %o7
                                                   >> 896 
                                                   >> 897         /* Whee, kernel threads! */
                                                   >> 898         .globl  sys_clone, flush_patch_three
                                                   >> 899 sys_clone:
                                                   >> 900         mov     %o7, %l5
                                                   >> 901 flush_patch_three:
                                                   >> 902         FLUSH_ALL_KERNEL_WINDOWS;
                                                   >> 903         ld      [%curptr + TI_TASK], %o4
                                                   >> 904         rd      %psr, %g4
                                                   >> 905         WRITE_PAUSE
                                                   >> 906 
                                                   >> 907         /* arg0,1: flags,usp  -- loaded already */
                                                   >> 908         cmp     %o1, 0x0                        ! Is new_usp NULL?
                                                   >> 909         rd      %wim, %g5
                                                   >> 910         WRITE_PAUSE
                                                   >> 911         be,a    1f
                                                   >> 912          mov    %fp, %o1                        ! yes, use caller's usp
                                                   >> 913         andn    %o1, 7, %o1                     ! no, align to 8 bytes
188 1:                                                914 1:
189         /* Handle other exceptions */          !! 915         std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
190         slli t0, s4, RISCV_LGPTR               !! 916         add     %sp, STACKFRAME_SZ, %o2         ! arg2: pt_regs ptr
191         la t1, excp_vect_table                 !! 917         mov     0, %o3
192         la t2, excp_vect_table_end             !! 918         call    sparc_do_fork
193         add t0, t1, t0                         !! 919          mov    %l5, %o7
194         /* Check if exception code lies within !! 920 
195         bgeu t0, t2, 3f                        !! 921         /* Whee, real vfork! */
196         REG_L t1, 0(t0)                        !! 922         .globl  sys_vfork, flush_patch_four
197 2:      jalr t1                                !! 923 sys_vfork:
198         j ret_from_exception                   !! 924 flush_patch_four:
                                                   >> 925         FLUSH_ALL_KERNEL_WINDOWS;
                                                   >> 926         ld      [%curptr + TI_TASK], %o4
                                                   >> 927         rd      %psr, %g4
                                                   >> 928         WRITE_PAUSE
                                                   >> 929         rd      %wim, %g5
                                                   >> 930         WRITE_PAUSE
                                                   >> 931         std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
                                                   >> 932         sethi   %hi(0x4000 | 0x0100 | SIGCHLD), %o0
                                                   >> 933         mov     %fp, %o1
                                                   >> 934         or      %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
                                                   >> 935         sethi   %hi(sparc_do_fork), %l1
                                                   >> 936         mov     0, %o3
                                                   >> 937         jmpl    %l1 + %lo(sparc_do_fork), %g0
                                                   >> 938          add    %sp, STACKFRAME_SZ, %o2
                                                   >> 939 
                                                   >> 940         .align  4
                                                   >> 941 linux_sparc_ni_syscall:
                                                   >> 942         sethi   %hi(sys_ni_syscall), %l7
                                                   >> 943         b       do_syscall
                                                   >> 944          or     %l7, %lo(sys_ni_syscall), %l7
                                                   >> 945 
                                                   >> 946 linux_syscall_trace:
                                                   >> 947         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 948         call    syscall_trace
                                                   >> 949          mov    0, %o1
                                                   >> 950         cmp     %o0, 0
                                                   >> 951         bne     3f
                                                   >> 952          mov    -ENOSYS, %o0
                                                   >> 953 
                                                   >> 954         /* Syscall tracing can modify the registers.  */
                                                   >> 955         ld      [%sp + STACKFRAME_SZ + PT_G1], %g1
                                                   >> 956         sethi   %hi(sys_call_table), %l7
                                                   >> 957         ld      [%sp + STACKFRAME_SZ + PT_I0], %i0
                                                   >> 958         or      %l7, %lo(sys_call_table), %l7
                                                   >> 959         ld      [%sp + STACKFRAME_SZ + PT_I1], %i1
                                                   >> 960         ld      [%sp + STACKFRAME_SZ + PT_I2], %i2
                                                   >> 961         ld      [%sp + STACKFRAME_SZ + PT_I3], %i3
                                                   >> 962         ld      [%sp + STACKFRAME_SZ + PT_I4], %i4
                                                   >> 963         ld      [%sp + STACKFRAME_SZ + PT_I5], %i5
                                                   >> 964         cmp     %g1, NR_syscalls
                                                   >> 965         bgeu    3f
                                                   >> 966          mov    -ENOSYS, %o0
                                                   >> 967 
                                                   >> 968         sll     %g1, 2, %l4
                                                   >> 969         mov     %i0, %o0
                                                   >> 970         ld      [%l7 + %l4], %l7
                                                   >> 971         mov     %i1, %o1
                                                   >> 972         mov     %i2, %o2
                                                   >> 973         mov     %i3, %o3
                                                   >> 974         b       2f
                                                   >> 975          mov    %i4, %o4
                                                   >> 976 
                                                   >> 977         .globl  ret_from_fork
                                                   >> 978 ret_from_fork:
                                                   >> 979         call    schedule_tail
                                                   >> 980          ld     [%g3 + TI_TASK], %o0
                                                   >> 981         b       ret_sys_call
                                                   >> 982          ld     [%sp + STACKFRAME_SZ + PT_I0], %o0
                                                   >> 983 
                                                   >> 984         .globl  ret_from_kernel_thread
                                                   >> 985 ret_from_kernel_thread:
                                                   >> 986         call    schedule_tail
                                                   >> 987          ld     [%g3 + TI_TASK], %o0
                                                   >> 988         ld      [%sp + STACKFRAME_SZ + PT_G1], %l0
                                                   >> 989         call    %l0
                                                   >> 990          ld     [%sp + STACKFRAME_SZ + PT_G2], %o0
                                                   >> 991         rd      %psr, %l1
                                                   >> 992         ld      [%sp + STACKFRAME_SZ + PT_PSR], %l0
                                                   >> 993         andn    %l0, PSR_CWP, %l0
                                                   >> 994         nop
                                                   >> 995         and     %l1, PSR_CWP, %l1
                                                   >> 996         or      %l0, %l1, %l0
                                                   >> 997         st      %l0, [%sp + STACKFRAME_SZ + PT_PSR]
                                                   >> 998         b       ret_sys_call
                                                   >> 999          mov    0, %o0
                                                   >> 1000 
                                                   >> 1001         /* Linux native system calls enter here... */
                                                   >> 1002         .align  4
                                                   >> 1003         .globl  linux_sparc_syscall
                                                   >> 1004 linux_sparc_syscall:
                                                   >> 1005         sethi   %hi(PSR_SYSCALL), %l4
                                                   >> 1006         or      %l0, %l4, %l0
                                                   >> 1007         /* Direct access to user regs, much faster. */
                                                   >> 1008         cmp     %g1, NR_syscalls
                                                   >> 1009         bgeu    linux_sparc_ni_syscall
                                                   >> 1010          sll    %g1, 2, %l4
                                                   >> 1011         ld      [%l7 + %l4], %l7
                                                   >> 1012 
                                                   >> 1013 do_syscall:
                                                   >> 1014         SAVE_ALL_HEAD
                                                   >> 1015          rd     %wim, %l3
                                                   >> 1016 
                                                   >> 1017         wr      %l0, PSR_ET, %psr
                                                   >> 1018         mov     %i0, %o0
                                                   >> 1019         mov     %i1, %o1
                                                   >> 1020         mov     %i2, %o2
                                                   >> 1021 
                                                   >> 1022         ld      [%curptr + TI_FLAGS], %l5
                                                   >> 1023         mov     %i3, %o3
                                                   >> 1024         andcc   %l5, _TIF_SYSCALL_TRACE, %g0
                                                   >> 1025         mov     %i4, %o4
                                                   >> 1026         bne     linux_syscall_trace
                                                   >> 1027          mov    %i0, %l5
                                                   >> 1028 2:
                                                   >> 1029         call    %l7
                                                   >> 1030          mov    %i5, %o5
                                                   >> 1031 
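/*
 * A hedged sketch of the dispatch that linux_sparc_syscall/do_syscall
 * perform: bounds-check the syscall number in %g1, index sys_call_table
 * by 4*%g1, then call the entry with %i0-%i5 as arguments.  The typedef,
 * the helper name and the nr_syscalls parameter (NR_syscalls in the asm)
 * are illustrative assumptions.
 */
typedef long (*syscall_fn_sketch)(long, long, long, long, long, long);
extern syscall_fn_sketch sys_call_table[];    /* indexed by syscall number */
extern long sys_ni_syscall(void);             /* the out-of-range fallback */

static long dispatch_syscall(unsigned long nr, unsigned long nr_syscalls,
                             long a0, long a1, long a2,
                             long a3, long a4, long a5)
{
        if (nr >= nr_syscalls)                /* unknown number: ENOSYS path */
                return sys_ni_syscall();
        return sys_call_table[nr](a0, a1, a2, a3, a4, a5);
}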
199 3:                                                1032 3:
                                                   >> 1033         st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
200                                                   1034 
201         la t1, do_trap_unknown                 !! 1035 ret_sys_call:
202         j 2b                                   !! 1036         ld      [%curptr + TI_FLAGS], %l6
203 SYM_CODE_END(handle_exception)                 !! 1037         cmp     %o0, -ERESTART_RESTARTBLOCK
204 ASM_NOKPROBE(handle_exception)                 !! 1038         ld      [%sp + STACKFRAME_SZ + PT_PSR], %g3
205                                                !! 1039         set     PSR_C, %g2
206 /*                                             !! 1040         bgeu    1f
207  * The ret_from_exception must be called with  !! 1041          andcc  %l6, _TIF_SYSCALL_TRACE, %g0
208  * caller list:                                !! 1042 
209  *  - handle_exception                         !! 1043         /* System call success, clear Carry condition code. */
210  *  - ret_from_fork                            !! 1044         andn    %g3, %g2, %g3
211  */                                            !! 1045         clr     %l6
212 SYM_CODE_START_NOALIGN(ret_from_exception)     !! 1046         st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]     
213         REG_L s0, PT_STATUS(sp)                !! 1047         bne     linux_syscall_trace2
214 #ifdef CONFIG_RISCV_M_MODE                     !! 1048          ld     [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
215         /* the MPP value is too large to be us !! 1049         add     %l1, 0x4, %l2                   /* npc = npc+4 */
216         li t0, SR_MPP                          !! 1050         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
217         and s0, s0, t0                         !! 1051         b       ret_trap_entry
218 #else                                          !! 1052          st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
219         andi s0, s0, SR_SPP                    !! 1053 1:
220 #endif                                         !! 1054         /* System call failure, set Carry condition code.
221         bnez s0, 1f                            !! 1055          * Also, get abs(errno) to return to the process.
                                                   >> 1056          */
                                                   >> 1057         sub     %g0, %o0, %o0
                                                   >> 1058         or      %g3, %g2, %g3
                                                   >> 1059         st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
                                                   >> 1060         mov     1, %l6
                                                   >> 1061         st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]
                                                   >> 1062         bne     linux_syscall_trace2
                                                   >> 1063          ld     [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
                                                   >> 1064         add     %l1, 0x4, %l2                   /* npc = npc+4 */
                                                   >> 1065         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 1066         b       ret_trap_entry
                                                   >> 1067          st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
                                                   >> 1068 
                                                   >> 1069 linux_syscall_trace2:
                                                   >> 1070         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 1071         mov     1, %o1
                                                   >> 1072         call    syscall_trace
                                                   >> 1073          add    %l1, 0x4, %l2                   /* npc = npc+4 */
                                                   >> 1074         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 1075         b       ret_trap_entry
                                                   >> 1076          st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
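/*
 * A hedged sketch of the return convention established by ret_sys_call
 * above: the carry bit in the saved %psr is cleared on success and set on
 * failure, in which case %o0 carries abs(errno).  This is roughly how a
 * userspace stub would consume it (illustrative, not libc source).
 */
static long syscall_result(int psr_carry_set, unsigned long o0, int *errnop)
{
        if (!psr_carry_set)
                return (long)o0;      /* success: %o0 is the result */
        *errnop = (int)o0;            /* failure: %o0 is abs(errno) */
        return -1;
}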
222                                                   1077 
223 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK             << 
224         call    stackleak_erase_on_task_stack  << 
225 #endif                                         << 
226                                                   1078 
227         /* Save unwound kernel stack pointer i !! 1079 /* Saving and restoring the FPU state is best done from lowlevel code.
228         addi s0, sp, PT_SIZE_ON_STACK          !! 1080  *
229         REG_S s0, TASK_TI_KERNEL_SP(tp)        !! 1081  * void fpsave(unsigned long *fpregs, unsigned long *fsr,
                                                   >> 1082  *             void *fpqueue, unsigned long *fpqdepth)
                                                   >> 1083  */
230                                                   1084 
231         /* Save the kernel shadow call stack p !! 1085         .globl  fpsave
232         scs_save_current                       !! 1086 fpsave:
                                                   >> 1087         st      %fsr, [%o1]     ! this can trap on us if fpu is in bogon state
                                                   >> 1088         ld      [%o1], %g1
                                                   >> 1089         set     0x2000, %g4
                                                   >> 1090         andcc   %g1, %g4, %g0
                                                   >> 1091         be      2f
                                                   >> 1092          mov    0, %g2
233                                                   1093 
234         /*                                     !! 1094         /* We have an fpqueue to save. */
235          * Save TP into the scratch register , << 
236          * structures again.                   << 
237          */                                    << 
238         csrw CSR_SCRATCH, tp                   << 
239 1:                                                1095 1:
240 #ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE           !! 1096         std     %fq, [%o2]
241         move a0, sp                            !! 1097 fpsave_magic:
242         call riscv_v_context_nesting_end       !! 1098         st      %fsr, [%o1]
243 #endif                                         !! 1099         ld      [%o1], %g3
244         REG_L a0, PT_STATUS(sp)                !! 1100         andcc   %g3, %g4, %g0
245         /*                                     !! 1101         add     %g2, 1, %g2
246          * The current load reservation is eff !! 1102         bne     1b
247          * state, in the sense that load reser !! 1103          add    %o2, 8, %o2
248          * different hart contexts.  We can't  !! 1104 
249          * reservation, so instead here we cle !! 1105 2:
250          * it's always legal for implementatio !! 1106         st      %g2, [%o3]
251          * any point (as long as the forward p !! 1107 
252          * we'll ignore that here).            !! 1108         std     %f0, [%o0 + 0x00]
                                                   >> 1109         std     %f2, [%o0 + 0x08]
                                                   >> 1110         std     %f4, [%o0 + 0x10]
                                                   >> 1111         std     %f6, [%o0 + 0x18]
                                                   >> 1112         std     %f8, [%o0 + 0x20]
                                                   >> 1113         std     %f10, [%o0 + 0x28]
                                                   >> 1114         std     %f12, [%o0 + 0x30]
                                                   >> 1115         std     %f14, [%o0 + 0x38]
                                                   >> 1116         std     %f16, [%o0 + 0x40]
                                                   >> 1117         std     %f18, [%o0 + 0x48]
                                                   >> 1118         std     %f20, [%o0 + 0x50]
                                                   >> 1119         std     %f22, [%o0 + 0x58]
                                                   >> 1120         std     %f24, [%o0 + 0x60]
                                                   >> 1121         std     %f26, [%o0 + 0x68]
                                                   >> 1122         std     %f28, [%o0 + 0x70]
                                                   >> 1123         retl
                                                   >> 1124          std    %f30, [%o0 + 0x78]
                                                   >> 1125 
                                                   >> 1126         /* Thanks to Theo Deraadt and the authors of the Sprite/netbsd/openbsd
                                                   >> 1127          * code for pointing out this possible deadlock: while we save state
                                                   >> 1128          * above we could trap on the fsr store, so our low-level fpu trap
                                                   >> 1129          * code has to know how to deal with this.
                                                   >> 1130          */
                                                   >> 1131 fpsave_catch:
                                                   >> 1132         b       fpsave_magic + 4
                                                   >> 1133          st     %fsr, [%o1]
                                                   >> 1134 
                                                   >> 1135 fpsave_catch2:
                                                   >> 1136         b       fpsave + 4
                                                   >> 1137          st     %fsr, [%o1]
                                                   >> 1138 
                                                   >> 1139         /* void fpload(unsigned long *fpregs, unsigned long *fsr); */
                                                   >> 1140 
                                                   >> 1141         .globl  fpload
                                                   >> 1142 fpload:
                                                   >> 1143         ldd     [%o0 + 0x00], %f0
                                                   >> 1144         ldd     [%o0 + 0x08], %f2
                                                   >> 1145         ldd     [%o0 + 0x10], %f4
                                                   >> 1146         ldd     [%o0 + 0x18], %f6
                                                   >> 1147         ldd     [%o0 + 0x20], %f8
                                                   >> 1148         ldd     [%o0 + 0x28], %f10
                                                   >> 1149         ldd     [%o0 + 0x30], %f12
                                                   >> 1150         ldd     [%o0 + 0x38], %f14
                                                   >> 1151         ldd     [%o0 + 0x40], %f16
                                                   >> 1152         ldd     [%o0 + 0x48], %f18
                                                   >> 1153         ldd     [%o0 + 0x50], %f20
                                                   >> 1154         ldd     [%o0 + 0x58], %f22
                                                   >> 1155         ldd     [%o0 + 0x60], %f24
                                                   >> 1156         ldd     [%o0 + 0x68], %f26
                                                   >> 1157         ldd     [%o0 + 0x70], %f28
                                                   >> 1158         ldd     [%o0 + 0x78], %f30
                                                   >> 1159         ld      [%o1], %fsr
                                                   >> 1160         retl
                                                   >> 1161          nop
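/*
 * A hedged sketch of driving the fpsave()/fpload() pair from C, using the
 * prototypes given in the comments above.  The container struct is
 * hypothetical; the real kernel keeps this state in its thread structure.
 */
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
                   void *fpqueue, unsigned long *fpqdepth);
extern void fpload(unsigned long *fpregs, unsigned long *fsr);

struct fpu_state_sketch {
        unsigned long regs[32];       /* %f0-%f31, stored as pairs above */
        unsigned long fsr;
        unsigned long queue[32];      /* room for deferred FP queue entries */
        unsigned long qdepth;
};

static void checkpoint_fpu(struct fpu_state_sketch *s)
{
        fpsave(s->regs, &s->fsr, s->queue, &s->qdepth);
}

static void restore_fpu(struct fpu_state_sketch *s)
{
        fpload(s->regs, &s->fsr);
}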
                                                   >> 1162 
                                                   >> 1163         /* __ndelay and __udelay take two arguments:
                                                   >> 1164          * 0 - nsecs or usecs to delay
                                                   >> 1165          * 1 - per_cpu udelay_val (loops per jiffy)
253          *                                        1166          *
254          * Dangling load reservations can be t !! 1167          * Note that ndelay gives HZ times higher resolution but has a 10ms
255          * middle of an LR/SC sequence, but ca !! 1168          * limit.  udelay can handle up to 1s.
256          * forward branch around an SC -- whic !! 1169          */
257          * result we need to clear reservation !! 1170         .globl  __ndelay
258          * jump back to the new context.  Whil !! 1171 __ndelay:
259          * completes, implementations are allo !! 1172         save    %sp, -STACKFRAME_SZ, %sp
260          * arbitrarily large.                  !! 1173         mov     %i0, %o0                ! round multiplier up so large ns ok
261          */                                    !! 1174         mov     0x1ae, %o1              ! 2**32 / (1 000 000 000 / HZ)
262         REG_L  a2, PT_EPC(sp)                  !! 1175         umul    %o0, %o1, %o0
263         REG_SC x0, a2, PT_EPC(sp)              !! 1176         rd      %y, %o1
264                                                !! 1177         mov     %i1, %o1                ! udelay_val
265         csrw CSR_STATUS, a0                    !! 1178         umul    %o0, %o1, %o0
266         csrw CSR_EPC, a2                       !! 1179         rd      %y, %o1
267                                                !! 1180         ba      delay_continue
268         REG_L x1,  PT_RA(sp)                   !! 1181          mov    %o1, %o0                ! >>32 later for better resolution
269         REG_L x3,  PT_GP(sp)                   !! 1182 
270         REG_L x4,  PT_TP(sp)                   !! 1183         .globl  __udelay
271         REG_L x5,  PT_T0(sp)                   !! 1184 __udelay:
272         restore_from_x6_to_x31                 !! 1185         save    %sp, -STACKFRAME_SZ, %sp
273                                                !! 1186         mov     %i0, %o0
274         REG_L x2,  PT_SP(sp)                   !! 1187         sethi   %hi(0x10c7), %o1        ! round multiplier up so large us ok
275                                                !! 1188         or      %o1, %lo(0x10c7), %o1   ! 2**32 / 1 000 000
276 #ifdef CONFIG_RISCV_M_MODE                     !! 1189         umul    %o0, %o1, %o0
277         mret                                   !! 1190         rd      %y, %o1
278 #else                                          !! 1191         mov     %i1, %o1                ! udelay_val
279         sret                                   !! 1192         umul    %o0, %o1, %o0
280 #endif                                         !! 1193         rd      %y, %o1
281 SYM_CODE_END(ret_from_exception)               !! 1194         sethi   %hi(0x028f4b62), %l0    ! Add in rounding constant * 2**32,
282 ASM_NOKPROBE(ret_from_exception)               !! 1195         or      %g0, %lo(0x028f4b62), %l0
                                                   >> 1196         addcc   %o0, %l0, %o0           ! 2**32 * 0.009 999
                                                   >> 1197         bcs,a   3f
                                                   >> 1198          add    %o1, 0x01, %o1
                                                   >> 1199 3:
                                                   >> 1200         mov     HZ, %o0                 ! >>32 earlier for wider range
                                                   >> 1201         umul    %o0, %o1, %o0
                                                   >> 1202         rd      %y, %o1
283                                                   1203 
284 #ifdef CONFIG_VMAP_STACK                       !! 1204 delay_continue:
285 SYM_CODE_START_LOCAL(handle_kernel_stack_overf !! 1205         cmp     %o0, 0x0
286         /* we reach here from kernel context,  !! 1206 1:
287         csrrw x31, CSR_SCRATCH, x31            !! 1207         bne     1b
288         asm_per_cpu sp, overflow_stack, x31    !! 1208          subcc  %o0, 1, %o0
289         li x31, OVERFLOW_STACK_SIZE            !! 1209         
290         add sp, sp, x31                        !! 1210         ret
291         /* zero out x31 again and restore x31  !! 1211         restore
292         xor x31, x31, x31                      !! 1212 EXPORT_SYMBOL(__udelay)
293         csrrw x31, CSR_SCRATCH, x31            !! 1213 EXPORT_SYMBOL(__ndelay)
294                                                !! 1214 
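/*
 * A hedged C sketch of the fixed-point arithmetic __udelay performs with
 * umul and %y above: scale usecs by ~2**32/1e6 (0x10c7), multiply the
 * fractional part by loops-per-jiffy, add the rounding constant, take the
 * high word, then scale by HZ to get the busy-loop count.  64-bit C
 * arithmetic stands in for the %y reads; the function name is illustrative.
 */
#include <stdint.h>

static uint32_t udelay_loops(uint32_t usecs, uint32_t loops_per_jiffy,
                             uint32_t hz)
{
        uint32_t frac = (uint32_t)((uint64_t)usecs * 0x10c7u);   /* keep low word */
        uint64_t prod = (uint64_t)frac * loops_per_jiffy + 0x028f4b62u;
        return (uint32_t)(prod >> 32) * hz;   /* iterations of the delay loop */
}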
295         addi sp, sp, -(PT_SIZE_ON_STACK)       !! 1215         /* Handle a software breakpoint */
296                                                !! 1216         /* We have to inform parent that child has stopped */
297         //save context to overflow stack       !! 1217         .align 4
298         REG_S x1,  PT_RA(sp)                   !! 1218         .globl breakpoint_trap
299         REG_S x3,  PT_GP(sp)                   !! 1219 breakpoint_trap:
300         REG_S x5,  PT_T0(sp)                   !! 1220         rd      %wim,%l3
301         save_from_x6_to_x31                    !! 1221         SAVE_ALL
302                                                !! 1222         wr      %l0, PSR_ET, %psr
303         REG_L s0, TASK_TI_KERNEL_SP(tp)        !! 1223         WRITE_PAUSE
304         csrr s1, CSR_STATUS                    !! 1224 
305         csrr s2, CSR_EPC                       !! 1225         st      %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
306         csrr s3, CSR_TVAL                      !! 1226         call    sparc_breakpoint
307         csrr s4, CSR_CAUSE                     !! 1227          add    %sp, STACKFRAME_SZ, %o0
308         csrr s5, CSR_SCRATCH                   !! 1228 
309         REG_S s0, PT_SP(sp)                    !! 1229         RESTORE_ALL
310         REG_S s1, PT_STATUS(sp)                !! 1230 
311         REG_S s2, PT_EPC(sp)                   !! 1231 #ifdef CONFIG_KGDB
312         REG_S s3, PT_BADADDR(sp)               !! 1232         ENTRY(kgdb_trap_low)
313         REG_S s4, PT_CAUSE(sp)                 !! 1233         rd      %wim,%l3
314         REG_S s5, PT_TP(sp)                    !! 1234         SAVE_ALL
315         move a0, sp                            !! 1235         wr      %l0, PSR_ET, %psr
316         tail handle_bad_stack                  !! 1236         WRITE_PAUSE
317 SYM_CODE_END(handle_kernel_stack_overflow)     !! 1237 
318 ASM_NOKPROBE(handle_kernel_stack_overflow)     !! 1238         mov     %l7, %o0                ! trap_level
                                                   >> 1239         call    kgdb_trap
                                                   >> 1240          add    %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
                                                   >> 1241 
                                                   >> 1242         RESTORE_ALL
                                                   >> 1243         ENDPROC(kgdb_trap_low)
319 #endif                                            1244 #endif
320                                                   1245 
321 SYM_CODE_START(ret_from_fork)                  !! 1246         .align  4
322         call schedule_tail                     !! 1247         .globl  flush_patch_exception
323         beqz s0, 1f     /* not from kernel thr !! 1248 flush_patch_exception:
324         /* Call fn(arg) */                     !! 1249         FLUSH_ALL_KERNEL_WINDOWS;
325         move a0, s1                            !! 1250         ldd     [%o0], %o6
326         jalr s0                                !! 1251         jmpl    %o7 + 0xc, %g0                  ! see asm-sparc/processor.h
                                                   >> 1252          mov    1, %g1                          ! signal EFAULT condition
                                                   >> 1253 
                                                   >> 1254         .align  4
                                                   >> 1255         .globl  kill_user_windows, kuw_patch1_7win
                                                   >> 1256         .globl  kuw_patch1
                                                   >> 1257 kuw_patch1_7win:        sll     %o3, 6, %o3
                                                   >> 1258 
                                                   >> 1259         /* No matter how much overhead this routine has in the worst
                                                   >> 1260          * case scenario, it is several times better than taking the
                                                   >> 1261          * traps with the old method of just doing flush_user_windows().
                                                   >> 1262          */
                                                   >> 1263 kill_user_windows:
                                                   >> 1264         ld      [%g6 + TI_UWINMASK], %o0        ! get current umask
                                                   >> 1265         orcc    %g0, %o0, %g0                   ! if no bits set, we are done
                                                   >> 1266         be      3f                              ! nothing to do
                                                   >> 1267          rd     %psr, %o5                       ! must clear interrupts
                                                   >> 1268         or      %o5, PSR_PIL, %o4               ! or else that could change
                                                   >> 1269         wr      %o4, 0x0, %psr                  ! the uwinmask state
                                                   >> 1270         WRITE_PAUSE                             ! burn them cycles
327 1:                                                1271 1:
328         move a0, sp /* pt_regs */              !! 1272         ld      [%g6 + TI_UWINMASK], %o0        ! get consistent state
329         call syscall_exit_to_user_mode         !! 1273         orcc    %g0, %o0, %g0                   ! did an interrupt come in?
330         j ret_from_exception                   !! 1274         be      4f                              ! yep, we are done
331 SYM_CODE_END(ret_from_fork)                    !! 1275          rd     %wim, %o3                       ! get current wim
332                                                !! 1276         srl     %o3, 1, %o4                     ! simulate a save
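ret_from_fork above branches on s0: for a kernel thread s0 holds the thread function and s1 its argument, otherwise s0 is zero and control falls through to syscall_exit_to_user_mode() with the child's pt_regs. The sketch below shows how the C side could establish that contract when a task is created; it is modelled on (but not copied from) the RISC-V copy_thread(), so the thread.s[]/thread.ra field names and the exact kernel_clone_args handling are assumptions.

#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>   /* task_pt_regs() */
#include <linux/string.h>

extern asmlinkage void ret_from_fork(void);

/* Sketch of the setup that makes the s0 test in ret_from_fork work. */
static int copy_thread_sketch(struct task_struct *p,
                              const struct kernel_clone_args *args)
{
        struct pt_regs *childregs = task_pt_regs(p);

        if (args->fn) {
                /* Kernel thread: ret_from_fork sees s0 != 0 and calls fn(arg). */
                memset(childregs, 0, sizeof(*childregs));
                p->thread.s[0] = (unsigned long)args->fn;     /* restored into s0 */
                p->thread.s[1] = (unsigned long)args->fn_arg; /* restored into s1 */
        } else {
                /* Forked user task: s0 == 0, so ret_from_fork falls through to
                 * syscall_exit_to_user_mode(regs) and then ret_from_exception. */
                *childregs = *task_pt_regs(current);
                childregs->a0 = 0;            /* child's fork() return value */
                p->thread.s[0] = 0;
        }
        p->thread.ra = (unsigned long)ret_from_fork; /* where __switch_to returns */
        p->thread.sp = (unsigned long)childregs;     /* child's kernel sp */
        return 0;
}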
333 #ifdef CONFIG_IRQ_STACKS                       !! 1277 kuw_patch1:
334 /*                                             !! 1278         sll     %o3, 7, %o3                     ! compute next wim
335  * void call_on_irq_stack(struct pt_regs *regs,  !! 1279         or      %o4, %o3, %o3                   ! result
336  *                        void (*func)(struct pt_regs *));  !! 1280         andncc  %o0, %o3, %o0                   ! clean this bit in umask
337  *                                             !! 1281         bne     kuw_patch1                      ! not done yet
338  * Calls func(regs) using the per-CPU IRQ stack.  !! 1282          srl    %o3, 1, %o4                     ! begin another save simulation
339  */                                            !! 1283         wr      %o3, 0x0, %wim                  ! set the new wim
340 SYM_FUNC_START(call_on_irq_stack)              !! 1284         st      %g0, [%g6 + TI_UWINMASK]        ! clear uwinmask
341         /* Create a frame record to save ra and s0 (fp) */  !! 1285 4:
342         addi    sp, sp, -STACKFRAME_SIZE_ON_STACK  !! 1286         wr      %o5, 0x0, %psr                  ! re-enable interrupts
343         REG_S   ra, STACKFRAME_RA(sp)          !! 1287         WRITE_PAUSE                             ! burn baby burn
344         REG_S   s0, STACKFRAME_FP(sp)          !! 1288 3:
345         addi    s0, sp, STACKFRAME_SIZE_ON_STACK  !! 1289         retl                                    ! return
346                                                !! 1290          st     %g0, [%g6 + TI_W_SAVED]         ! no windows saved
347         /* Switch to the per-CPU shadow call stack */  <<
348         scs_save_current                       << 
349         scs_load_irq_stack t0                  << 
350                                                << 
351         /* Switch to the per-CPU IRQ stack and call the handler */  <<
352         load_per_cpu t0, irq_stack_ptr, t1     << 
353         li      t1, IRQ_STACK_SIZE             << 
354         add     sp, t0, t1                     << 
355         jalr    a1                             << 
356                                                << 
357         /* Switch back to the thread shadow call stack */  <<
358         scs_load_current                       << 
359                                                << 
360         /* Switch back to the thread stack and restore ra and s0 */  <<
361         addi    sp, s0, -STACKFRAME_SIZE_ON_STACK  <<
362         REG_L   ra, STACKFRAME_RA(sp)          << 
363         REG_L   s0, STACKFRAME_FP(sp)          << 
364         addi    sp, sp, STACKFRAME_SIZE_ON_STACK  <<
365                                                   1291 
366         ret                                    !! 1292         .align  4
367 SYM_FUNC_END(call_on_irq_stack)                !! 1293         .globl  restore_current
368 #endif /* CONFIG_IRQ_STACKS */                 !! 1294 restore_current:
                                                   >> 1295         LOAD_CURRENT(g6, o0)
                                                   >> 1296         retl
                                                   >> 1297          nop
                                                   >> 1298 
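call_on_irq_stack above is where the kernel switches onto the per-CPU IRQ stack: it records ra/s0 in a frame record, saves the shadow call stack, points sp at irq_stack_ptr + IRQ_STACK_SIZE, and jumps through a1. The sketch below shows how a caller would typically use it; the handler name handle_riscv_irq and the exact guard are assumptions, but the on_thread_stack() test captures the reason the helper exists: only an interrupt taken while still on the task's kernel stack should migrate to the IRQ stack.

#include <linux/kernel.h>
#include <asm/ptrace.h>
/* on_thread_stack() and CONFIG_IRQ_STACKS are assumed to come from the
 * architecture headers; they are not defined in this listing. */

/* Prototype matching the comment block above. */
asmlinkage void call_on_irq_stack(struct pt_regs *regs,
                                  void (*func)(struct pt_regs *));

static void handle_riscv_irq(struct pt_regs *regs);  /* assumed handler name */

/* Sketch of a caller: run the handler on the IRQ stack unless we are
 * already off the task's kernel stack (e.g. a nested interrupt). */
static void do_irq_sketch(struct pt_regs *regs)
{
        if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
                call_on_irq_stack(regs, handle_riscv_irq);
        else
                handle_riscv_irq(regs);
}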
                                                   >> 1299 #ifdef CONFIG_PCIC_PCI
                                                   >> 1300 #include <asm/pcic.h>
                                                   >> 1301 
                                                   >> 1302         .align  4
                                                   >> 1303         .globl  linux_trap_ipi15_pcic
                                                   >> 1304 linux_trap_ipi15_pcic:
                                                   >> 1305         rd      %wim, %l3
                                                   >> 1306         SAVE_ALL
369                                                   1307 
370 /*                                             !! 1308         /*
371  * Integer register context switch             !! 1309          * First deactivate NMI
372  * The callee-saved registers must be saved and restored.  !! 1310          * or we cannot drop ET, cannot get window spill traps.
373  *                                             !! 1311          * The busy loop is necessary because the PIO error
374  *   a0: previous task_struct (must be preserved across the switch)  !! 1312          * sometimes does not go away quickly and we trap again.
375  *   a1: next task_struct                      !! 1313          */
376  *                                             !! 1314         sethi   %hi(pcic_regs), %o1
377  * The value of a0 and a1 must be preserved by this function, as that's how  !! 1315         ld      [%o1 + %lo(pcic_regs)], %o2
378  * arguments are passed to schedule_tail.      !! 1316 
379  */                                            !! 1317         ! Get pending status for printouts later.
380 SYM_FUNC_START(__switch_to)                    !! 1318         ld      [%o2 + PCI_SYS_INT_PENDING], %o0
381         /* Save context into prev->thread */   !! 1319 
382         li    a4,  TASK_THREAD_RA              !! 1320         mov     PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
383         add   a3, a0, a4                       !! 1321         stb     %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
384         add   a4, a1, a4                       !! 1322 1:
385         REG_S ra,  TASK_THREAD_RA_RA(a3)       !! 1323         ld      [%o2 + PCI_SYS_INT_PENDING], %o1
386         REG_S sp,  TASK_THREAD_SP_RA(a3)       !! 1324         andcc   %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
387         REG_S s0,  TASK_THREAD_S0_RA(a3)       !! 1325         bne     1b
388         REG_S s1,  TASK_THREAD_S1_RA(a3)       !! 1326          nop
389         REG_S s2,  TASK_THREAD_S2_RA(a3)       !! 1327 
390         REG_S s3,  TASK_THREAD_S3_RA(a3)       !! 1328         or      %l0, PSR_PIL, %l4
391         REG_S s4,  TASK_THREAD_S4_RA(a3)       !! 1329         wr      %l4, 0x0, %psr
392         REG_S s5,  TASK_THREAD_S5_RA(a3)       !! 1330         WRITE_PAUSE
393         REG_S s6,  TASK_THREAD_S6_RA(a3)       !! 1331         wr      %l4, PSR_ET, %psr
394         REG_S s7,  TASK_THREAD_S7_RA(a3)       !! 1332         WRITE_PAUSE
395         REG_S s8,  TASK_THREAD_S8_RA(a3)       !! 1333 
396         REG_S s9,  TASK_THREAD_S9_RA(a3)       !! 1334         call    pcic_nmi
397         REG_S s10, TASK_THREAD_S10_RA(a3)      !! 1335          add    %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
398         REG_S s11, TASK_THREAD_S11_RA(a3)      !! 1336         RESTORE_ALL
399         /* Save the kernel shadow call stack pointer */  !! 1337
400         scs_save_current                       !! 1338         .globl  pcic_nmi_trap_patch
401         /* Restore context from next->thread */  !! 1339 pcic_nmi_trap_patch:
402         REG_L ra,  TASK_THREAD_RA_RA(a4)       !! 1340         sethi   %hi(linux_trap_ipi15_pcic), %l3
403         REG_L sp,  TASK_THREAD_SP_RA(a4)       !! 1341         jmpl    %l3 + %lo(linux_trap_ipi15_pcic), %g0
404         REG_L s0,  TASK_THREAD_S0_RA(a4)       !! 1342          rd     %psr, %l0
405         REG_L s1,  TASK_THREAD_S1_RA(a4)       !! 1343         .word   0
406         REG_L s2,  TASK_THREAD_S2_RA(a4)       !! 1344 
407         REG_L s3,  TASK_THREAD_S3_RA(a4)       !! 1345 #endif /* CONFIG_PCIC_PCI */
408         REG_L s4,  TASK_THREAD_S4_RA(a4)       !! 1346 
409         REG_L s5,  TASK_THREAD_S5_RA(a4)       !! 1347         .globl  flushw_all
410         REG_L s6,  TASK_THREAD_S6_RA(a4)       !! 1348 flushw_all:
411         REG_L s7,  TASK_THREAD_S7_RA(a4)       !! 1349         save    %sp, -0x40, %sp
412         REG_L s8,  TASK_THREAD_S8_RA(a4)       !! 1350         save    %sp, -0x40, %sp
413         REG_L s9,  TASK_THREAD_S9_RA(a4)       !! 1351         save    %sp, -0x40, %sp
414         REG_L s10, TASK_THREAD_S10_RA(a4)      !! 1352         save    %sp, -0x40, %sp
415         REG_L s11, TASK_THREAD_S11_RA(a4)      !! 1353         save    %sp, -0x40, %sp
416         /* The offset of thread_info in task_struct is zero. */  !! 1354         save    %sp, -0x40, %sp
417         move tp, a1                            !! 1355         save    %sp, -0x40, %sp
418         /* Switch to the next shadow call stack */  !! 1356         restore
419         scs_load_current                       !! 1357         restore
                                                   >> 1358         restore
                                                   >> 1359         restore
                                                   >> 1360         restore
                                                   >> 1361         restore
420         ret                                       1362         ret
421 SYM_FUNC_END(__switch_to)                      !! 1363          restore
422                                                   1364 
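As the comment above says, __switch_to only handles the integer callee-saved state, and a0/a1 pass straight through so the caller gets the previous task back and schedule_tail() receives it in the new context. A hedged sketch of the C-side contract follows; the real asm/switch_to.h macro also hands off FPU/vector state and is more involved, so everything here beyond the __switch_to prototype itself is a simplification.

#include <linux/sched.h>

/* The assembly routine above; it returns its first argument unchanged. */
extern struct task_struct *__switch_to(struct task_struct *prev,
                                       struct task_struct *next);

/* Simplified model of the switch_to() macro used by the scheduler core;
 * the real macro migrates FPU/vector state before making this call. */
#define switch_to_sketch(prev, next, last)                      \
do {                                                            \
        (last) = __switch_to((prev), (next));                   \
} while (0)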
423 #ifndef CONFIG_MMU                             !! 1365 #ifdef CONFIG_SMP
424 #define do_page_fault do_trap_unknown          !! 1366 ENTRY(hard_smp_processor_id)
                                                   >> 1367 661:    rd              %tbr, %g1
                                                   >> 1368         srl             %g1, 12, %o0
                                                   >> 1369         and             %o0, 3, %o0
                                                   >> 1370         .section        .cpuid_patch, "ax"
                                                   >> 1371         /* Instruction location. */
                                                   >> 1372         .word           661b
                                                   >> 1373         /* SUN4D implementation. */
                                                   >> 1374         lda             [%g0] ASI_M_VIKING_TMP1, %o0
                                                   >> 1375         nop
                                                   >> 1376         nop
                                                   >> 1377         /* LEON implementation. */
                                                   >> 1378         rd              %asr17, %o0
                                                   >> 1379         srl             %o0, 0x1c, %o0
                                                   >> 1380         nop
                                                   >> 1381         .previous
                                                   >> 1382         retl
                                                   >> 1383          nop
                                                   >> 1384 ENDPROC(hard_smp_processor_id)
425 #endif                                            1385 #endif
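hard_smp_processor_id() on the SPARC side reads the CPU id with a generic %tbr sequence, but tags the site in a .cpuid_patch section so early boot can overwrite those three instructions with the SUN4D or LEON variant recorded after the .word 661b entry. The sketch below models how boot code could walk such a section; the struct and function names are illustrative only, chosen to mirror the one-word-address plus two three-word-alternative layout visible in the asm above.

/* Layout implied by the .cpuid_patch emission above (names are illustrative). */
struct cpuid_patch_sketch {
        unsigned int addr;      /* address of the generic rd %tbr sequence      */
        unsigned int sun4d[3];  /* lda [%g0] ASI_M_VIKING_TMP1, %o0 ; nop ; nop */
        unsigned int leon[3];   /* rd %asr17, %o0 ; srl %o0, 0x1c, %o0 ; nop    */
};

static void apply_cpuid_patches_sketch(struct cpuid_patch_sketch *p,
                                       struct cpuid_patch_sketch *end,
                                       int is_leon)
{
        for (; p < end; p++) {
                unsigned int *site = (unsigned int *)(unsigned long)p->addr;
                const unsigned int *repl = is_leon ? p->leon : p->sun4d;

                /* Overwrite the generic three-instruction sequence; the real
                 * code would also flush the patched words from the I-cache. */
                site[0] = repl[0];
                site[1] = repl[1];
                site[2] = repl[2];
        }
}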
426                                                   1386 
427         .section ".rodata"                     !! 1387 /* End of entry.S */
428         .align LGREG                           << 
429         /* Exception vector table */           << 
430 SYM_DATA_START_LOCAL(excp_vect_table)          << 
431         RISCV_PTR do_trap_insn_misaligned      << 
432         ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)  <<
433         RISCV_PTR do_trap_insn_illegal         << 
434         RISCV_PTR do_trap_break                << 
435         RISCV_PTR do_trap_load_misaligned      << 
436         RISCV_PTR do_trap_load_fault           << 
437         RISCV_PTR do_trap_store_misaligned     << 
438         RISCV_PTR do_trap_store_fault          << 
439         RISCV_PTR do_trap_ecall_u /* system call */  <<
440         RISCV_PTR do_trap_ecall_s              << 
441         RISCV_PTR do_trap_unknown              << 
442         RISCV_PTR do_trap_ecall_m              << 
443         /* instruction page fault */           << 
444         ALT_PAGE_FAULT(RISCV_PTR do_page_fault)  <<
445         RISCV_PTR do_page_fault   /* load page fault */  <<
446         RISCV_PTR do_trap_unknown              << 
447         RISCV_PTR do_page_fault   /* store page fault */  <<
448 SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)  <<
449                                                << 
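excp_vect_table above is an array of code pointers indexed by the synchronous exception cause value (0 = instruction address misaligned, 8 = ecall from U-mode, 12/13/15 = page faults), with the ALT_* wrappers allowing individual slots to be patched by the alternatives mechanism at boot. The assembly dispatcher earlier in this file scales the cause by the pointer size, bounds-checks it against excp_vect_table_end and jumps through the slot; a C model of that lookup is sketched below, assuming handlers that take the saved pt_regs.

#include <asm/ptrace.h>

/* C model of the table dispatch done in assembly; not the kernel's own code. */
typedef void (*excp_handler_t)(struct pt_regs *regs);

extern const excp_handler_t excp_vect_table[];      /* table above       */
extern const excp_handler_t excp_vect_table_end[];  /* its end label     */
extern void do_trap_unknown(struct pt_regs *regs);  /* fallback handler  */

static void dispatch_exception_sketch(struct pt_regs *regs, unsigned long cause)
{
        /* Interrupts (scause with the top bit set) never reach this table. */
        if (cause >= (unsigned long)(excp_vect_table_end - excp_vect_table))
                do_trap_unknown(regs);            /* out-of-range cause        */
        else
                excp_vect_table[cause](regs);     /* e.g. 8 -> do_trap_ecall_u */
}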
450 #ifndef CONFIG_MMU                             << 
451 SYM_DATA_START(__user_rt_sigreturn)            << 
452         li a7, __NR_rt_sigreturn               << 
453         ecall                                  << 
454 SYM_DATA_END(__user_rt_sigreturn)              << 
455 #endif                                         << 
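On !CONFIG_MMU kernels there is no vDSO to host a signal-return trampoline, so the two instructions above (load __NR_rt_sigreturn into a7, then ecall) are kept in the kernel image and handed to user space during signal delivery. The sketch below shows the shape of that hand-off; the frame layout and field names are assumptions about the signal code, not something this listing specifies.

#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/ptrace.h>

/* The two-instruction trampoline emitted above, visible to C as data. */
extern const u32 __user_rt_sigreturn[2];

struct rt_sigframe_sketch {              /* illustrative layout only */
        u32 sigreturn_code[2];           /* destination for the trampoline */
};

/* Sketch: copy the trampoline onto the user signal frame and point the
 * handler's return address at the copy, so rt_sigreturn is issued when
 * the signal handler returns. */
static int setup_sigreturn_sketch(struct rt_sigframe_sketch __user *frame,
                                  struct pt_regs *regs)
{
        if (copy_to_user(frame->sigreturn_code, __user_rt_sigreturn,
                         sizeof(frame->sigreturn_code)))
                return -EFAULT;

        regs->ra = (unsigned long)frame->sigreturn_code;
        return 0;
}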
                                                      
