
TOMOYO Linux Cross Reference
Linux/arch/microblaze/kernel/entry.S


Diff markup

Differences between /arch/microblaze/kernel/entry.S (Architecture microblaze) and /arch/sparc/kernel/entry.S (Architecture sparc)


  1 /*                                             !!   1 /* SPDX-License-Identifier: GPL-2.0 */
  2  * Low-level system-call handling, trap handle !!   2 /* arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
  3  *                                                  3  *
  4  * Copyright (C) 2008-2009 Michal Simek <monstr !!   4  * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
  5  * Copyright (C) 2008-2009 PetaLogix           !!   5  * Copyright (C) 1996 Eddie C. Dost   (ecd@skynet.be)
  6  * Copyright (C) 2003           John Williams < !!   6  * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
  7  * Copyright (C) 2001,2002      NEC Corporatio !!   7  * Copyright (C) 1996-1999 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
  8  * Copyright (C) 2001,2002      Miles Bader <mi !!   8  * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
  9  *                                             << 
 10  * This file is subject to the terms and condi << 
 11  * Public License. See the file COPYING in the << 
 12  * archive for more details.                   << 
 13  *                                             << 
 14  * Written by Miles Bader <miles@gnu.org>       << 
 15  * Heavily modified by John Williams for Micro << 
 16  */                                                 9  */
 17                                                    10 
 18 #include <linux/sys.h>                         !!  11 #include <linux/export.h>
 19 #include <linux/linkage.h>                         12 #include <linux/linkage.h>
                                                   >>  13 #include <linux/errno.h>
                                                   >>  14 #include <linux/pgtable.h>
 20                                                    15 
 21 #include <asm/entry.h>                         !!  16 #include <asm/head.h>
 22 #include <asm/current.h>                       !!  17 #include <asm/asi.h>
 23 #include <asm/processor.h>                     !!  18 #include <asm/smp.h>
 24 #include <asm/exceptions.h>                    !!  19 #include <asm/contregs.h>
                                                   >>  20 #include <asm/ptrace.h>
 25 #include <asm/asm-offsets.h>                       21 #include <asm/asm-offsets.h>
 26 #include <asm/thread_info.h>                   !!  22 #include <asm/psr.h>
 27                                                !!  23 #include <asm/vaddrs.h>
 28 #include <asm/page.h>                              24 #include <asm/page.h>
                                                   >>  25 #include <asm/winmacro.h>
                                                   >>  26 #include <asm/signal.h>
                                                   >>  27 #include <asm/obio.h>
                                                   >>  28 #include <asm/mxcc.h>
                                                   >>  29 #include <asm/thread_info.h>
                                                   >>  30 #include <asm/param.h>
 29 #include <asm/unistd.h>                            31 #include <asm/unistd.h>
 30 #include <asm/xilinx_mb_manager.h>             << 
 31                                                    32 
 32 #include <linux/errno.h>                       !!  33 #include <asm/asmmacro.h>
 33 #include <asm/signal.h>                        << 
 34 #include <asm/mmu.h>                           << 
 35                                                    34 
 36 #undef DEBUG                                   !!  35 #define curptr      g6
 37                                                    36 
 38 #ifdef DEBUG                                   !!  37 /* These are just handy. */
 39 /* Create space for syscalls counting. */      !!  38 #define _SV     save    %sp, -STACKFRAME_SZ, %sp
 40 .section .data                                 !!  39 #define _RS     restore 
 41 .global syscall_debug_table                    << 
 42 .align 4                                       << 
 43 syscall_debug_table:                           << 
 44         .space  (__NR_syscalls * 4)            << 
 45 #endif /* DEBUG */                             << 
 46                                                << 
 47 #define C_ENTRY(name)   .globl name; .align 4; << 
 48                                                << 
 49 /*                                             << 
 50  * Various ways of setting and clearing BIP in << 
 51  * This is mucky, but necessary using microbla << 
 52  * allows msr ops to write to BIP              << 
 53  */                                            << 
 54 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR    << 
 55         .macro  clear_bip                      << 
 56         msrclr  r0, MSR_BIP                    << 
 57         .endm                                  << 
 58                                                << 
 59         .macro  set_bip                        << 
 60         msrset  r0, MSR_BIP                    << 
 61         .endm                                  << 
 62                                                << 
 63         .macro  clear_eip                      << 
 64         msrclr  r0, MSR_EIP                    << 
 65         .endm                                  << 
 66                                                << 
 67         .macro  set_ee                         << 
 68         msrset  r0, MSR_EE                     << 
 69         .endm                                  << 
 70                                                << 
 71         .macro  disable_irq                    << 
 72         msrclr  r0, MSR_IE                     << 
 73         .endm                                  << 
 74                                                << 
 75         .macro  enable_irq                     << 
 76         msrset  r0, MSR_IE                     << 
 77         .endm                                  << 
 78                                                << 
 79         .macro  set_ums                        << 
 80         msrset  r0, MSR_UMS                    << 
 81         msrclr  r0, MSR_VMS                    << 
 82         .endm                                  << 
 83                                                << 
 84         .macro  set_vms                        << 
 85         msrclr  r0, MSR_UMS                    << 
 86         msrset  r0, MSR_VMS                    << 
 87         .endm                                  << 
 88                                                << 
 89         .macro  clear_ums                      << 
 90         msrclr  r0, MSR_UMS                    << 
 91         .endm                                  << 
 92                                                << 
 93         .macro  clear_vms_ums                  << 
 94         msrclr  r0, MSR_VMS | MSR_UMS          << 
 95         .endm                                  << 
 96 #else                                          << 
 97         .macro  clear_bip                      << 
 98         mfs     r11, rmsr                      << 
 99         andi    r11, r11, ~MSR_BIP             << 
100         mts     rmsr, r11                      << 
101         .endm                                  << 
102                                                << 
103         .macro  set_bip                        << 
104         mfs     r11, rmsr                      << 
105         ori     r11, r11, MSR_BIP              << 
106         mts     rmsr, r11                      << 
107         .endm                                  << 
108                                                << 
109         .macro  clear_eip                      << 
110         mfs     r11, rmsr                      << 
111         andi    r11, r11, ~MSR_EIP             << 
112         mts     rmsr, r11                      << 
113         .endm                                  << 
114                                                << 
115         .macro  set_ee                         << 
116         mfs     r11, rmsr                      << 
117         ori     r11, r11, MSR_EE               << 
118         mts     rmsr, r11                      << 
119         .endm                                  << 
120                                                << 
121         .macro  disable_irq                    << 
122         mfs     r11, rmsr                      << 
123         andi    r11, r11, ~MSR_IE              << 
124         mts     rmsr, r11                      << 
125         .endm                                  << 
126                                                << 
127         .macro  enable_irq                     << 
128         mfs     r11, rmsr                      << 
129         ori     r11, r11, MSR_IE               << 
130         mts     rmsr, r11                      << 
131         .endm                                  << 
132                                                << 
133         .macro set_ums                         << 
134         mfs     r11, rmsr                      << 
135         ori     r11, r11, MSR_VMS              << 
136         andni   r11, r11, MSR_UMS              << 
137         mts     rmsr, r11                      << 
138         .endm                                  << 
139                                                << 
140         .macro  set_vms                        << 
141         mfs     r11, rmsr                      << 
142         ori     r11, r11, MSR_VMS              << 
143         andni   r11, r11, MSR_UMS              << 
144         mts     rmsr, r11                      << 
145         .endm                                  << 
146                                                << 
147         .macro  clear_ums                      << 
148         mfs     r11, rmsr                      << 
149         andni   r11, r11, MSR_UMS              << 
150         mts     rmsr,r11                       << 
151         .endm                                  << 
152                                                << 
153         .macro  clear_vms_ums                  << 
154         mfs     r11, rmsr                      << 
155         andni   r11, r11, (MSR_VMS|MSR_UMS)    << 
156         mts     rmsr,r11                       << 
157         .endm                                  << 
158 #endif                                         << 
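
The block above provides the same set of MSR helpers in two flavors: with CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR a single msrset/msrclr updates the bit directly, otherwise the bit is flipped by an explicit read-modify-write through r11 (mfs, then ori/andni, then mts). A minimal C sketch of that fallback strategy, with a fake_msr variable standing in for the hardware register so the example is self-contained:

    /* Sketch only: fake_msr and the helpers below model "mfs rX, rmsr" /
     * "mts rmsr, rX"; the MSR_IE value is an assumption for this sketch,
     * the real definition lives in the MicroBlaze register headers. */
    #define MSR_IE (1 << 1)                 /* Interrupt Enable bit (assumed) */

    static unsigned long fake_msr;          /* stand-in for the hardware MSR */

    static inline unsigned long read_msr(void)    { return fake_msr; }
    static inline void write_msr(unsigned long v) { fake_msr = v; }

    static inline void disable_irq_rmw(void)
    {
            write_msr(read_msr() & ~MSR_IE);    /* mirrors the disable_irq macro */
    }

    static inline void enable_irq_rmw(void)
    {
            write_msr(read_msr() | MSR_IE);     /* mirrors the enable_irq macro */
    }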
159                                                    40 
160 /* Define how to call high-level functions. Wi !!  41 #define FLUSH_ALL_KERNEL_WINDOWS \
161  * enabled when calling the high-level functio !!  42         _SV; _SV; _SV; _SV; _SV; _SV; _SV; \
162  * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL      !!  43         _RS; _RS; _RS; _RS; _RS; _RS; _RS;
163  */                                            << 
164                                                    44 
165 /* turn on virtual protected mode save */      !!  45         .text
166 #define VM_ON           \                      << 
167         set_ums;        \                      << 
168         rted    r0, 2f; \                      << 
169         nop; \                                 << 
170 2:                                             << 
171                                                    46 
172 /* turn off virtual protected mode save and us !!  47 #ifdef CONFIG_KGDB
173 #define VM_OFF                  \              !!  48         .align  4
174         clear_vms_ums;          \              !!  49         .globl          arch_kgdb_breakpoint
175         rted    r0, TOPHYS(1f); \              !!  50         .type           arch_kgdb_breakpoint,#function
176         nop; \                                 !!  51 arch_kgdb_breakpoint:
177 1:                                             !!  52         ta              0x7d
                                                   >>  53         retl
                                                   >>  54          nop
                                                   >>  55         .size           arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
                                                   >>  56 #endif
178                                                    57 
179 #define SAVE_REGS \                            !!  58 #if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
180         swi     r2, r1, PT_R2;  /* Save SDA */ !!  59         .align  4
181         swi     r3, r1, PT_R3;                 !!  60         .globl  floppy_hardint
182         swi     r4, r1, PT_R4;                 !!  61 floppy_hardint:
183         swi     r5, r1, PT_R5;                 !!  62         /*
184         swi     r6, r1, PT_R6;                 !!  63          * This code cannot touch registers %l0 %l1 and %l2
185         swi     r7, r1, PT_R7;                 !!  64          * because SAVE_ALL depends on their values. It depends
186         swi     r8, r1, PT_R8;                 !!  65          * on %l3 also, but we regenerate it before a call.
187         swi     r9, r1, PT_R9;                 !!  66          * Other registers are:
188         swi     r10, r1, PT_R10;               !!  67          * %l3 -- base address of fdc registers
189         swi     r11, r1, PT_R11;        /* sav !!  68          * %l4 -- pdma_vaddr
190         swi     r12, r1, PT_R12;               !!  69          * %l5 -- scratch for ld/st address
191         swi     r13, r1, PT_R13;        /* Sav !!  70          * %l6 -- pdma_size
192         swi     r14, r1, PT_PC; /* PC, before  !!  71          * %l7 -- scratch [floppy byte, ld/st address, aux. data]
193         swi     r15, r1, PT_R15;        /* Sav !!  72          */
194         swi     r16, r1, PT_R16;               << 
195         swi     r17, r1, PT_R17;               << 
196         swi     r18, r1, PT_R18;        /* Sav << 
197         swi     r19, r1, PT_R19;               << 
198         swi     r20, r1, PT_R20;               << 
199         swi     r21, r1, PT_R21;               << 
200         swi     r22, r1, PT_R22;               << 
201         swi     r23, r1, PT_R23;               << 
202         swi     r24, r1, PT_R24;               << 
203         swi     r25, r1, PT_R25;               << 
204         swi     r26, r1, PT_R26;               << 
205         swi     r27, r1, PT_R27;               << 
206         swi     r28, r1, PT_R28;               << 
207         swi     r29, r1, PT_R29;               << 
208         swi     r30, r1, PT_R30;               << 
209         swi     r31, r1, PT_R31;        /* Sav << 
210         mfs     r11, rmsr;              /* sav << 
211         swi     r11, r1, PT_MSR;               << 
212                                                << 
213 #define RESTORE_REGS_GP \                      << 
214         lwi     r2, r1, PT_R2;  /* restore SDA << 
215         lwi     r3, r1, PT_R3;                 << 
216         lwi     r4, r1, PT_R4;                 << 
217         lwi     r5, r1, PT_R5;                 << 
218         lwi     r6, r1, PT_R6;                 << 
219         lwi     r7, r1, PT_R7;                 << 
220         lwi     r8, r1, PT_R8;                 << 
221         lwi     r9, r1, PT_R9;                 << 
222         lwi     r10, r1, PT_R10;               << 
223         lwi     r11, r1, PT_R11;        /* res << 
224         lwi     r12, r1, PT_R12;               << 
225         lwi     r13, r1, PT_R13;        /* res << 
226         lwi     r14, r1, PT_PC; /* RESTORE_LIN << 
227         lwi     r15, r1, PT_R15;        /* res << 
228         lwi     r16, r1, PT_R16;               << 
229         lwi     r17, r1, PT_R17;               << 
230         lwi     r18, r1, PT_R18;        /* res << 
231         lwi     r19, r1, PT_R19;               << 
232         lwi     r20, r1, PT_R20;               << 
233         lwi     r21, r1, PT_R21;               << 
234         lwi     r22, r1, PT_R22;               << 
235         lwi     r23, r1, PT_R23;               << 
236         lwi     r24, r1, PT_R24;               << 
237         lwi     r25, r1, PT_R25;               << 
238         lwi     r26, r1, PT_R26;               << 
239         lwi     r27, r1, PT_R27;               << 
240         lwi     r28, r1, PT_R28;               << 
241         lwi     r29, r1, PT_R29;               << 
242         lwi     r30, r1, PT_R30;               << 
243         lwi     r31, r1, PT_R31;        /* Res << 
244                                                << 
245 #define RESTORE_REGS \                         << 
246         lwi     r11, r1, PT_MSR;               << 
247         mts     rmsr , r11;                    << 
248         RESTORE_REGS_GP                        << 
249                                                << 
250 #define RESTORE_REGS_RTBD \                    << 
251         lwi     r11, r1, PT_MSR;               << 
252         andni   r11, r11, MSR_EIP;          /* << 
253         ori     r11, r11, MSR_EE | MSR_BIP; /* << 
254         mts     rmsr , r11;                    << 
255         RESTORE_REGS_GP                        << 
256                                                << 
257 #define SAVE_STATE      \                      << 
258         swi     r1, r0, TOPHYS(PER_CPU(ENTRY_S << 
259         /* See if already in kernel mode.*/    << 
260         mfs     r1, rmsr;                      << 
261         andi    r1, r1, MSR_UMS;               << 
262         bnei    r1, 1f;                        << 
263         /* Kernel-mode state save.  */         << 
264         /* Reload kernel stack-ptr. */         << 
265         lwi     r1, r0, TOPHYS(PER_CPU(ENTRY_S << 
266         /* FIXME: I can add these two lines to << 
267         /* tophys(r1,r1); */                   << 
268         /* addik        r1, r1, -PT_SIZE; */   << 
269         addik   r1, r1, CONFIG_KERNEL_BASE_ADD << 
270         SAVE_REGS                              << 
271         brid    2f;                            << 
272         swi     r1, r1, PT_MODE;               << 
273 1:      /* User-mode state save.  */           << 
274         lwi     r1, r0, TOPHYS(PER_CPU(CURRENT << 
275         tophys(r1,r1);                         << 
276         lwi     r1, r1, TS_THREAD_INFO; /* get << 
277         /* MS these three instructions can be  << 
278         /* addik        r1, r1, THREAD_SIZE; * << 
279         /* tophys(r1,r1); */                   << 
280         /* addik        r1, r1, -PT_SIZE; */   << 
281         addik r1, r1, THREAD_SIZE + CONFIG_KER << 
282         SAVE_REGS                              << 
283         lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_ << 
284         swi     r11, r1, PT_R1; /* Store user  << 
285         swi     r0, r1, PT_MODE; /* Was in use << 
286         /* MS: I am clearing UMS even in case  << 
287         clear_ums;                             << 
288 2:      lwi     CURRENT_TASK, r0, TOPHYS(PER_C << 
289                                                << 
290 .text                                          << 
291                                                << 
292 .extern cpuinfo                                << 
293                                                << 
294 C_ENTRY(mb_flush_dcache):                      << 
295         addik   r1, r1, -PT_SIZE               << 
296         SAVE_REGS                              << 
297                                                << 
298         addik   r3, r0, cpuinfo                << 
299         lwi     r7, r3, CI_DCS                 << 
300         lwi     r8, r3, CI_DCL                 << 
301         sub     r9, r7, r8                     << 
302 1:                                             << 
303         wdc.flush r9, r0                       << 
304         bgtid   r9, 1b                         << 
305         addk    r9, r9, r8                     << 
306                                                << 
307         RESTORE_REGS                           << 
308         addik   r1, r1, PT_SIZE                << 
309         rtsd    r15, 8                         << 
310         nop                                    << 
311                                                    73 
312 C_ENTRY(mb_invalidate_icache):                 !!  74         /* Do we have work to do? */
313         addik   r1, r1, -PT_SIZE               !!  75         sethi   %hi(doing_pdma), %l7
314         SAVE_REGS                              !!  76         ld      [%l7 + %lo(doing_pdma)], %l7
315                                                !!  77         cmp     %l7, 0
316         addik   r3, r0, cpuinfo                !!  78         be      floppy_dosoftint
317         lwi     r7, r3, CI_ICS                 !!  79          nop
318         lwi     r8, r3, CI_ICL                 !!  80 
319         sub     r9, r7, r8                     !!  81         /* Load fdc register base */
320 1:                                             !!  82         sethi   %hi(fdc_status), %l3
321         wic     r9, r0                         !!  83         ld      [%l3 + %lo(fdc_status)], %l3
322         bgtid   r9, 1b                         !!  84 
323         addk    r9, r9, r8                     !!  85         /* Setup register addresses */
324                                                !!  86         sethi   %hi(pdma_vaddr), %l5    ! transfer buffer
325         RESTORE_REGS                           !!  87         ld      [%l5 + %lo(pdma_vaddr)], %l4
326         addik   r1, r1, PT_SIZE                !!  88         sethi   %hi(pdma_size), %l5     ! bytes to go
327         rtsd    r15, 8                         !!  89         ld      [%l5 + %lo(pdma_size)], %l6
328         nop                                    !!  90 next_byte:
                                                   >>  91         ldub    [%l3], %l7
                                                   >>  92 
                                                   >>  93         andcc   %l7, 0x80, %g0          ! Does fifo still have data
                                                   >>  94         bz      floppy_fifo_emptied     ! fifo has been emptied...
                                                   >>  95          andcc  %l7, 0x20, %g0          ! in non-dma mode still?
                                                   >>  96         bz      floppy_overrun          ! nope, overrun
                                                   >>  97          andcc  %l7, 0x40, %g0          ! 0=write 1=read
                                                   >>  98         bz      floppy_write
                                                   >>  99          sub    %l6, 0x1, %l6
                                                   >> 100 
                                                   >> 101         /* Ok, actually read this byte */
                                                   >> 102         ldub    [%l3 + 1], %l7
                                                   >> 103         orcc    %g0, %l6, %g0
                                                   >> 104         stb     %l7, [%l4]
                                                   >> 105         bne     next_byte
                                                   >> 106          add    %l4, 0x1, %l4
                                                   >> 107 
                                                   >> 108         b       floppy_tdone
                                                   >> 109          nop
                                                   >> 110 
                                                   >> 111 floppy_write:
                                                   >> 112         /* Ok, actually write this byte */
                                                   >> 113         ldub    [%l4], %l7
                                                   >> 114         orcc    %g0, %l6, %g0
                                                   >> 115         stb     %l7, [%l3 + 1]
                                                   >> 116         bne     next_byte
                                                   >> 117          add    %l4, 0x1, %l4
                                                   >> 118 
                                                   >> 119         /* fall through... */
                                                   >> 120 floppy_tdone:
                                                   >> 121         sethi   %hi(pdma_vaddr), %l5
                                                   >> 122         st      %l4, [%l5 + %lo(pdma_vaddr)]
                                                   >> 123         sethi   %hi(pdma_size), %l5
                                                   >> 124         st      %l6, [%l5 + %lo(pdma_size)]
                                                   >> 125         /* Flip terminal count pin */
                                                   >> 126         set     auxio_register, %l7
                                                   >> 127         ld      [%l7], %l7
                                                   >> 128 
                                                   >> 129         ldub    [%l7], %l5
                                                   >> 130 
                                                   >> 131         or      %l5, 0xc2, %l5
                                                   >> 132         stb     %l5, [%l7]
                                                   >> 133         andn    %l5, 0x02, %l5
329                                                   134 
330 /*                                             !! 135 2:
331  * User trap.                                  !! 136         /* Kill some time so the bits set */
332  *                                             !! 137         WRITE_PAUSE
333  * System calls are handled here.              !! 138         WRITE_PAUSE
334  *                                             !! 139 
335  * Syscall protocol:                           !! 140         stb     %l5, [%l7]
336  * Syscall number in r12, args in r5-r10       !! 141 
337  * Return value in r3                          !! 142         /* Prevent recursion */
338  *                                             !! 143         sethi   %hi(doing_pdma), %l7
339  * Trap entered via brki instruction, so BIP b !! 144         b       floppy_dosoftint
340  * are masked. This is nice, means we don't ha !! 145          st     %g0, [%l7 + %lo(doing_pdma)]
                                                   >> 146 
                                                   >> 147         /* We emptied the FIFO, but we haven't read everything
                                                   >> 148          * as of yet.  Store the current transfer address and
                                                   >> 149          * bytes left to read so we can continue when the next
                                                   >> 150          * fast IRQ comes in.
                                                   >> 151          */
                                                   >> 152 floppy_fifo_emptied:
                                                   >> 153         sethi   %hi(pdma_vaddr), %l5
                                                   >> 154         st      %l4, [%l5 + %lo(pdma_vaddr)]
                                                   >> 155         sethi   %hi(pdma_size), %l7
                                                   >> 156         st      %l6, [%l7 + %lo(pdma_size)]
                                                   >> 157 
                                                   >> 158         /* Restore condition codes */
                                                   >> 159         wr      %l0, 0x0, %psr
                                                   >> 160         WRITE_PAUSE
                                                   >> 161 
                                                   >> 162         jmp     %l1
                                                   >> 163         rett    %l2
                                                   >> 164 
                                                   >> 165 floppy_overrun:
                                                   >> 166         sethi   %hi(pdma_vaddr), %l5
                                                   >> 167         st      %l4, [%l5 + %lo(pdma_vaddr)]
                                                   >> 168         sethi   %hi(pdma_size), %l5
                                                   >> 169         st      %l6, [%l5 + %lo(pdma_size)]
                                                   >> 170         /* Prevent recursion */
                                                   >> 171         sethi   %hi(doing_pdma), %l7
                                                   >> 172         st      %g0, [%l7 + %lo(doing_pdma)]
                                                   >> 173 
                                                   >> 174         /* fall through... */
                                                   >> 175 floppy_dosoftint:
                                                   >> 176         rd      %wim, %l3
                                                   >> 177         SAVE_ALL
                                                   >> 178 
                                                   >> 179         /* Set all IRQs off. */
                                                   >> 180         or      %l0, PSR_PIL, %l4
                                                   >> 181         wr      %l4, 0x0, %psr
                                                   >> 182         WRITE_PAUSE
                                                   >> 183         wr      %l4, PSR_ET, %psr
                                                   >> 184         WRITE_PAUSE
                                                   >> 185 
                                                   >> 186         mov     11, %o0                 ! floppy irq level (unused anyway)
                                                   >> 187         mov     %g0, %o1                ! devid is not used in fast interrupts
                                                   >> 188         call    sparc_floppy_irq
                                                   >> 189          add    %sp, STACKFRAME_SZ, %o2 ! struct pt_regs *regs
                                                   >> 190 
                                                   >> 191         RESTORE_ALL
                                                   >> 192         
                                                   >> 193 #endif /* (CONFIG_BLK_DEV_FD) */
                                                   >> 194 
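
On the sparc side, the floppy fast-IRQ path above implements a byte-at-a-time pseudo-DMA transfer: read the FDC main status register, bail out when the FIFO empties or the controller drops out of non-DMA mode, otherwise move one byte in the direction indicated by bit 0x40 and advance pdma_vaddr while decrementing pdma_size. The same loop in C, as a sketch; the variable names and bare pointer accesses are illustrative stand-ins for the %l3/%l4/%l6 registers described in the comment at the top of floppy_hardint:

    /* Sketch of floppy_hardint's pseudo-DMA loop. Status bits are the ones
     * tested by the andcc instructions above: 0x80 = FIFO has data,
     * 0x20 = still in non-DMA mode, 0x40 = read (1) / write (0). */
    static volatile unsigned char *fdc_regs;    /* fdc_status in the asm (%l3) */
    static unsigned char *xfer_ptr;             /* pdma_vaddr (%l4) */
    static unsigned long xfer_left;             /* pdma_size (%l6) */

    static void pseudo_dma_step(void)
    {
            for (;;) {
                    unsigned char status = fdc_regs[0];

                    if (!(status & 0x80))       /* FIFO emptied: save state */
                            break;              /*   -> floppy_fifo_emptied */
                    if (!(status & 0x20))       /* left non-DMA mode */
                            break;              /*   -> floppy_overrun */

                    if (status & 0x40)          /* read a byte from the FDC */
                            *xfer_ptr++ = fdc_regs[1];
                    else                        /* write a byte to the FDC */
                            fdc_regs[1] = *xfer_ptr++;

                    if (--xfer_left == 0)       /* transfer complete */
                            break;              /*   -> floppy_tdone */
            }
    }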
                                                   >> 195         /* Bad trap handler */
                                                   >> 196         .globl  bad_trap_handler
                                                   >> 197 bad_trap_handler:
                                                   >> 198         SAVE_ALL
                                                   >> 199 
                                                   >> 200         wr      %l0, PSR_ET, %psr
                                                   >> 201         WRITE_PAUSE
                                                   >> 202 
                                                   >> 203         add     %sp, STACKFRAME_SZ, %o0 ! pt_regs
                                                   >> 204         call    do_hw_interrupt
                                                   >> 205          mov    %l7, %o1                ! trap number
                                                   >> 206 
                                                   >> 207         RESTORE_ALL
                                                   >> 208         
                                                   >> 209 /* For now all IRQ's not registered get sent here. handler_irq() will
                                                   >> 210  * see if a routine is registered to handle this interrupt and if not
                                                   >> 211  * it will say so on the console.
341  */                                               212  */
342 C_ENTRY(_user_exception):                      << 
343         swi     r1, r0, TOPHYS(PER_CPU(ENTRY_S << 
344         addi    r14, r14, 4     /* return addr << 
345                                                << 
346         lwi     r1, r0, TOPHYS(PER_CPU(CURRENT << 
347         tophys(r1,r1);                         << 
348         lwi     r1, r1, TS_THREAD_INFO; /* get << 
349 /* calculate kernel stack pointer from task st << 
350         addik   r1, r1, THREAD_SIZE;           << 
351         tophys(r1,r1);                         << 
352                                                << 
353         addik   r1, r1, -PT_SIZE; /* Make room << 
354         SAVE_REGS                              << 
355         swi     r0, r1, PT_R3                  << 
356         swi     r0, r1, PT_R4                  << 
357                                                << 
358         swi     r0, r1, PT_MODE;               << 
359         lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_ << 
360         swi     r11, r1, PT_R1;         /* Sto << 
361         clear_ums;                             << 
362 2:      lwi     CURRENT_TASK, r0, TOPHYS(PER_C << 
363         /* Save away the syscall number.  */   << 
364         swi     r12, r1, PT_R0;                << 
365         tovirt(r1,r1)                          << 
366                                                << 
367 /* where the trap should return need -8 to adj << 
368 /* Jump to the appropriate function for the sy << 
369  * (r12 is not preserved), or return an error  << 
370  * register should point to the location where << 
371  * the called function should return.  [note t << 
372                                                   213 
373         /* Step into virtual mode */           !! 214         .align  4
374         rtbd    r0, 3f                         !! 215         .globl  real_irq_entry, patch_handler_irq
375         nop                                    !! 216 real_irq_entry:
376 3:                                             !! 217         SAVE_ALL
377         lwi     r11, CURRENT_TASK, TS_THREAD_I !! 218 
378         lwi     r11, r11, TI_FLAGS       /* ge !! 219 #ifdef CONFIG_SMP
379         andi    r11, r11, _TIF_WORK_SYSCALL_MA !! 220         .globl  patchme_maybe_smp_msg
380         beqi    r11, 4f                        !! 221 
381                                                !! 222         cmp     %l7, 11
382         addik   r3, r0, -ENOSYS                !! 223 patchme_maybe_smp_msg:
383         swi     r3, r1, PT_R3                  !! 224         bgu     maybe_smp4m_msg
384         brlid   r15, do_syscall_trace_enter    !! 225          nop
385         addik   r5, r1, PT_R0                  << 
386                                                << 
387         # do_syscall_trace_enter returns the n << 
388         addk    r12, r0, r3                    << 
389         lwi     r5, r1, PT_R5;                 << 
390         lwi     r6, r1, PT_R6;                 << 
391         lwi     r7, r1, PT_R7;                 << 
392         lwi     r8, r1, PT_R8;                 << 
393         lwi     r9, r1, PT_R9;                 << 
394         lwi     r10, r1, PT_R10;               << 
395 4:                                             << 
396 /* Jump to the appropriate function for the sy << 
397  * (r12 is not preserved), or return an error  << 
398  * The LP register should point to the locatio << 
399  * should return.  [note that MAKE_SYS_CALL us << 
400         /* See if the system call number is va << 
401         blti    r12, 5f                        << 
402         addi    r11, r12, -__NR_syscalls;      << 
403         bgei    r11, 5f;                       << 
404         /* Figure out which function to use fo << 
405         /* Note Microblaze barrel shift is opt << 
406         add     r12, r12, r12;                 << 
407         add     r12, r12, r12;                 << 
408         addi    r30, r0, 1                     << 
409                                                << 
410 #ifdef DEBUG                                   << 
 411         /* Trace syscalls and store them to sy << 
412         /* The first syscall location stores t << 
413         lwi     r3, r0, syscall_debug_table    << 
414         addi    r3, r3, 1                      << 
415         swi     r3, r0, syscall_debug_table    << 
416         lwi     r3, r12, syscall_debug_table   << 
417         addi    r3, r3, 1                      << 
418         swi     r3, r12, syscall_debug_table   << 
419 #endif                                            226 #endif
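
With DEBUG defined, the block above keeps simple usage statistics in syscall_debug_table: the first slot counts every syscall taken, and the slot at the syscall number (r12 has already been multiplied by 4 to index the 4-byte table entries) counts that particular call. The same bookkeeping in C, as a sketch:

    /* Sketch of the DEBUG counting above; syscall_debug_table is the table
     * reserved in the .data section earlier in this file. */
    extern unsigned int syscall_debug_table[__NR_syscalls];

    static inline void count_syscall(unsigned int nr)
    {
            syscall_debug_table[0]++;       /* total syscalls issued */
            syscall_debug_table[nr]++;      /* per-syscall counter */
    }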
420                                                   227 
421         # Find and jump into the syscall handl !! 228 real_irq_continue:
422         lwi     r12, r12, sys_call_table       !! 229         or      %l0, PSR_PIL, %g2
423         /* where the trap should return need - !! 230         wr      %g2, 0x0, %psr
424         addi    r15, r0, ret_from_trap-8       !! 231         WRITE_PAUSE
425         bra     r12                            !! 232         wr      %g2, PSR_ET, %psr
426                                                !! 233         WRITE_PAUSE
427         /* The syscall number is invalid, retu !! 234         mov     %l7, %o0                ! irq level
428 5:                                             !! 235 patch_handler_irq:
429         braid   ret_from_trap                  !! 236         call    handler_irq
430         addi    r3, r0, -ENOSYS;               !! 237          add    %sp, STACKFRAME_SZ, %o1 ! pt_regs ptr
431                                                !! 238         or      %l0, PSR_PIL, %g2       ! restore PIL after handler_irq
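
Taken together, the trap entry and dispatch above follow the syscall protocol described earlier: the number arrives in r12, up to six arguments in r5-r10, and the result goes back in r3. The handler is looked up in sys_call_table (the two add r12,r12,r12 instructions scale the number to 4-byte entries), and anything out of range returns -ENOSYS. A C sketch of that dispatch, with an illustrative syscall_fn type and assuming the MicroBlaze pt_regs field names:

    /* Sketch of the dispatch done in assembly above; syscall_fn and the
     * direct pt_regs field accesses are illustrative, not the kernel's
     * actual declarations. */
    typedef long (*syscall_fn)(long, long, long, long, long, long);
    extern syscall_fn sys_call_table[];             /* __NR_syscalls entries */

    static long dispatch_syscall(struct pt_regs *regs)
    {
            unsigned long nr = regs->r12;           /* syscall number */

            if (nr >= __NR_syscalls)                /* the blti/bgei checks above */
                    return -ENOSYS;

            /* the result is written back to r3 by ret_from_trap */
            return sys_call_table[nr](regs->r5, regs->r6, regs->r7,
                                      regs->r8, regs->r9, regs->r10);
    }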
432 /* Entry point used to return from a syscall/t !! 239         wr      %g2, PSR_ET, %psr       ! keep ET up
433 /* We re-enable BIP bit before state restore * !! 240         WRITE_PAUSE
434 C_ENTRY(ret_from_trap):                        !! 241 
435         swi     r3, r1, PT_R3                  !! 242         RESTORE_ALL
436         swi     r4, r1, PT_R4                  !! 243 
437                                                !! 244 #ifdef CONFIG_SMP
438         lwi     r11, r1, PT_MODE;              !! 245         /* SMP per-cpu ticker interrupts are handled specially. */
439 /* See if returning to kernel mode, if so, ski !! 246 smp4m_ticker:
440         bnei    r11, 2f;                       !! 247         bne     real_irq_continue+4
441         /* We're returning to user mode, so ch !! 248          or     %l0, PSR_PIL, %g2
442          * trigger rescheduling. */            !! 249         wr      %g2, 0x0, %psr
443         /* FIXME: Restructure all these flag c !! 250         WRITE_PAUSE
444         lwi     r11, CURRENT_TASK, TS_THREAD_I !! 251         wr      %g2, PSR_ET, %psr
445         lwi     r11, r11, TI_FLAGS;            !! 252         WRITE_PAUSE
446         andi    r11, r11, _TIF_WORK_SYSCALL_MA !! 253         call    smp4m_percpu_timer_interrupt
447         beqi    r11, 1f                        !! 254          add    %sp, STACKFRAME_SZ, %o0
448                                                !! 255         wr      %l0, PSR_ET, %psr
449         brlid   r15, do_syscall_trace_leave    !! 256         WRITE_PAUSE
450         addik   r5, r1, PT_R0                  !! 257         RESTORE_ALL
451 1:                                             !! 258 
452         /* We're returning to user mode, so ch !! 259 #define GET_PROCESSOR4M_ID(reg) \
453          * trigger rescheduling. */            !! 260         rd      %tbr, %reg;     \
454         /* get thread info from current task * !! 261         srl     %reg, 12, %reg; \
455         lwi     r11, CURRENT_TASK, TS_THREAD_I !! 262         and     %reg, 3, %reg;
456         lwi     r19, r11, TI_FLAGS;            !! 263 
457         andi    r11, r19, _TIF_NEED_RESCHED;   !! 264         /* Here is where we check for possible SMP IPI passed to us
458         beqi    r11, 5f;                       !! 265          * on some level other than 15 which is the NMI and only used
459                                                !! 266          * for cross calls.  That has a separate entry point below.
460         bralid  r15, schedule;  /* Call schedu !! 267          *
461         nop;                            /* del !! 268          * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
462         bri     1b                             !! 269          */
463                                                !! 270 maybe_smp4m_msg:
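
The comment above describes how sparc32 soft-IRQ IPIs are dispatched in the maybe_smp4m_msg path that follows on the sparc side: the per-cpu pending word is masked against 0x70000000 and the three soft-IRQ bits select the handler. A C sketch of that dispatch; the function name is hypothetical, the bit assignments are read off the srl-by-28 tests, and the handlers are the call targets visible in the assembly with assumed void(void) prototypes:

    extern void smp_call_function_single_interrupt(void);
    extern void smp_call_function_interrupt(void);
    extern void smp_resched_interrupt(void);

    /* "pending" is sun4m_irq_percpu[cpu]->pending as loaded in the asm. */
    static void sun4m_ipi_dispatch(unsigned int pending)
    {
            unsigned int soft = (pending >> 28) & 0x7;

            if (soft & 0x1)
                    smp_call_function_single_interrupt();
            if (soft & 0x2)
                    smp_call_function_interrupt();
            if (soft & 0x4)
                    smp_resched_interrupt();
    }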
464         /* Maybe handle a signal */            !! 271         GET_PROCESSOR4M_ID(o3)
465 5:                                             !! 272         sethi   %hi(sun4m_irq_percpu), %l5
466         andi    r11, r19, _TIF_SIGPENDING | _T !! 273         sll     %o3, 2, %o3
467         beqi    r11, 4f;                /* Sig !! 274         or      %l5, %lo(sun4m_irq_percpu), %o5
468                                                !! 275         sethi   %hi(0x70000000), %o2    ! Check all soft-IRQs
469         addik   r5, r1, 0;              /* Arg !! 276         ld      [%o5 + %o3], %o1
470         bralid  r15, do_notify_resume;  /* Han !! 277         ld      [%o1 + 0x00], %o3       ! sun4m_irq_percpu[cpu]->pending
471         add     r6, r30, r0;            /* Arg !! 278         andcc   %o3, %o2, %g0
472         add     r30, r0, r0             /* no  !! 279         be,a    smp4m_ticker
473         bri     1b                             !! 280          cmp    %l7, 14
474                                                !! 281         /* Soft-IRQ IPI */
475 /* Finally, return to user state.  */          !! 282         st      %o2, [%o1 + 0x04]       ! sun4m_irq_percpu[cpu]->clear=0x70000000
476 4:      set_bip;                        /*  In !! 283         WRITE_PAUSE
477         swi     CURRENT_TASK, r0, PER_CPU(CURR !! 284         ld      [%o1 + 0x00], %g0       ! sun4m_irq_percpu[cpu]->pending
478         VM_OFF;                                !! 285         WRITE_PAUSE
479         tophys(r1,r1);                         !! 286         or      %l0, PSR_PIL, %l4
480         RESTORE_REGS_RTBD;                     !! 287         wr      %l4, 0x0, %psr
481         addik   r1, r1, PT_SIZE         /* Cle !! 288         WRITE_PAUSE
482         lwi     r1, r1, PT_R1 - PT_SIZE;/* Res !! 289         wr      %l4, PSR_ET, %psr
483         bri     6f;                            !! 290         WRITE_PAUSE
484                                                !! 291         srl     %o3, 28, %o2            ! shift for simpler checks below
485 /* Return to kernel state.  */                 !! 292 maybe_smp4m_msg_check_single:
486 2:      set_bip;                        /*  In !! 293         andcc   %o2, 0x1, %g0
487         VM_OFF;                                !! 294         beq,a   maybe_smp4m_msg_check_mask
488         tophys(r1,r1);                         !! 295          andcc  %o2, 0x2, %g0
489         RESTORE_REGS_RTBD;                     !! 296         call    smp_call_function_single_interrupt
490         addik   r1, r1, PT_SIZE         /* Cle !! 297          nop
491         tovirt(r1,r1);                         !! 298         andcc   %o2, 0x2, %g0
492 6:                                             !! 299 maybe_smp4m_msg_check_mask:
493 TRAP_return:            /* Make global symbol  !! 300         beq,a   maybe_smp4m_msg_check_resched
494         rtbd    r14, 0; /* Instructions to ret !! 301          andcc  %o2, 0x4, %g0
495         nop;                                   !! 302         call    smp_call_function_interrupt
496                                                !! 303          nop
497                                                !! 304         andcc   %o2, 0x4, %g0
 498 /* This is the initial entry point for a new c << 
499    stack in place that makes it look like the  !! 306         /* rescheduling is done in RESTORE_ALL regardless, but incr stats */
500    syscall.  This function is actually `return !! 307         beq,a   maybe_smp4m_msg_out
501    (copy_thread makes ret_from_fork the return !! 308          nop
502    saved context).  */                         !! 309         call    smp_resched_interrupt
503 C_ENTRY(ret_from_fork):                        !! 310          nop
504         bralid  r15, schedule_tail; /* ...whic !! 311 maybe_smp4m_msg_out:
505         add     r5, r3, r0;     /* switch_thre !! 312         RESTORE_ALL
506                                 /* ( in the de !! 313 
507         brid    ret_from_trap;  /* Do normal t !! 314         .align  4
508         add     r3, r0, r0;     /* Child's for !! 315         .globl  linux_trap_ipi15_sun4m
509                                                !! 316 linux_trap_ipi15_sun4m:
510 C_ENTRY(ret_from_kernel_thread):               !! 317         SAVE_ALL
511         bralid  r15, schedule_tail; /* ...whic !! 318         sethi   %hi(0x80000000), %o2
512         add     r5, r3, r0;     /* switch_thre !! 319         GET_PROCESSOR4M_ID(o0)
513                                 /* ( in the de !! 320         sethi   %hi(sun4m_irq_percpu), %l5
514         brald   r15, r20        /* fn was left !! 321         or      %l5, %lo(sun4m_irq_percpu), %o5
515         addk    r5, r0, r19     /* ... and arg !! 322         sll     %o0, 2, %o0
516         brid    ret_from_trap                  !! 323         ld      [%o5 + %o0], %o5
517         add     r3, r0, r0                     !! 324         ld      [%o5 + 0x00], %o3       ! sun4m_irq_percpu[cpu]->pending
518                                                !! 325         andcc   %o3, %o2, %g0
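
Both thread start-up paths above have the same shape: finish the context switch with schedule_tail(), then leave through the normal ret_from_trap exit. ret_from_fork gives the child a zero return value in r3, while ret_from_kernel_thread first calls the thread function that copy_thread() parked in r20 with its argument from r19. A conceptual C outline; the parameters stand in for values that are really carried in registers, so this is a sketch rather than real kernel code:

    /* Conceptual outline of the two entry points above. */
    static void ret_from_fork_c(struct task_struct *prev, struct pt_regs *regs)
    {
            schedule_tail(prev);            /* finish switch_to() bookkeeping */
            regs->r3 = 0;                   /* fork() returns 0 in the child */
            /* ...then take the ret_from_trap exit back to user mode */
    }

    static void ret_from_kernel_thread_c(struct task_struct *prev,
                                         int (*fn)(void *), void *arg)
    {
            schedule_tail(prev);
            fn(arg);                        /* thread fn/arg left in r20/r19 */
            /* ...then ret_from_trap with a zero result in r3 */
    }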
519 C_ENTRY(sys_rt_sigreturn_wrapper):             !! 326         be      sun4m_nmi_error         ! Must be an NMI async memory error
520         addik   r30, r0, 0              /* no  !! 327          st     %o2, [%o5 + 0x04]       ! sun4m_irq_percpu[cpu]->clear=0x80000000
521         brid    sys_rt_sigreturn        /* Do  !! 328         WRITE_PAUSE
522         addik   r5, r1, 0;              /* add !! 329         ld      [%o5 + 0x00], %g0       ! sun4m_irq_percpu[cpu]->pending
                                                   >> 330         WRITE_PAUSE
                                                   >> 331         or      %l0, PSR_PIL, %l4
                                                   >> 332         wr      %l4, 0x0, %psr
                                                   >> 333         WRITE_PAUSE
                                                   >> 334         wr      %l4, PSR_ET, %psr
                                                   >> 335         WRITE_PAUSE
                                                   >> 336         call    smp4m_cross_call_irq
                                                   >> 337          nop
                                                   >> 338         b       ret_trap_lockless_ipi
                                                   >> 339          clr    %l6
                                                   >> 340 
                                                   >> 341         .globl  smp4d_ticker
                                                   >> 342         /* SMP per-cpu ticker interrupts are handled specially. */
                                                   >> 343 smp4d_ticker:
                                                   >> 344         SAVE_ALL
                                                   >> 345         or      %l0, PSR_PIL, %g2
                                                   >> 346         sethi   %hi(CC_ICLR), %o0
                                                   >> 347         sethi   %hi(1 << 14), %o1
                                                   >> 348         or      %o0, %lo(CC_ICLR), %o0
                                                   >> 349         stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 14 in MXCC's ICLR */
                                                   >> 350         wr      %g2, 0x0, %psr
                                                   >> 351         WRITE_PAUSE
                                                   >> 352         wr      %g2, PSR_ET, %psr
                                                   >> 353         WRITE_PAUSE
                                                   >> 354         call    smp4d_percpu_timer_interrupt
                                                   >> 355          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 356         wr      %l0, PSR_ET, %psr
                                                   >> 357         WRITE_PAUSE
                                                   >> 358         RESTORE_ALL
                                                   >> 359 
                                                   >> 360         .align  4
                                                   >> 361         .globl  linux_trap_ipi15_sun4d
                                                   >> 362 linux_trap_ipi15_sun4d:
                                                   >> 363         SAVE_ALL
                                                   >> 364         sethi   %hi(CC_BASE), %o4
                                                   >> 365         sethi   %hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
                                                   >> 366         or      %o4, (CC_EREG - CC_BASE), %o0
                                                   >> 367         ldda    [%o0] ASI_M_MXCC, %o0
                                                   >> 368         andcc   %o0, %o2, %g0
                                                   >> 369         bne     1f
                                                   >> 370          sethi  %hi(BB_STAT2), %o2
                                                   >> 371         lduba   [%o2] ASI_M_CTL, %o2
                                                   >> 372         andcc   %o2, BB_STAT2_MASK, %g0
                                                   >> 373         bne     2f
                                                   >> 374          or     %o4, (CC_ICLR - CC_BASE), %o0
                                                   >> 375         sethi   %hi(1 << 15), %o1
                                                   >> 376         stha    %o1, [%o0] ASI_M_MXCC   /* Clear PIL 15 in MXCC's ICLR */
                                                   >> 377         or      %l0, PSR_PIL, %l4
                                                   >> 378         wr      %l4, 0x0, %psr
                                                   >> 379         WRITE_PAUSE
                                                   >> 380         wr      %l4, PSR_ET, %psr
                                                   >> 381         WRITE_PAUSE
                                                   >> 382         call    smp4d_cross_call_irq
                                                   >> 383          nop
                                                   >> 384         b       ret_trap_lockless_ipi
                                                   >> 385          clr    %l6
                                                   >> 386 
                                                   >> 387 1:      /* MXCC error */
                                                   >> 388 2:      /* BB error */
                                                   >> 389         /* Disable PIL 15 */
                                                   >> 390         set     CC_IMSK, %l4
                                                   >> 391         lduha   [%l4] ASI_M_MXCC, %l5
                                                   >> 392         sethi   %hi(1 << 15), %l7
                                                   >> 393         or      %l5, %l7, %l5
                                                   >> 394         stha    %l5, [%l4] ASI_M_MXCC
                                                   >> 395         /* FIXME */
                                                   >> 396 1:      b,a     1b
                                                   >> 397 
                                                   >> 398         .globl  smpleon_ipi
                                                   >> 399         .extern leon_ipi_interrupt
                                                   >> 400         /* SMP per-cpu IPI interrupts are handled specially. */
                                                   >> 401 smpleon_ipi:
                                                   >> 402         SAVE_ALL
                                                   >> 403         or      %l0, PSR_PIL, %g2
                                                   >> 404         wr      %g2, 0x0, %psr
                                                   >> 405         WRITE_PAUSE
                                                   >> 406         wr      %g2, PSR_ET, %psr
                                                   >> 407         WRITE_PAUSE
                                                   >> 408         call    leonsmp_ipi_interrupt
                                                   >> 409          add    %sp, STACKFRAME_SZ, %o1 ! pt_regs
                                                   >> 410         wr      %l0, PSR_ET, %psr
                                                   >> 411         WRITE_PAUSE
                                                   >> 412         RESTORE_ALL
                                                   >> 413 
                                                   >> 414         .align  4
                                                   >> 415         .globl  linux_trap_ipi15_leon
                                                   >> 416 linux_trap_ipi15_leon:
                                                   >> 417         SAVE_ALL
                                                   >> 418         or      %l0, PSR_PIL, %l4
                                                   >> 419         wr      %l4, 0x0, %psr
                                                   >> 420         WRITE_PAUSE
                                                   >> 421         wr      %l4, PSR_ET, %psr
                                                   >> 422         WRITE_PAUSE
                                                   >> 423         call    leon_cross_call_irq
                                                   >> 424          nop
                                                   >> 425         b       ret_trap_lockless_ipi
                                                   >> 426          clr    %l6
523                                                   427 
524 /*                                             !! 428 #endif /* CONFIG_SMP */
 525  * HW EXCEPTION routine start                  << 
526  */                                            << 
527 C_ENTRY(full_exception_trap):                  << 
528         /* adjust exception address for privil << 
529          * for finding where is it */          << 
530         addik   r17, r17, -4                   << 
531         SAVE_STATE /* Save registers */        << 
532         /* PC, before IRQ/trap - this is one i << 
533         swi     r17, r1, PT_PC;                << 
534         tovirt(r1,r1)                          << 
535         /* FIXME this can be store directly in << 
536          * I tested it but there is a fault */ << 
537         /* where the trap should return need - << 
538         addik   r15, r0, ret_from_exc - 8      << 
539         mfs     r6, resr                       << 
540         mfs     r7, rfsr;               /* sav << 
541         mts     rfsr, r0;       /* Clear stick << 
542         rted    r0, full_exception             << 
543         addik   r5, r1, 0                /* pa << 
544                                                   429 
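The MicroBlaze entry above presets the link register r15 to ret_from_exc - 8 before branching into the C handler; a MicroBlaze routine conventionally returns with "rtsd r15, 8" (jump to r15 + 8), so the handler's ordinary return lands exactly on ret_from_exc. A trivial standalone check of that arithmetic, using a placeholder address:

#include <assert.h>

int main(void)
{
        unsigned long ret_from_exc = 0x90001000UL;   /* placeholder address        */
        unsigned long r15 = ret_from_exc - 8;        /* what the entry code loads  */
        unsigned long return_target = r15 + 8;       /* effect of "rtsd r15, 8"    */

        assert(return_target == ret_from_exc);
        return 0;
}
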
545 /*                                             !! 430         /* This routine handles illegal instructions and privileged
546  * Unaligned data trap.                        !! 431          * instruction attempts from user code.
547  *                                             !! 432          */
548  * Unaligned data trap last on 4k page is hand !! 433         .align  4
549  *                                             !! 434         .globl  bad_instruction
550  * Trap entered via exception, so EE bit is se !! 435 bad_instruction:
551  * are masked.  This is nice, means we don't h !! 436         sethi   %hi(0xc1f80000), %l4
552  *                                             !! 437         ld      [%l1], %l5
553  * The assembler routine is in "arch/microblaz !! 438         sethi   %hi(0x81d80000), %l7
554  */                                            !! 439         and     %l5, %l4, %l5
555 C_ENTRY(unaligned_data_trap):                  !! 440         cmp     %l5, %l7
556         /* MS: I have to save r11 value and th !! 441         be      1f
557          * set_bit, clear_eip, set_ee use r11  !! 442         SAVE_ALL
558          * instructions are not used. We don't !! 443 
559          * are used and they use r0 instead of !! 444         wr      %l0, PSR_ET, %psr               ! re-enable traps
560          * I am using ENTRY_SP which should be !! 445         WRITE_PAUSE
561          * pointer saving. */                  !! 446 
562         swi     r11, r0, TOPHYS(PER_CPU(ENTRY_ !! 447         add     %sp, STACKFRAME_SZ, %o0
563         set_bip;        /* equalize initial st !! 448         mov     %l1, %o1
564         clear_eip;                             !! 449         mov     %l2, %o2
565         set_ee;                                !! 450         call    do_illegal_instruction
566         lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_ !! 451          mov    %l0, %o3
567         SAVE_STATE              /* Save regist !! 452 
568         /* PC, before IRQ/trap - this is one i !! 453         RESTORE_ALL
569         swi     r17, r1, PT_PC;                !! 454 
570         tovirt(r1,r1)                          !! 455 1:      /* unimplemented flush - just skip */
571         /* where the trap should return need - !! 456         jmpl    %l2, %g0
572         addik   r15, r0, ret_from_exc-8        !! 457          rett   %l2 + 4
573         mfs     r3, resr                /* ESR !! 458 
574         mfs     r4, rear                /* EAR !! 459         .align  4
575         rtbd    r0, _unaligned_data_exception  !! 460         .globl  priv_instruction
576         addik   r7, r1, 0               /* par !! 461 priv_instruction:
                                                   >> 462         SAVE_ALL
                                                   >> 463 
                                                   >> 464         wr      %l0, PSR_ET, %psr
                                                   >> 465         WRITE_PAUSE
                                                   >> 466 
                                                   >> 467         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 468         mov     %l1, %o1
                                                   >> 469         mov     %l2, %o2
                                                   >> 470         call    do_priv_instruction
                                                   >> 471          mov    %l0, %o3
                                                   >> 472 
                                                   >> 473         RESTORE_ALL
                                                   >> 474 
                                                   >> 475         /* This routine handles unaligned data accesses. */
                                                   >> 476         .align  4
                                                   >> 477         .globl  mna_handler
                                                   >> 478 mna_handler:
                                                   >> 479         andcc   %l0, PSR_PS, %g0
                                                   >> 480         be      mna_fromuser
                                                   >> 481          nop
                                                   >> 482 
                                                   >> 483         SAVE_ALL
                                                   >> 484 
                                                   >> 485         wr      %l0, PSR_ET, %psr
                                                   >> 486         WRITE_PAUSE
                                                   >> 487 
                                                   >> 488         ld      [%l1], %o1
                                                   >> 489         call    kernel_unaligned_trap
                                                   >> 490          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 491 
                                                   >> 492         RESTORE_ALL
                                                   >> 493 
                                                   >> 494 mna_fromuser:
                                                   >> 495         SAVE_ALL
                                                   >> 496 
                                                   >> 497         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 498         WRITE_PAUSE
                                                   >> 499 
                                                   >> 500         ld      [%l1], %o1
                                                   >> 501         call    user_unaligned_trap
                                                   >> 502          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 503 
                                                   >> 504         RESTORE_ALL
                                                   >> 505 
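mna_handler above splits on PSR_PS, the "previous supervisor" bit of the trap-time %psr: set means the unaligned access came from kernel mode, clear means user mode, and each case gets its own C handler with the faulting instruction word loaded from the trap PC. A minimal sketch of that dispatch, with stub functions standing in for kernel_unaligned_trap/user_unaligned_trap (the real calls also pass the pt_regs frame first) and an assumed value for PSR_PS:

#include <stdio.h>

#define PSR_PS 0x00000040u     /* assumed "previous supervisor" bit of the PSR */

/* Stubs standing in for the C handlers; the real ones also get a pt_regs *. */
static void kernel_unaligned_trap(unsigned int insn) { printf("kernel, insn=%#x\n", insn); }
static void user_unaligned_trap(unsigned int insn)   { printf("user,   insn=%#x\n", insn); }

/* "andcc %l0, PSR_PS, %g0; be mna_fromuser": pick the handler by trap-time mode. */
static void mna_dispatch(unsigned int saved_psr, unsigned int insn_at_trap_pc)
{
        if (saved_psr & PSR_PS)
                kernel_unaligned_trap(insn_at_trap_pc);
        else
                user_unaligned_trap(insn_at_trap_pc);
}

int main(void)
{
        mna_dispatch(PSR_PS, 0xd4064000u);     /* pretend the fault came from the kernel */
        mna_dispatch(0,      0xd4064000u);     /* pretend it came from user space        */
        return 0;
}
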
                                                   >> 506         /* This routine handles floating point disabled traps. */
                                                   >> 507         .align  4
                                                   >> 508         .globl  fpd_trap_handler
                                                   >> 509 fpd_trap_handler:
                                                   >> 510         SAVE_ALL
                                                   >> 511 
                                                   >> 512         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 513         WRITE_PAUSE
                                                   >> 514 
                                                   >> 515         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 516         mov     %l1, %o1
                                                   >> 517         mov     %l2, %o2
                                                   >> 518         call    do_fpd_trap
                                                   >> 519          mov    %l0, %o3
                                                   >> 520 
                                                   >> 521         RESTORE_ALL
                                                   >> 522 
                                                   >> 523         /* This routine handles Floating Point Exceptions. */
                                                   >> 524         .align  4
                                                   >> 525         .globl  fpe_trap_handler
                                                   >> 526 fpe_trap_handler:
                                                   >> 527         set     fpsave_magic, %l5
                                                   >> 528         cmp     %l1, %l5
                                                   >> 529         be      1f
                                                   >> 530          sethi  %hi(fpsave), %l5
                                                   >> 531         or      %l5, %lo(fpsave), %l5
                                                   >> 532         cmp     %l1, %l5
                                                   >> 533         bne     2f
                                                   >> 534          sethi  %hi(fpsave_catch2), %l5
                                                   >> 535         or      %l5, %lo(fpsave_catch2), %l5
                                                   >> 536         wr      %l0, 0x0, %psr
                                                   >> 537         WRITE_PAUSE
                                                   >> 538         jmp     %l5
                                                   >> 539          rett   %l5 + 4
                                                   >> 540 1:      
                                                   >> 541         sethi   %hi(fpsave_catch), %l5
                                                   >> 542         or      %l5, %lo(fpsave_catch), %l5
                                                   >> 543         wr      %l0, 0x0, %psr
                                                   >> 544         WRITE_PAUSE
                                                   >> 545         jmp     %l5
                                                   >> 546          rett   %l5 + 4
577                                                   547 
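fpe_trap_handler first asks whether the trapping PC (%l1) is fpsave_magic or fpsave, i.e. whether the FP exception fired inside the FP-state-saving code itself; if so it reroutes execution to fpsave_catch or fpsave_catch2 without building a trap frame, and only otherwise falls through to the generic path at 2: below. A hedged C rendering of that decision, with the assembler labels replaced by ordinary function pointers and the generic path stubbed out:

#include <stdio.h>

/* Placeholder functions standing in for assembler labels. */
static void fpsave(void)        { }
static void fpsave_magic(void)  { }
static void fpsave_catch(void)  { puts("resume in fpsave_catch");             }
static void fpsave_catch2(void) { puts("resume in fpsave_catch2");            }
static void generic_fpe(void)   { puts("build trap frame, call do_fpe_trap"); }

/* Compare the trapping PC against the fpsave entry points, as the asm does. */
static void fpe_dispatch(void (*trap_pc)(void))
{
        if (trap_pc == fpsave_magic)
                fpsave_catch();        /* "be 1f" taken                     */
        else if (trap_pc == fpsave)
                fpsave_catch2();       /* "bne 2f" not taken                */
        else
                generic_fpe();         /* the path starting at 2: below     */
}

int main(void)
{
        fpe_dispatch(fpsave_magic);
        fpe_dispatch(fpsave);
        fpe_dispatch(generic_fpe);     /* any other PC takes the 2: path    */
        return 0;
}
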
578 /*                                             !! 548 2:
579  * Page fault traps.                           !! 549         SAVE_ALL
580  *                                             << 
581  * If the real exception handler (from hw_exce << 
582  * the mapping for the process, then we're thr << 
583  *                                             << 
584  * Trap entered via exceptions, so EE bit is s << 
585  * are masked.  This is nice, means we don't h << 
586  *                                             << 
587  * Build a standard exception frame for TLB Ac << 
588  * will bail out to this point if they can't r << 
589  *                                             << 
590  * The C function called is in "arch/microblaz << 
591  * void do_page_fault(struct pt_regs *regs,    << 
592  *                              unsigned long  << 
593  *                              unsigned long  << 
594  */                                            << 
595 /* data and instruction trap - which is choose << 
596 C_ENTRY(page_fault_data_trap):                 << 
597         SAVE_STATE              /* Save regist << 
598         /* PC, before IRQ/trap - this is one i << 
599         swi     r17, r1, PT_PC;                << 
600         tovirt(r1,r1)                          << 
601         /* where the trap should return need - << 
602         addik   r15, r0, ret_from_exc-8        << 
603         mfs     r6, rear                /* par << 
604         mfs     r7, resr                /* par << 
605         rted    r0, do_page_fault              << 
606         addik   r5, r1, 0               /* par << 
607                                                << 
608 C_ENTRY(page_fault_instr_trap):                << 
609         SAVE_STATE              /* Save regist << 
610         /* PC, before IRQ/trap - this is one i << 
611         swi     r17, r1, PT_PC;                << 
612         tovirt(r1,r1)                          << 
613         /* where the trap should return need - << 
614         addik   r15, r0, ret_from_exc-8        << 
615         mfs     r6, rear                /* par << 
616         ori     r7, r0, 0               /* par << 
617         rted    r0, do_page_fault              << 
618         addik   r5, r1, 0               /* par << 
619                                                << 
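Both page-fault entries above converge on the same C routine: the data-side trap passes the fault address read from REAR and the ESR contents as the error code, while the instruction-side trap passes the address with an error code forced to 0. The sketch below mirrors that argument convention with a stub standing in for do_page_fault; the example values are illustrative only.

#include <stdio.h>

struct pt_regs;        /* opaque here; the asm passes a pointer to the saved frame */

/* Stub with the argument order visible above: regs, fault address, error code. */
static void do_page_fault_stub(struct pt_regs *regs, unsigned long address,
                               unsigned long error_code)
{
        (void)regs;
        printf("fault at %#lx, error_code=%#lx\n", address, error_code);
}

int main(void)
{
        /* page_fault_data_trap:  r6 = REAR (address), r7 = RESR (error code)    */
        do_page_fault_stub(0, 0x10004000UL, 0x00000880UL);  /* illustrative only */

        /* page_fault_instr_trap: r6 = REAR, r7 forced to 0 ("ori r7, r0, 0")    */
        do_page_fault_stub(0, 0x10008000UL, 0);
        return 0;
}
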
620 /* Entry point used to return from an exceptio << 
621 C_ENTRY(ret_from_exc):                         << 
622         lwi     r11, r1, PT_MODE;              << 
623         bnei    r11, 2f;                /* See << 
624                                         /* ... << 
625                                                   550 
626         /* We're returning to user mode, so ch !! 551         wr      %l0, PSR_ET, %psr               ! re-enable traps
627            trigger rescheduling. */            !! 552         WRITE_PAUSE
628 1:                                             << 
629         lwi     r11, CURRENT_TASK, TS_THREAD_I << 
630         lwi     r19, r11, TI_FLAGS;     /* get << 
631         andi    r11, r19, _TIF_NEED_RESCHED;   << 
632         beqi    r11, 5f;                       << 
633                                                << 
634 /* Call the scheduler before returning from a  << 
635         bralid  r15, schedule;  /* Call schedu << 
636         nop;                            /* del << 
637         bri     1b                             << 
638                                                << 
639         /* Maybe handle a signal */            << 
640 5:      andi    r11, r19, _TIF_SIGPENDING | _T << 
641         beqi    r11, 4f;                /* Sig << 
642                                                   553 
643         /*                                     !! 554         add     %sp, STACKFRAME_SZ, %o0
644          * Handle a signal return; Pending sig !! 555         mov     %l1, %o1
645          *                                     !! 556         mov     %l2, %o2
646          * Not all registers are saved by the  !! 557         call    do_fpe_trap
647          * points (for instance, call-saved re !! 558          mov    %l0, %o3
648          * C-compiler calling sequence in the  !! 559 
649          * preserved), and call-clobbered regi !! 560         RESTORE_ALL
650          * traps), but signal handlers may wan !! 561 
651          * complete register state.  Here we s !! 562         /* This routine handles Tag Overflow Exceptions. */
652          * the normal entry sequence, so that  !! 563         .align  4
653          * (in a possibly modified form) after !! 564         .globl  do_tag_overflow
654         addik   r5, r1, 0;              /* Arg !! 565 do_tag_overflow:
655         bralid  r15, do_notify_resume;  /* Han !! 566         SAVE_ALL
656         addi    r6, r0, 0;              /* Arg !! 567 
657         bri     1b                             !! 568         wr      %l0, PSR_ET, %psr               ! re-enable traps
658                                                !! 569         WRITE_PAUSE
659 /* Finally, return to user state.  */          !! 570 
660 4:      set_bip;                        /* Int !! 571         add     %sp, STACKFRAME_SZ, %o0
661         swi     CURRENT_TASK, r0, PER_CPU(CURR !! 572         mov     %l1, %o1
662         VM_OFF;                                !! 573         mov     %l2, %o2
663         tophys(r1,r1);                         !! 574         call    handle_tag_overflow
664                                                !! 575          mov    %l0, %o3
665         RESTORE_REGS_RTBD;                     !! 576 
666         addik   r1, r1, PT_SIZE         /* Cle !! 577         RESTORE_ALL
667                                                !! 578 
668         lwi     r1, r1, PT_R1 - PT_SIZE; /* Re !! 579         /* This routine handles Watchpoint Exceptions. */
669         bri     6f;                            !! 580         .align  4
670 /* Return to kernel state.  */                 !! 581         .globl  do_watchpoint
671 2:      set_bip;                        /* Int !! 582 do_watchpoint:
672         VM_OFF;                                !! 583         SAVE_ALL
673         tophys(r1,r1);                         !! 584 
674         RESTORE_REGS_RTBD;                     !! 585         wr      %l0, PSR_ET, %psr               ! re-enable traps
675         addik   r1, r1, PT_SIZE         /* Cle !! 586         WRITE_PAUSE
676                                                !! 587 
677         tovirt(r1,r1);                         !! 588         add     %sp, STACKFRAME_SZ, %o0
678 6:                                             !! 589         mov     %l1, %o1
679 EXC_return:             /* Make global symbol  !! 590         mov     %l2, %o2
680         rtbd    r14, 0; /* Instructions to ret !! 591         call    handle_watchpoint
681         nop;                                   !! 592          mov    %l0, %o3
                                                   >> 593 
                                                   >> 594         RESTORE_ALL
                                                   >> 595 
                                                   >> 596         /* This routine handles Register Access Exceptions. */
                                                   >> 597         .align  4
                                                   >> 598         .globl  do_reg_access
                                                   >> 599 do_reg_access:
                                                   >> 600         SAVE_ALL
                                                   >> 601 
                                                   >> 602         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 603         WRITE_PAUSE
                                                   >> 604 
                                                   >> 605         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 606         mov     %l1, %o1
                                                   >> 607         mov     %l2, %o2
                                                   >> 608         call    handle_reg_access
                                                   >> 609          mov    %l0, %o3
                                                   >> 610 
                                                   >> 611         RESTORE_ALL
                                                   >> 612 
                                                   >> 613         /* This routine handles Co-Processor Disabled Exceptions. */
                                                   >> 614         .align  4
                                                   >> 615         .globl  do_cp_disabled
                                                   >> 616 do_cp_disabled:
                                                   >> 617         SAVE_ALL
                                                   >> 618 
                                                   >> 619         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 620         WRITE_PAUSE
                                                   >> 621 
                                                   >> 622         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 623         mov     %l1, %o1
                                                   >> 624         mov     %l2, %o2
                                                   >> 625         call    handle_cp_disabled
                                                   >> 626          mov    %l0, %o3
                                                   >> 627 
                                                   >> 628         RESTORE_ALL
                                                   >> 629 
                                                   >> 630         /* This routine handles Co-Processor Exceptions. */
                                                   >> 631         .align  4
                                                   >> 632         .globl  do_cp_exception
                                                   >> 633 do_cp_exception:
                                                   >> 634         SAVE_ALL
                                                   >> 635 
                                                   >> 636         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 637         WRITE_PAUSE
                                                   >> 638 
                                                   >> 639         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 640         mov     %l1, %o1
                                                   >> 641         mov     %l2, %o2
                                                   >> 642         call    handle_cp_exception
                                                   >> 643          mov    %l0, %o3
                                                   >> 644 
                                                   >> 645         RESTORE_ALL
                                                   >> 646 
                                                   >> 647         /* This routine handles Hardware Divide By Zero Exceptions. */
                                                   >> 648         .align  4
                                                   >> 649         .globl  do_hw_divzero
                                                   >> 650 do_hw_divzero:
                                                   >> 651         SAVE_ALL
                                                   >> 652 
                                                   >> 653         wr      %l0, PSR_ET, %psr               ! re-enable traps
                                                   >> 654         WRITE_PAUSE
                                                   >> 655 
                                                   >> 656         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 657         mov     %l1, %o1
                                                   >> 658         mov     %l2, %o2
                                                   >> 659         call    handle_hw_divzero
                                                   >> 660          mov    %l0, %o3
                                                   >> 661 
                                                   >> 662         RESTORE_ALL
                                                   >> 663 
                                                   >> 664         .align  4
                                                   >> 665         .globl  do_flush_windows
                                                   >> 666 do_flush_windows:
                                                   >> 667         SAVE_ALL
                                                   >> 668 
                                                   >> 669         wr      %l0, PSR_ET, %psr
                                                   >> 670         WRITE_PAUSE
                                                   >> 671 
                                                   >> 672         andcc   %l0, PSR_PS, %g0
                                                   >> 673         bne     dfw_kernel
                                                   >> 674          nop
                                                   >> 675 
                                                   >> 676         call    flush_user_windows
                                                   >> 677          nop
                                                   >> 678 
                                                   >> 679         /* Advance over the trap instruction. */
                                                   >> 680         ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1
                                                   >> 681         add     %l1, 0x4, %l2
                                                   >> 682         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 683         st      %l2, [%sp + STACKFRAME_SZ + PT_NPC]
                                                   >> 684 
                                                   >> 685         RESTORE_ALL
                                                   >> 686 
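do_flush_windows, like the getcc/setcc traps and the syscall return path further down, finishes by stepping the saved PC/NPC pair past the trapping instruction: the old NPC becomes the new PC and NPC advances by 4, so the rett does not re-execute the trap. A small model of that update on a stand-in register frame (the field names are placeholders for the PT_PC/PT_NPC slots):

#include <assert.h>

struct fake_frame {
        unsigned long pc;      /* PT_PC slot  */
        unsigned long npc;     /* PT_NPC slot */
};

/* Mirrors: ld PT_NPC -> %l1; add %l1, 4 -> %l2; st %l1 -> PT_PC; st %l2 -> PT_NPC */
static void advance_over_trap(struct fake_frame *f)
{
        unsigned long l1 = f->npc;
        unsigned long l2 = l1 + 0x4;
        f->pc  = l1;
        f->npc = l2;
}

int main(void)
{
        struct fake_frame f = { 0x1000, 0x1004 };  /* trap instruction at 0x1000 */
        advance_over_trap(&f);
        assert(f.pc == 0x1004 && f.npc == 0x1008); /* resumes after the trap     */
        return 0;
}
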
                                                   >> 687         .globl  flush_patch_one
                                                   >> 688 
                                                   >> 689         /* We get these for debugging routines using __builtin_return_address() */
                                                   >> 690 dfw_kernel:
                                                   >> 691 flush_patch_one:
                                                   >> 692         FLUSH_ALL_KERNEL_WINDOWS
                                                   >> 693 
                                                   >> 694         /* Advance over the trap instruction. */
                                                   >> 695         ld      [%sp + STACKFRAME_SZ + PT_NPC], %l1
                                                   >> 696         add     %l1, 0x4, %l2
                                                   >> 697         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 698         st      %l2, [%sp + STACKFRAME_SZ + PT_NPC]
682                                                   699 
683 /*                                             !! 700         RESTORE_ALL
684  * HW EXCEPTION routine end                    << 
685  */                                            << 
686                                                   701 
687 /*                                             !! 702         /* The getcc software trap.  The user wants the condition codes from
688  * Hardware maskable interrupts.               !! 703          * the %psr in register %g1.
689  *                                             !! 704          */
690  * The stack-pointer (r1) should have already  << 
691  * location PER_CPU(ENTRY_SP).                 << 
692  */                                            << 
693 C_ENTRY(_interrupt):                           << 
694 /* MS: we are in physical address */           << 
695 /* Save registers, switch to proper stack, con << 
696         swi     r1, r0, TOPHYS(PER_CPU(ENTRY_S << 
697         /* MS: See if already in kernel mode.  << 
698         mfs     r1, rmsr                       << 
699         nop                                    << 
700         andi    r1, r1, MSR_UMS                << 
701         bnei    r1, 1f                         << 
702                                                   705 
703 /* Kernel-mode state save. */                  !! 706         .align  4
704         lwi     r1, r0, TOPHYS(PER_CPU(ENTRY_S !! 707         .globl  getcc_trap_handler
705         tophys(r1,r1); /* MS: I have in r1 phy !! 708 getcc_trap_handler:
706         /* save registers */                   !! 709         srl     %l0, 20, %g1    ! give user
707 /* MS: Make room on the stack -> activation re !! 710         and     %g1, 0xf, %g1   ! only ICC bits in %psr
708         addik   r1, r1, -PT_SIZE;              !! 711         jmp     %l2             ! advance over trap instruction
709         SAVE_REGS                              !! 712         rett    %l2 + 0x4       ! like this...
710         brid    2f;                            !! 713 
711         swi     r1, r1, PT_MODE; /* 0 - user m !! 714         /* The setcc software trap.  The user has condition codes in %g1
712 1:                                             !! 715          * that it would like placed in the %psr.  Be careful not to flip
713 /* User-mode state save. */                    !! 716          * any unintentional bits!
714  /* MS: get the saved current */               !! 717          */
715         lwi     r1, r0, TOPHYS(PER_CPU(CURRENT << 
716         tophys(r1,r1);                         << 
717         lwi     r1, r1, TS_THREAD_INFO;        << 
718         addik   r1, r1, THREAD_SIZE;           << 
719         tophys(r1,r1);                         << 
720         /* save registers */                   << 
721         addik   r1, r1, -PT_SIZE;              << 
722         SAVE_REGS                              << 
723         /* calculate mode */                   << 
724         swi     r0, r1, PT_MODE;               << 
725         lwi     r11, r0, TOPHYS(PER_CPU(ENTRY_ << 
726         swi     r11, r1, PT_R1;                << 
727         clear_ums;                             << 
728 2:                                             << 
729         lwi     CURRENT_TASK, r0, TOPHYS(PER_C << 
730         tovirt(r1,r1)                          << 
731         addik   r15, r0, irq_call;             << 
732 irq_call:rtbd   r0, do_IRQ;                    << 
733         addik   r5, r1, 0;                     << 
734                                                << 
735 /* MS: we are in virtual mode */               << 
736 ret_from_irq:                                  << 
737         lwi     r11, r1, PT_MODE;              << 
738         bnei    r11, 2f;                       << 
739                                                   718 
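getcc above hands the four integer condition codes to user space by shifting the saved %psr right by 20 and masking to 4 bits; setcc below does the inverse, shifting the user's %g1 left by 20 and splicing only the ICC field back into %psr so no other bits can flip. A standalone sketch of both directions, assuming the conventional PSR_ICC mask of 0x00f00000:

#include <assert.h>

#define PSR_ICC 0x00f00000u    /* assumed N/Z/V/C field of the sparc32 PSR */

/* getcc: srl %l0, 20, %g1; and %g1, 0xf, %g1 */
static unsigned int getcc(unsigned int psr)
{
        return (psr >> 20) & 0xf;
}

/* setcc: sll %g1, 0x14, %l4; andn %l0, PSR_ICC; and %l4, PSR_ICC; or; wr */
static unsigned int setcc(unsigned int psr, unsigned int g1)
{
        unsigned int l4 = (g1 << 20) & PSR_ICC;    /* only ICC bits of the user value */
        return (psr & ~PSR_ICC) | l4;              /* only non-ICC bits of %psr       */
}

int main(void)
{
        unsigned int psr = 0x04000fc0u;                     /* arbitrary example PSR */
        assert(getcc(setcc(psr, 0x5)) == 0x5);              /* round-trips cleanly   */
        assert((setcc(psr, 0xff) & ~PSR_ICC) == (psr & ~PSR_ICC)); /* nothing else flips */
        return 0;
}
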
740 1:                                             !! 719         .align  4
741         lwi     r11, CURRENT_TASK, TS_THREAD_I !! 720         .globl  setcc_trap_handler
742         lwi     r19, r11, TI_FLAGS; /* MS: get !! 721 setcc_trap_handler:
743         andi    r11, r19, _TIF_NEED_RESCHED;   !! 722         sll     %g1, 0x14, %l4
744         beqi    r11, 5f                        !! 723         set     PSR_ICC, %l5
745         bralid  r15, schedule;                 !! 724         andn    %l0, %l5, %l0   ! clear ICC bits in %psr
746         nop; /* delay slot */                  !! 725         and     %l4, %l5, %l4   ! clear non-ICC bits in user value
747         bri     1b                             !! 726         or      %l4, %l0, %l4   ! or them in... mix mix mix
748                                                !! 727 
749     /* Maybe handle a signal */                !! 728         wr      %l4, 0x0, %psr  ! set new %psr
750 5:      andi    r11, r19, _TIF_SIGPENDING | _T !! 729         WRITE_PAUSE             ! TI scumbags...
751         beqid   r11, no_intr_resched           !! 730 
752 /* Handle a signal return; Pending signals sho !! 731         jmp     %l2             ! advance over trap instruction
753         addik   r5, r1, 0; /* Arg 1: struct pt !! 732         rett    %l2 + 0x4       ! like this...
754         bralid  r15, do_notify_resume;  /* Han !! 733 
755         addi    r6, r0, 0; /* Arg 2: int in_sy !! 734 sun4m_nmi_error:
756         bri     1b                             !! 735         /* NMI async memory error handling. */
757                                                !! 736         sethi   %hi(0x80000000), %l4
758 /* Finally, return to user state. */           !! 737         sethi   %hi(sun4m_irq_global), %o5
759 no_intr_resched:                               !! 738         ld      [%o5 + %lo(sun4m_irq_global)], %l5
760     /* Disable interrupts, we are now committe !! 739         st      %l4, [%l5 + 0x0c]       ! sun4m_irq_global->mask_set=0x80000000
761         disable_irq                            !! 740         WRITE_PAUSE
762         swi     CURRENT_TASK, r0, PER_CPU(CURR !! 741         ld      [%l5 + 0x00], %g0       ! sun4m_irq_global->pending
763         VM_OFF;                                !! 742         WRITE_PAUSE
764         tophys(r1,r1);                         !! 743         or      %l0, PSR_PIL, %l4
765         RESTORE_REGS                           !! 744         wr      %l4, 0x0, %psr
766         addik   r1, r1, PT_SIZE /* MS: Clean u !! 745         WRITE_PAUSE
767         lwi     r1, r1, PT_R1 - PT_SIZE;       !! 746         wr      %l4, PSR_ET, %psr
768         bri     6f;                            !! 747         WRITE_PAUSE
769 /* MS: Return to kernel state. */              !! 748         call    sun4m_nmi
770 2:                                             !! 749          nop
771 #ifdef CONFIG_PREEMPTION                       !! 750         st      %l4, [%l5 + 0x08]       ! sun4m_irq_global->mask_clear=0x80000000
772         lwi     r11, CURRENT_TASK, TS_THREAD_I !! 751         WRITE_PAUSE
773         /* MS: get preempt_count from thread i !! 752         ld      [%l5 + 0x00], %g0       ! sun4m_irq_global->pending
774         lwi     r5, r11, TI_PREEMPT_COUNT;     !! 753         WRITE_PAUSE
775         bgti    r5, restore;                   !! 754         RESTORE_ALL
776                                                !! 755 
777         lwi     r5, r11, TI_FLAGS;             !! 756 #ifndef CONFIG_SMP
778         andi    r5, r5, _TIF_NEED_RESCHED;     !! 757         .align  4
779         beqi    r5, restore /* if zero jump ov !! 758         .globl  linux_trap_ipi15_sun4m
                                                   >> 759 linux_trap_ipi15_sun4m:
                                                   >> 760         SAVE_ALL
                                                   >> 761 
                                                   >> 762         ba      sun4m_nmi_error
                                                   >> 763          nop
                                                   >> 764 #endif /* CONFIG_SMP */
                                                   >> 765 
                                                   >> 766         .align  4
                                                   >> 767         .globl  srmmu_fault
                                                   >> 768 srmmu_fault:
                                                   >> 769         mov     0x400, %l5
                                                   >> 770         mov     0x300, %l4
                                                   >> 771 
                                                   >> 772 LEON_PI(lda     [%l5] ASI_LEON_MMUREGS, %l6)    ! read sfar first
                                                   >> 773 SUN_PI_(lda     [%l5] ASI_M_MMUREGS, %l6)       ! read sfar first
                                                   >> 774 
                                                   >> 775 LEON_PI(lda     [%l4] ASI_LEON_MMUREGS, %l5)    ! read sfsr last
                                                   >> 776 SUN_PI_(lda     [%l4] ASI_M_MMUREGS, %l5)       ! read sfsr last
                                                   >> 777 
                                                   >> 778         andn    %l6, 0xfff, %l6
                                                   >> 779         srl     %l5, 6, %l5                     ! and encode all info into l7
                                                   >> 780 
                                                   >> 781         and     %l5, 2, %l5
                                                   >> 782         or      %l5, %l6, %l6
                                                   >> 783 
                                                   >> 784         or      %l6, %l7, %l7                   ! l7 = [addr,write,txtfault]
                                                   >> 785 
                                                   >> 786         SAVE_ALL
                                                   >> 787 
                                                   >> 788         mov     %l7, %o1
                                                   >> 789         mov     %l7, %o2
                                                   >> 790         and     %o1, 1, %o1             ! arg2 = text_faultp
                                                   >> 791         mov     %l7, %o3
                                                   >> 792         and     %o2, 2, %o2             ! arg3 = writep
                                                   >> 793         andn    %o3, 0xfff, %o3         ! arg4 = faulting address
                                                   >> 794 
                                                   >> 795         wr      %l0, PSR_ET, %psr
                                                   >> 796         WRITE_PAUSE
                                                   >> 797 
                                                   >> 798         call    do_sparc_fault
                                                   >> 799          add    %sp, STACKFRAME_SZ, %o0 ! arg1 = pt_regs ptr
                                                   >> 800 
                                                   >> 801         RESTORE_ALL
                                                   >> 802 
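srmmu_fault packs everything it needs into %l7 before SAVE_ALL can clobber the locals: the page-aligned address from SFAR, the bit the code treats as "write" (SFSR shifted right by 6, masked with 2), and a text-fault flag already left in %l7 by the trap vector; it then unpacks the three as separate arguments to do_sparc_fault. The sketch below replays that encode/decode in C; the register and bit meanings are taken from the comments above, not re-derived from the SRMMU manual.

#include <assert.h>

/* Replays the packing done before SAVE_ALL and the unpacking done after it. */
static unsigned long encode_fault(unsigned long sfar, unsigned long sfsr,
                                  unsigned long text_fault /* 0 or 1, from %l7 */)
{
        unsigned long addr  = sfar & ~0xfffUL;     /* andn %l6, 0xfff, %l6 */
        unsigned long write = (sfsr >> 6) & 2;     /* srl %l5, 6; and 2    */
        return addr | write | text_fault;          /* or; or into %l7      */
}

static void decode_fault(unsigned long l7, int *text_fault, int *write,
                         unsigned long *address)
{
        *text_fault = l7 & 1;                      /* arg2 = text_faultp (0 or 1)    */
        *write      = (l7 & 2) != 0;               /* arg3 = writep (passed as 0/2)  */
        *address    = l7 & ~0xfffUL;               /* arg4 = page-aligned fault addr */
}

int main(void)
{
        int t, w;
        unsigned long a;

        decode_fault(encode_fault(0x40001234UL, 0x80UL /* bit 7 set */, 1), &t, &w, &a);
        assert(t == 1 && w == 1 && a == 0x40001000UL);
        return 0;
}
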
                                                   >> 803         .align  4
                                                   >> 804 sunos_execv:
                                                   >> 805         .globl  sunos_execv
                                                   >> 806         b       sys_execve
                                                   >> 807          clr    %i2
                                                   >> 808 
                                                   >> 809         .align  4
                                                   >> 810         .globl  sys_sigstack
                                                   >> 811 sys_sigstack:
                                                   >> 812         mov     %o7, %l5
                                                   >> 813         mov     %fp, %o2
                                                   >> 814         call    do_sys_sigstack
                                                   >> 815          mov    %l5, %o7
                                                   >> 816 
                                                   >> 817         .align  4
                                                   >> 818         .globl  sys_sigreturn
                                                   >> 819 sys_sigreturn:
                                                   >> 820         call    do_sigreturn
                                                   >> 821          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 822 
                                                   >> 823         ld      [%curptr + TI_FLAGS], %l5
                                                   >> 824         andcc   %l5, _TIF_SYSCALL_TRACE, %g0
                                                   >> 825         be      1f
                                                   >> 826          nop
780                                                   827 
781         /* interrupts are off that's why I am  !! 828         call    syscall_trace
782         bralid  r15, preempt_schedule_irq      !! 829          mov    1, %o1
783         nop                                    << 
784 restore:                                       << 
785 #endif                                         << 
786         VM_OFF /* MS: turn off MMU */          << 
787         tophys(r1,r1)                          << 
788         RESTORE_REGS                           << 
789         addik   r1, r1, PT_SIZE /* MS: Clean u << 
790         tovirt(r1,r1);                         << 
791 6:                                             << 
792 IRQ_return: /* MS: Make global symbol for debu << 
793         rtid    r14, 0                         << 
794         nop                                    << 
795                                                   830 
796 #ifdef CONFIG_MB_MANAGER                       !! 831 1:
                                                   >> 832         /* We don't want to muck with user registers like a
                                                   >> 833          * normal syscall, just return.
                                                   >> 834          */
                                                   >> 835         RESTORE_ALL
797                                                   836 
798 #define PT_PID          PT_SIZE                !! 837         .align  4
799 #define PT_TLBI         PT_SIZE + 4            !! 838         .globl  sys_rt_sigreturn
800 #define PT_ZPR          PT_SIZE + 8            !! 839 sys_rt_sigreturn:
801 #define PT_TLBL0        PT_SIZE + 12           !! 840         call    do_rt_sigreturn
802 #define PT_TLBH0        PT_SIZE + 16           !! 841          add    %sp, STACKFRAME_SZ, %o0
803                                                !! 842 
804 C_ENTRY(_xtmr_manager_reset):                  !! 843         ld      [%curptr + TI_FLAGS], %l5
805         lwi     r1, r0, xmb_manager_stackpoint !! 844         andcc   %l5, _TIF_SYSCALL_TRACE, %g0
806                                                !! 845         be      1f
807         /* Restore MSR */                      !! 846          nop
808         lwi     r2, r1, PT_MSR                 !! 847 
809         mts     rmsr, r2                       !! 848         add     %sp, STACKFRAME_SZ, %o0
810         bri     4                              !! 849         call    syscall_trace
811                                                !! 850          mov    1, %o1
812         /* restore Special purpose registers * << 
813         lwi     r2, r1, PT_PID                 << 
814         mts     rpid, r2                       << 
815                                                << 
816         lwi     r2, r1, PT_TLBI                << 
817         mts     rtlbx, r2                      << 
818                                                << 
819         lwi     r2, r1, PT_ZPR                 << 
820         mts     rzpr, r2                       << 
821                                                << 
822 #if CONFIG_XILINX_MICROBLAZE0_USE_FPU          << 
823         lwi     r2, r1, PT_FSR                 << 
824         mts     rfsr, r2                       << 
825 #endif                                         << 
826                                                   851 
827         /* restore all the tlb's */            !! 852 1:
828         addik   r3, r0, TOPHYS(tlb_skip)       !! 853         /* We are returning to a signal handler. */
829         addik   r6, r0, PT_TLBL0               !! 854         RESTORE_ALL
830         addik   r7, r0, PT_TLBH0               << 
831 restore_tlb:                                   << 
832         add     r6, r6, r1                     << 
833         add     r7, r7, r1                     << 
834         lwi     r2, r6, 0                      << 
835         mts     rtlblo, r2                     << 
836         lwi     r2, r7, 0                      << 
837         mts     rtlbhi, r2                     << 
838         addik   r6, r6, 4                      << 
839         addik   r7, r7, 4                      << 
840         bgtid   r3, restore_tlb                << 
841         addik   r3, r3, -1                     << 
842                                                << 
843         lwi     r5, r0, TOPHYS(xmb_manager_dev << 
844         lwi     r8, r0, TOPHYS(xmb_manager_res << 
845         set_vms                                << 
846         /* return from reset need -8 to adjust << 
847         addik   r15, r0, ret_from_reset - 8    << 
848         rtbd    r8, 0                          << 
849         nop                                    << 
850                                                   855 
851 ret_from_reset:                                !! 856         /* Now that we have a real sys_clone, sys_fork() is
852         set_bip /* Ints masked for state resto !! 857          * implemented in terms of it.  Our _real_ implementation
853         VM_OFF                                 !! 858          * of SunOS vfork() will use sys_vfork().
854         /* MS: Restore all regs */             !! 859          *
855         RESTORE_REGS                           !! 860          * XXX These three should be consolidated into mostly shared
856         lwi     r14, r1, PT_R14                !! 861          * XXX code just like on sparc64... -DaveM
857         lwi     r16, r1, PT_PC                 !! 862          */
858         addik   r1, r1, PT_SIZE + 36           !! 863         .align  4
859         rtbd    r16, 0                         !! 864         .globl  sys_fork, flush_patch_two
                                                   >> 865 sys_fork:
                                                   >> 866         mov     %o7, %l5
                                                   >> 867 flush_patch_two:
                                                   >> 868         FLUSH_ALL_KERNEL_WINDOWS;
                                                   >> 869         ld      [%curptr + TI_TASK], %o4
                                                   >> 870         rd      %psr, %g4
                                                   >> 871         WRITE_PAUSE
                                                   >> 872         rd      %wim, %g5
                                                   >> 873         WRITE_PAUSE
                                                   >> 874         std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
                                                   >> 875         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 876         call    sparc_fork
                                                   >> 877          mov    %l5, %o7
                                                   >> 878 
                                                   >> 879         /* Whee, kernel threads! */
                                                   >> 880         .globl  sys_clone, flush_patch_three
                                                   >> 881 sys_clone:
                                                   >> 882         mov     %o7, %l5
                                                   >> 883 flush_patch_three:
                                                   >> 884         FLUSH_ALL_KERNEL_WINDOWS;
                                                   >> 885         ld      [%curptr + TI_TASK], %o4
                                                   >> 886         rd      %psr, %g4
                                                   >> 887         WRITE_PAUSE
                                                   >> 888         rd      %wim, %g5
                                                   >> 889         WRITE_PAUSE
                                                   >> 890         std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
                                                   >> 891         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 892         call    sparc_clone
                                                   >> 893          mov    %l5, %o7
                                                   >> 894 
                                                   >> 895         /* Whee, real vfork! */
                                                   >> 896         .globl  sys_vfork, flush_patch_four
                                                   >> 897 sys_vfork:
                                                   >> 898 flush_patch_four:
                                                   >> 899         FLUSH_ALL_KERNEL_WINDOWS;
                                                   >> 900         ld      [%curptr + TI_TASK], %o4
                                                   >> 901         rd      %psr, %g4
                                                   >> 902         WRITE_PAUSE
                                                   >> 903         rd      %wim, %g5
                                                   >> 904         WRITE_PAUSE
                                                   >> 905         std     %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr]
                                                   >> 906         sethi   %hi(sparc_vfork), %l1
                                                   >> 907         jmpl    %l1 + %lo(sparc_vfork), %g0
                                                   >> 908          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 909 
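sys_fork, sys_clone and sys_vfork above differ only in which C routine they hand off to: each flushes the kernel register windows, snapshots %psr and %wim into the thread (the std stores the %g4/%g5 pair into fork_kpsr and the word after it), and passes the pt_regs frame. A loose C sketch of that shared shape; the struct, the field name fork_kwim, and the stub backends are assumptions for illustration:

#include <stdio.h>

struct pt_regs;                        /* opaque pointer to the saved frame       */

struct thread_snapshot {               /* stand-in for the two thread slots       */
        unsigned long fork_kpsr;       /* written from %g4 (psr) by the std       */
        unsigned long fork_kwim;       /* assumed adjacent word, from %g5 (wim)   */
};

static long sparc_fork_stub(struct pt_regs *regs)  { (void)regs; return 101; }
static long sparc_clone_stub(struct pt_regs *regs) { (void)regs; return 102; }

/* Common shape of sys_fork/sys_clone/sys_vfork: snapshot, then hand off. */
static long fork_common(struct thread_snapshot *t, struct pt_regs *regs,
                        unsigned long psr, unsigned long wim,
                        long (*backend)(struct pt_regs *))
{
        /* FLUSH_ALL_KERNEL_WINDOWS has no C equivalent and is elided here. */
        t->fork_kpsr = psr;            /* rd %psr, %g4                       */
        t->fork_kwim = wim;            /* rd %wim, %g5; std %g4, [... kpsr]  */
        return backend(regs);          /* call sparc_fork / sparc_clone ...  */
}

int main(void)
{
        struct thread_snapshot t;
        printf("%ld %ld\n",
               fork_common(&t, 0, 0x80, 0x2, sparc_fork_stub),
               fork_common(&t, 0, 0x80, 0x2, sparc_clone_stub));
        return 0;
}
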
                                                   >> 910         .align  4
                                                   >> 911 linux_sparc_ni_syscall:
                                                   >> 912         sethi   %hi(sys_ni_syscall), %l7
                                                   >> 913         b       do_syscall
                                                   >> 914          or     %l7, %lo(sys_ni_syscall), %l7
                                                   >> 915 
                                                   >> 916 linux_syscall_trace:
                                                   >> 917         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 918         call    syscall_trace
                                                   >> 919          mov    0, %o1
                                                   >> 920         cmp     %o0, 0
                                                   >> 921         bne     3f
                                                   >> 922          mov    -ENOSYS, %o0
                                                   >> 923 
                                                   >> 924         /* Syscall tracing can modify the registers.  */
                                                   >> 925         ld      [%sp + STACKFRAME_SZ + PT_G1], %g1
                                                   >> 926         sethi   %hi(sys_call_table), %l7
                                                   >> 927         ld      [%sp + STACKFRAME_SZ + PT_I0], %i0
                                                   >> 928         or      %l7, %lo(sys_call_table), %l7
                                                   >> 929         ld      [%sp + STACKFRAME_SZ + PT_I1], %i1
                                                   >> 930         ld      [%sp + STACKFRAME_SZ + PT_I2], %i2
                                                   >> 931         ld      [%sp + STACKFRAME_SZ + PT_I3], %i3
                                                   >> 932         ld      [%sp + STACKFRAME_SZ + PT_I4], %i4
                                                   >> 933         ld      [%sp + STACKFRAME_SZ + PT_I5], %i5
                                                   >> 934         cmp     %g1, NR_syscalls
                                                   >> 935         bgeu    3f
                                                   >> 936          mov    -ENOSYS, %o0
                                                   >> 937 
                                                   >> 938         sll     %g1, 2, %l4
                                                   >> 939         mov     %i0, %o0
                                                   >> 940         ld      [%l7 + %l4], %l7
                                                   >> 941         mov     %i1, %o1
                                                   >> 942         mov     %i2, %o2
                                                   >> 943         mov     %i3, %o3
                                                   >> 944         b       2f
                                                   >> 945          mov    %i4, %o4
                                                   >> 946 
                                                   >> 947         .globl  ret_from_fork
                                                   >> 948 ret_from_fork:
                                                   >> 949         call    schedule_tail
                                                   >> 950          ld     [%g3 + TI_TASK], %o0
                                                   >> 951         b       ret_sys_call
                                                   >> 952          ld     [%sp + STACKFRAME_SZ + PT_I0], %o0
                                                   >> 953 
                                                   >> 954         .globl  ret_from_kernel_thread
                                                   >> 955 ret_from_kernel_thread:
                                                   >> 956         call    schedule_tail
                                                   >> 957          ld     [%g3 + TI_TASK], %o0
                                                   >> 958         ld      [%sp + STACKFRAME_SZ + PT_G1], %l0
                                                   >> 959         call    %l0
                                                   >> 960          ld     [%sp + STACKFRAME_SZ + PT_G2], %o0
                                                   >> 961         rd      %psr, %l1
                                                   >> 962         ld      [%sp + STACKFRAME_SZ + PT_PSR], %l0
                                                   >> 963         andn    %l0, PSR_CWP, %l0
860         nop                                       964         nop
                                                   >> 965         and     %l1, PSR_CWP, %l1
                                                   >> 966         or      %l0, %l1, %l0
                                                   >> 967         st      %l0, [%sp + STACKFRAME_SZ + PT_PSR]
                                                   >> 968         b       ret_sys_call
                                                   >> 969          mov    0, %o0
                                                   >> 970 
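ret_from_kernel_thread above shows how a kernel thread is launched: the function pointer sits in the saved %g1 slot and its argument in %g2, so after schedule_tail the code simply calls PT_G1 with PT_G2; when the function returns, the saved PSR gets its CWP field replaced by the window we are actually in now, and the path exits through ret_sys_call with a return value of 0. A sketch of the call and the CWP merge, with an assumed PSR_CWP mask and placeholder field names:

#include <assert.h>

#define PSR_CWP 0x1fu          /* assumed current-window-pointer field, low 5 bits */

struct fake_frame {
        void (*g1)(void *);    /* PT_G1 slot: the thread function                  */
        void *g2;              /* PT_G2 slot: its argument                         */
        unsigned int psr;      /* PT_PSR slot                                      */
};

/* Keep everything from the saved PSR except CWP, which must describe the
 * register window we are in now (the andn/and/or sequence above). */
static unsigned int merge_cwp(unsigned int saved_psr, unsigned int current_psr)
{
        return (saved_psr & ~PSR_CWP) | (current_psr & PSR_CWP);
}

static int seen;
static void demo_thread_fn(void *arg) { seen = *(int *)arg; }

int main(void)
{
        int arg = 42;
        struct fake_frame f = { demo_thread_fn, &arg, 0x000010c3u /* CWP = 3 */ };

        f.g1(f.g2);                                /* call %l0; ld PT_G2 -> %o0   */
        f.psr = merge_cwp(f.psr, 0x000010c5u);     /* pretend we are in window 5  */

        assert(seen == 42 && (f.psr & PSR_CWP) == 5);
        return 0;                                  /* b ret_sys_call; mov 0, %o0  */
}
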
                                                   >> 971         /* Linux native system calls enter here... */
                                                   >> 972         .align  4
                                                   >> 973         .globl  linux_sparc_syscall
                                                   >> 974 linux_sparc_syscall:
                                                   >> 975         sethi   %hi(PSR_SYSCALL), %l4
                                                   >> 976         or      %l0, %l4, %l0
                                                   >> 977         /* Direct access to user regs, much faster. */
                                                   >> 978         cmp     %g1, NR_syscalls
                                                   >> 979         bgeu    linux_sparc_ni_syscall
                                                   >> 980          sll    %g1, 2, %l4
                                                   >> 981         ld      [%l7 + %l4], %l7
                                                   >> 982 
                                                   >> 983 do_syscall:
                                                   >> 984         SAVE_ALL_HEAD
                                                   >> 985          rd     %wim, %l3
                                                   >> 986 
                                                   >> 987         wr      %l0, PSR_ET, %psr
                                                   >> 988         mov     %i0, %o0
                                                   >> 989         mov     %i1, %o1
                                                   >> 990         mov     %i2, %o2
                                                   >> 991 
                                                   >> 992         ld      [%curptr + TI_FLAGS], %l5
                                                   >> 993         mov     %i3, %o3
                                                   >> 994         andcc   %l5, _TIF_SYSCALL_TRACE, %g0
                                                   >> 995         mov     %i4, %o4
                                                   >> 996         bne     linux_syscall_trace
                                                   >> 997          mov    %i0, %l6
                                                   >> 998 2:
                                                   >> 999         call    %l7
                                                   >> 1000          mov    %i5, %o5
861                                                   1001 
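The dispatch above is: syscall number in %g1, arguments in %i0-%i5 copied to %o0-%o5, a bounds check against NR_syscalls that reroutes out-of-range numbers to sys_ni_syscall, then an indexed load from sys_call_table (%g1 * 4) and an indirect call; the traced variant (linux_syscall_trace) re-reads the number and arguments from pt_regs after syscall_trace, since the tracer may have rewritten them. A self-contained C sketch of that table dispatch with a toy four-entry table:

#include <errno.h>
#include <stdio.h>

#define NR_DEMO_SYSCALLS 4             /* toy table size, not the kernel's count */

typedef long (*syscall_fn)(long, long, long, long, long, long);

static long sys_ni_syscall(long a, long b, long c, long d, long e, long f)
{
        (void)a; (void)b; (void)c; (void)d; (void)e; (void)f;
        return -ENOSYS;
}

static long sys_demo_add(long a, long b, long c, long d, long e, long f)
{
        (void)c; (void)d; (void)e; (void)f;
        return a + b;
}

static syscall_fn demo_call_table[NR_DEMO_SYSCALLS] = {
        sys_ni_syscall, sys_demo_add, sys_ni_syscall, sys_ni_syscall,
};

/* %g1 = number, %i0-%i5 = arguments; out-of-range numbers go to sys_ni_syscall,
 * mirroring "cmp %g1, NR_syscalls; bgeu linux_sparc_ni_syscall". */
static long dispatch(unsigned long g1, long i0, long i1, long i2,
                     long i3, long i4, long i5)
{
        if (g1 >= NR_DEMO_SYSCALLS)
                return sys_ni_syscall(i0, i1, i2, i3, i4, i5);
        return demo_call_table[g1](i0, i1, i2, i3, i4, i5);  /* ld [%l7 + %g1*4]; call */
}

int main(void)
{
        printf("%ld %ld\n", dispatch(1, 2, 3, 0, 0, 0, 0),    /* 5           */
                            dispatch(99, 0, 0, 0, 0, 0, 0));  /* -ENOSYS     */
        return 0;
}
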
862 /*                                             !! 1002 3:
863  * Break handler for MB Manager. Enter to _xmb !! 1003         st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
864  * injecting fault in one of the TMR Microblaz << 
865  * FIXME: This break handler supports getting  << 
866  * called from kernel space only.              << 
867  */                                            << 
868 C_ENTRY(_xmb_manager_break):                   << 
869         /*                                     << 
870          * Reserve memory in the stack for con << 
871          * (which includes memory for storing  << 
872          */                                    << 
873         addik   r1, r1, -PT_SIZE - 36          << 
874         swi     r1, r0, xmb_manager_stackpoint << 
875         SAVE_REGS                              << 
876         swi     r14, r1, PT_R14 /* rewrite sav << 
877         swi     r16, r1, PT_PC; /* PC and r16  << 
878                                                   1004 
879         lwi     r6, r0, TOPHYS(xmb_manager_bas !! 1005 ret_sys_call:
880         lwi     r7, r0, TOPHYS(xmb_manager_crv !! 1006         ld      [%curptr + TI_FLAGS], %l5
881         /*                                     !! 1007         cmp     %o0, -ERESTART_RESTARTBLOCK
882          * When the break vector gets asserted !! 1008         ld      [%sp + STACKFRAME_SZ + PT_PSR], %g3
883          * the break signal must be blocked be !! 1009         set     PSR_C, %g2
884          * break handler, below code configure !! 1010         bgeu    1f
885          * control register to block break sig !! 1011          andcc  %l5, _TIF_SYSCALL_TRACE, %g0
                                                   >> 1012 
                                                   >> 1013         /* System call success, clear Carry condition code. */
                                                   >> 1014         andn    %g3, %g2, %g3
                                                   >> 1015         st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]     
                                                   >> 1016         bne     linux_syscall_trace2
                                                   >> 1017          ld     [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
                                                   >> 1018         add     %l1, 0x4, %l2                   /* npc = npc+4 */
                                                   >> 1019         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 1020         b       ret_trap_entry
                                                   >> 1021          st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
                                                   >> 1022 1:
                                                   >> 1023         /* System call failure, set Carry condition code.
                                                   >> 1024          * Also, get abs(errno) to return to the process.
886          */                                       1025          */
887         swi     r7, r6, 0                      !! 1026         sub     %g0, %o0, %o0
                                                   >> 1027         or      %g3, %g2, %g3
                                                   >> 1028         st      %o0, [%sp + STACKFRAME_SZ + PT_I0]
                                                   >> 1029         st      %g3, [%sp + STACKFRAME_SZ + PT_PSR]
                                                   >> 1030         bne     linux_syscall_trace2
                                                   >> 1031          ld     [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */
                                                   >> 1032         add     %l1, 0x4, %l2                   /* npc = npc+4 */
                                                   >> 1033         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 1034         b       ret_trap_entry
                                                   >> 1035          st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
                                                   >> 1036 
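
The success/failure paths under ret_sys_call encode the result in two places:
%o0 (stored to PT_I0) and the PSR carry bit. A hedged C model of that encoding
(struct sys_result and encode_return are illustrative names; the
ERESTART_RESTARTBLOCK value matches linux/errno.h):

        /* Carry clear: %o0 is the return value.  Carry set: %o0 is abs(errno),
         * and userspace turns that into errno = %o0 with a -1 result. */
        #define ERESTART_RESTARTBLOCK 516

        struct sys_result {
                unsigned long o0;       /* value written to PT_I0      */
                int carry;              /* PSR_C bit written to PT_PSR */
        };

        static struct sys_result encode_return(long ret)
        {
                struct sys_result r;

                /* cmp %o0, -ERESTART_RESTARTBLOCK ; bgeu 1f (unsigned compare) */
                if ((unsigned long)ret < (unsigned long)-ERESTART_RESTARTBLOCK) {
                        r.o0 = (unsigned long)ret;      /* success             */
                        r.carry = 0;
                } else {
                        r.o0 = (unsigned long)-ret;     /* failure: abs(errno) */
                        r.carry = 1;
                }
                return r;
        }
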
                                                   >> 1037 linux_syscall_trace2:
                                                   >> 1038         add     %sp, STACKFRAME_SZ, %o0
                                                   >> 1039         mov     1, %o1
                                                   >> 1040         call    syscall_trace
                                                   >> 1041          add    %l1, 0x4, %l2                   /* npc = npc+4 */
                                                   >> 1042         st      %l1, [%sp + STACKFRAME_SZ + PT_PC]
                                                   >> 1043         b       ret_trap_entry
                                                   >> 1044          st     %l2, [%sp + STACKFRAME_SZ + PT_NPC]
888                                                   1045 
889         /* Save the special purpose registers  << 
890         mfs     r2, rpid                       << 
891         swi     r2, r1, PT_PID                 << 
892                                                   1046 
893         mfs     r2, rtlbx                      !! 1047 /* Saving and restoring the FPU state is best done from lowlevel code.
894         swi     r2, r1, PT_TLBI                !! 1048  *
895                                                !! 1049  * void fpsave(unsigned long *fpregs, unsigned long *fsr,
896         mfs     r2, rzpr                       !! 1050  *             void *fpqueue, unsigned long *fpqdepth)
897         swi     r2, r1, PT_ZPR                 !! 1051  */
898                                                   1052 
899 #if CONFIG_XILINX_MICROBLAZE0_USE_FPU          !! 1053         .globl  fpsave
900         mfs     r2, rfsr                       !! 1054 fpsave:
901         swi     r2, r1, PT_FSR                 !! 1055         st      %fsr, [%o1]     ! this can trap on us if fpu is in bogon state
902 #endif                                         !! 1056         ld      [%o1], %g1
903         mfs     r2, rmsr                       !! 1057         set     0x2000, %g4
904         swi     r2, r1, PT_MSR                 !! 1058         andcc   %g1, %g4, %g0
                                                   >> 1059         be      2f
                                                   >> 1060          mov    0, %g2
905                                                   1061 
906         /* Save all the tlb's */               !! 1062         /* We have an fpqueue to save. */
907         addik   r3, r0, TOPHYS(tlb_skip)       !! 1063 1:
908         addik   r6, r0, PT_TLBL0               !! 1064         std     %fq, [%o2]
909         addik   r7, r0, PT_TLBH0               !! 1065 fpsave_magic:
910 save_tlb:                                      !! 1066         st      %fsr, [%o1]
911         add     r6, r6, r1                     !! 1067         ld      [%o1], %g3
912         add     r7, r7, r1                     !! 1068         andcc   %g3, %g4, %g0
913         mfs     r2, rtlblo                     !! 1069         add     %g2, 1, %g2
914         swi     r2, r6, 0                      !! 1070         bne     1b
915         mfs     r2, rtlbhi                     !! 1071          add    %o2, 8, %o2
916         swi     r2, r7, 0                      << 
917         addik   r6, r6, 4                      << 
918         addik   r7, r7, 4                      << 
919         bgtid   r3, save_tlb                   << 
920         addik   r3, r3, -1                     << 
921                                                << 
922         lwi     r5, r0, TOPHYS(xmb_manager_dev << 
923         lwi     r8, r0, TOPHYS(xmb_manager_cal << 
924         /* return from break need -8 to adjust << 
925         addik   r15, r0, ret_from_break - 8    << 
926         rtbd    r8, 0                          << 
927         nop                                    << 
928                                                   1072 
929 ret_from_break:                                !! 1073 2:
930         /* flush the d-cache */                !! 1074         st      %g2, [%o3]
931         bralid  r15, mb_flush_dcache           << 
932         nop                                    << 
933                                                   1075 
934         /*                                     !! 1076         std     %f0, [%o0 + 0x00]
935          * To make sure microblaze i-cache is  !! 1077         std     %f2, [%o0 + 0x08]
936          * invalidate the i-cache.             !! 1078         std     %f4, [%o0 + 0x10]
                                                   >> 1079         std     %f6, [%o0 + 0x18]
                                                   >> 1080         std     %f8, [%o0 + 0x20]
                                                   >> 1081         std     %f10, [%o0 + 0x28]
                                                   >> 1082         std     %f12, [%o0 + 0x30]
                                                   >> 1083         std     %f14, [%o0 + 0x38]
                                                   >> 1084         std     %f16, [%o0 + 0x40]
                                                   >> 1085         std     %f18, [%o0 + 0x48]
                                                   >> 1086         std     %f20, [%o0 + 0x50]
                                                   >> 1087         std     %f22, [%o0 + 0x58]
                                                   >> 1088         std     %f24, [%o0 + 0x60]
                                                   >> 1089         std     %f26, [%o0 + 0x68]
                                                   >> 1090         std     %f28, [%o0 + 0x70]
                                                   >> 1091         retl
                                                   >> 1092          std    %f30, [%o0 + 0x78]
                                                   >> 1093 
                                                   >> 1094         /* Thanks to Theo de Raadt and the authors of the Sprite/netbsd/openbsd
                                                   >> 1095          * code for pointing out this possible deadlock: while we save state
                                                   >> 1096          * above we could trap on the fsr store, so our low-level fpu trap
                                                   >> 1097          * code has to know how to deal with this.
937          */                                       1098          */
938         bralid  r15, mb_invalidate_icache      !! 1099 fpsave_catch:
939         nop                                    !! 1100         b       fpsave_magic + 4
                                                   >> 1101          st     %fsr, [%o1]
                                                   >> 1102 
                                                   >> 1103 fpsave_catch2:
                                                   >> 1104         b       fpsave + 4
                                                   >> 1105          st     %fsr, [%o1]
                                                   >> 1106 
                                                   >> 1107         /* void fpload(unsigned long *fpregs, unsigned long *fsr); */
                                                   >> 1108 
                                                   >> 1109         .globl  fpload
                                                   >> 1110 fpload:
                                                   >> 1111         ldd     [%o0 + 0x00], %f0
                                                   >> 1112         ldd     [%o0 + 0x08], %f2
                                                   >> 1113         ldd     [%o0 + 0x10], %f4
                                                   >> 1114         ldd     [%o0 + 0x18], %f6
                                                   >> 1115         ldd     [%o0 + 0x20], %f8
                                                   >> 1116         ldd     [%o0 + 0x28], %f10
                                                   >> 1117         ldd     [%o0 + 0x30], %f12
                                                   >> 1118         ldd     [%o0 + 0x38], %f14
                                                   >> 1119         ldd     [%o0 + 0x40], %f16
                                                   >> 1120         ldd     [%o0 + 0x48], %f18
                                                   >> 1121         ldd     [%o0 + 0x50], %f20
                                                   >> 1122         ldd     [%o0 + 0x58], %f22
                                                   >> 1123         ldd     [%o0 + 0x60], %f24
                                                   >> 1124         ldd     [%o0 + 0x68], %f26
                                                   >> 1125         ldd     [%o0 + 0x70], %f28
                                                   >> 1126         ldd     [%o0 + 0x78], %f30
                                                   >> 1127         ld      [%o1], %fsr
                                                   >> 1128         retl
                                                   >> 1129          nop
                                                   >> 1130 
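
The two comments above give the C prototypes of fpsave() and fpload(); the
buffer sizes below are inferred from the std/ldd offsets (%f0..%f31, stored
eight bytes at a time) and from the 8-byte %fq stores, so treat the struct as
a sketch rather than the kernel's actual thread layout:

        void fpsave(unsigned long *fpregs, unsigned long *fsr,
                    void *fpqueue, unsigned long *fpqdepth);
        void fpload(unsigned long *fpregs, unsigned long *fsr);

        struct fpu_snapshot {
                unsigned long fpregs[32];   /* %f0..%f31 (0x80 bytes)          */
                unsigned long fsr;          /* %fsr read back after the store  */
                unsigned long fpqueue[32];  /* raw 8-byte (addr, insn) entries */
                unsigned long fpqdepth;     /* entries actually queued         */
        };

        static void demo_save_restore(struct fpu_snapshot *s)
        {
                fpsave(s->fpregs, &s->fsr, s->fpqueue, &s->fpqdepth);
                /* ... switch tasks, handle the signal, etc. ... */
                fpload(s->fpregs, &s->fsr);
        }
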
                                                   >> 1131         /* __ndelay and __udelay take two arguments:
                                                   >> 1132          * 0 - nsecs or usecs to delay
                                                   >> 1133          * 1 - per_cpu udelay_val (loops per jiffy)
                                                   >> 1134          *
                                                   >> 1135          * Note that ndelay gives HZ times higher resolution but has a 10ms
                                                   >> 1136          * limit.  udelay can handle up to 1s.
                                                   >> 1137          */
                                                   >> 1138         .globl  __ndelay
                                                   >> 1139 __ndelay:
                                                   >> 1140         save    %sp, -STACKFRAME_SZ, %sp
                                                   >> 1141         mov     %i0, %o0                ! round multiplier up so large ns ok
                                                   >> 1142         mov     0x1ae, %o1              ! 2**32 / (1 000 000 000 / HZ)
                                                   >> 1143         umul    %o0, %o1, %o0
                                                   >> 1144         rd      %y, %o1
                                                   >> 1145         mov     %i1, %o1                ! udelay_val
                                                   >> 1146         umul    %o0, %o1, %o0
                                                   >> 1147         rd      %y, %o1
                                                   >> 1148         ba      delay_continue
                                                   >> 1149          mov    %o1, %o0                ! >>32 later for better resolution
                                                   >> 1150 
                                                   >> 1151         .globl  __udelay
                                                   >> 1152 __udelay:
                                                   >> 1153         save    %sp, -STACKFRAME_SZ, %sp
                                                   >> 1154         mov     %i0, %o0
                                                   >> 1155         sethi   %hi(0x10c7), %o1        ! round multiplier up so large us ok
                                                   >> 1156         or      %o1, %lo(0x10c7), %o1   ! 2**32 / 1 000 000
                                                   >> 1157         umul    %o0, %o1, %o0
                                                   >> 1158         rd      %y, %o1
                                                   >> 1159         mov     %i1, %o1                ! udelay_val
                                                   >> 1160         umul    %o0, %o1, %o0
                                                   >> 1161         rd      %y, %o1
                                                   >> 1162         sethi   %hi(0x028f4b62), %l0    ! Add in rounding constant * 2**32,
                                                   >> 1163         or      %g0, %lo(0x028f4b62), %l0
                                                   >> 1164         addcc   %o0, %l0, %o0           ! 2**32 * 0.009 999
                                                   >> 1165         bcs,a   3f
                                                   >> 1166          add    %o1, 0x01, %o1
                                                   >> 1167 3:
                                                   >> 1168         mov     HZ, %o0                 ! >>32 earlier for wider range
                                                   >> 1169         umul    %o0, %o1, %o0
                                                   >> 1170         rd      %y, %o1
940                                                   1171 
941         set_bip; /* Ints masked for state rest !! 1172 delay_continue:
942         VM_OFF;                                !! 1173         cmp     %o0, 0x0
943         mbar    1                              !! 1174 1:
944         mbar    2                              !! 1175         bne     1b
945         bri     4                              !! 1176          subcc  %o0, 1, %o0
946         suspend                                !! 1177         
947         nop                                    !! 1178         ret
948 #endif                                         !! 1179         restore
                                                   >> 1180 EXPORT_SYMBOL(__udelay)
                                                   >> 1181 EXPORT_SYMBOL(__ndelay)
949                                                   1182 
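
Modeled in C, the scaling in __udelay is plain fixed-point arithmetic: the
first multiply turns microseconds into a 0.32 fixed-point fraction of a second
(only its low word is kept), the second multiply by udelay_val keeps the high
word via rd %y, and HZ (jiffies per second) turns the result into a loop count
for delay_continue to burn. A sketch, with HZ assumed to be 100:

        #include <stdint.h>

        #define HZ 100                  /* assumption for this sketch only */

        static uint32_t udelay_loops(uint32_t usecs, uint32_t udelay_val)
        {
                /* 0x10c7 ~= 2^32 / 1e6, rounded up so large counts don't
                 * undershoot; the low word is usecs/1e6 in 0.32 fixed point. */
                uint32_t frac = (uint32_t)((uint64_t)usecs * 0x10c7);

                /* seconds * loops-per-jiffy, with the ~0.01 * 2^32 rounding
                 * constant added before taking the high word (rd %y). */
                uint64_t secs_lpj = ((uint64_t)frac * udelay_val
                                     + 0x028f4b62ULL) >> 32;

                /* jiffies-per-second * (seconds * loops-per-jiffy) = loops. */
                return (uint32_t)(secs_lpj * HZ);
        }
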
950 /*                                             !! 1183         /* Handle a software breakpoint */
951  * Debug trap for KGDB. Enter to _debug_except !! 1184         /* We have to inform parent that child has stopped */
952  * and call handling function with saved pt_re !! 1185         .align 4
953  */                                            !! 1186         .globl breakpoint_trap
954 C_ENTRY(_debug_exception):                     !! 1187 breakpoint_trap:
955         /* BIP bit is set on entry, no interru !! 1188         rd      %wim,%l3
956         swi     r1, r0, TOPHYS(PER_CPU(ENTRY_S !! 1189         SAVE_ALL
                                                   >> 1190         wr      %l0, PSR_ET, %psr
                                                   >> 1191         WRITE_PAUSE
                                                   >> 1192 
                                                   >> 1193         st      %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls
                                                   >> 1194         call    sparc_breakpoint
                                                   >> 1195          add    %sp, STACKFRAME_SZ, %o0
                                                   >> 1196 
                                                   >> 1197         RESTORE_ALL
957                                                   1198 
958         mfs     r1, rmsr                       << 
959         nop                                    << 
960         andi    r1, r1, MSR_UMS                << 
961         bnei    r1, 1f                         << 
962 /* MS: Kernel-mode state save - kgdb */        << 
963         lwi     r1, r0, TOPHYS(PER_CPU(ENTRY_S << 
964                                                << 
965         /* BIP bit is set on entry, no interru << 
966         addik   r1, r1, CONFIG_KERNEL_BASE_ADD << 
967         SAVE_REGS;                             << 
968         /* save all regs to pt_reg structure * << 
969         swi     r0, r1, PT_R0;  /* R0 must be  << 
970         swi     r14, r1, PT_R14 /* rewrite sav << 
971         swi     r16, r1, PT_PC; /* PC and r16  << 
972         /* save special purpose registers to p << 
973         mfs     r11, rear;                     << 
974         swi     r11, r1, PT_EAR;               << 
975         mfs     r11, resr;                     << 
976         swi     r11, r1, PT_ESR;               << 
977         mfs     r11, rfsr;                     << 
978         swi     r11, r1, PT_FSR;               << 
979                                                << 
980         /* stack pointer is in physical addres << 
981          * by PT_SIZE but we need to get corre << 
982         addik   r11, r1, CONFIG_KERNEL_START - << 
983         swi     r11, r1, PT_R1                 << 
984         /* MS: r31 - current pointer isn't cha << 
985         tovirt(r1,r1)                          << 
986 #ifdef CONFIG_KGDB                                1199 #ifdef CONFIG_KGDB
987         addi    r5, r1, 0 /* pass pt_reg addre !! 1200         ENTRY(kgdb_trap_low)
988         addik   r15, r0, dbtrap_call; /* retur !! 1201         rd      %wim,%l3
989         rtbd    r0, microblaze_kgdb_break      !! 1202         SAVE_ALL
990         nop;                                   !! 1203         wr      %l0, PSR_ET, %psr
                                                   >> 1204         WRITE_PAUSE
                                                   >> 1205 
                                                   >> 1206         mov     %l7, %o0                ! trap_level
                                                   >> 1207         call    kgdb_trap
                                                   >> 1208          add    %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
                                                   >> 1209 
                                                   >> 1210         RESTORE_ALL
                                                   >> 1211         ENDPROC(kgdb_trap_low)
991 #endif                                            1212 #endif
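
On both sides of this block the assembly only builds a pt_regs frame and hands
it to a C handler; read as C, the calls look roughly like the prototypes below
(inferred from the argument registers, so treat them as a sketch):

        struct pt_regs;         /* arch-specific saved-register frame */

        /* MicroBlaze: r5 = &pt_regs, return address set to dbtrap_call. */
        void microblaze_kgdb_break(struct pt_regs *regs);

        /* sparc32: %o0 = trap level (%l7), %o1 = pt_regs on the trap frame. */
        void kgdb_trap(unsigned long trap_level, struct pt_regs *regs);
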
992         /* MS: Place handler for brki from ker << 
993          * It is very unlikely that another br << 
994         bri 0                                  << 
995                                                << 
996 /* MS: User-mode state save - gdb */           << 
997 1:      lwi     r1, r0, TOPHYS(PER_CPU(CURRENT << 
998         tophys(r1,r1);                         << 
999         lwi     r1, r1, TS_THREAD_INFO; /* get << 
1000         addik   r1, r1, THREAD_SIZE;    /* ca << 
1001         tophys(r1,r1);                        << 
1002                                               << 
1003         addik   r1, r1, -PT_SIZE; /* Make roo << 
1004         SAVE_REGS;                            << 
1005         swi     r16, r1, PT_PC; /* Save LP */ << 
1006         swi     r0, r1, PT_MODE; /* Was in us << 
1007         lwi     r11, r0, TOPHYS(PER_CPU(ENTRY << 
1008         swi     r11, r1, PT_R1; /* Store user << 
1009         lwi     CURRENT_TASK, r0, TOPHYS(PER_ << 
1010         tovirt(r1,r1)                         << 
1011         set_vms;                              << 
1012         addik   r5, r1, 0;                    << 
1013         addik   r15, r0, dbtrap_call;         << 
1014 dbtrap_call: /* Return point for kernel/user  << 
1015         rtbd    r0, sw_exception              << 
1016         nop                                   << 
1017                                                  1213 
1018         /* MS: The first instruction for the  !! 1214         .align  4
1019         set_bip; /* Ints masked for state res !! 1215         .globl  flush_patch_exception
1020         lwi     r11, r1, PT_MODE;             !! 1216 flush_patch_exception:
1021         bnei    r11, 2f;                      !! 1217         FLUSH_ALL_KERNEL_WINDOWS;
1022 /* MS: Return to user space - gdb */          !! 1218         ldd     [%o0], %o6
                                                   >> 1219         jmpl    %o7 + 0xc, %g0                  ! see asm-sparc/processor.h
                                                   >> 1220          mov    1, %g1                          ! signal EFAULT condition
                                                   >> 1221 
                                                   >> 1222         .align  4
                                                   >> 1223         .globl  kill_user_windows, kuw_patch1_7win
                                                   >> 1224         .globl  kuw_patch1
                                                   >> 1225 kuw_patch1_7win:        sll     %o3, 6, %o3
                                                   >> 1226 
                                                   >> 1227         /* No matter how much overhead this routine has in the worst
                                                   >> 1228          * case scenario, it is several times better than taking the
                                                   >> 1229          * traps with the old method of just doing flush_user_windows().
                                                   >> 1230          */
                                                   >> 1231 kill_user_windows:
                                                   >> 1232         ld      [%g6 + TI_UWINMASK], %o0        ! get current umask
                                                   >> 1233         orcc    %g0, %o0, %g0                   ! if no bits set, we are done
                                                   >> 1234         be      3f                              ! nothing to do
                                                   >> 1235          rd     %psr, %o5                       ! must clear interrupts
                                                   >> 1236         or      %o5, PSR_PIL, %o4               ! or else that could change
                                                   >> 1237         wr      %o4, 0x0, %psr                  ! the uwinmask state
                                                   >> 1238         WRITE_PAUSE                             ! burn them cycles
1023 1:                                               1239 1:
1024         /* Get current task ptr into r11 */   !! 1240         ld      [%g6 + TI_UWINMASK], %o0        ! get consistent state
1025         lwi     r11, CURRENT_TASK, TS_THREAD_ !! 1241         orcc    %g0, %o0, %g0                   ! did an interrupt come in?
1026         lwi     r19, r11, TI_FLAGS;     /* ge !! 1242         be      4f                              ! yep, we are done
1027         andi    r11, r19, _TIF_NEED_RESCHED;  !! 1243          rd     %wim, %o3                       ! get current wim
1028         beqi    r11, 5f;                      !! 1244         srl     %o3, 1, %o4                     ! simulate a save
1029                                               !! 1245 kuw_patch1:
1030         /* Call the scheduler before returnin !! 1246         sll     %o3, 7, %o3                     ! compute next wim
1031         bralid  r15, schedule;  /* Call sched !! 1247         or      %o4, %o3, %o3                   ! result
1032         nop;                            /* de !! 1248         andncc  %o0, %o3, %o0                   ! clean this bit in umask
1033         bri     1b                            !! 1249         bne     kuw_patch1                      ! not done yet
1034                                               !! 1250          srl    %o3, 1, %o4                     ! begin another save simulation
1035         /* Maybe handle a signal */           !! 1251         wr      %o3, 0x0, %wim                  ! set the new wim
1036 5:      andi    r11, r19, _TIF_SIGPENDING | _ !! 1252         st      %g0, [%g6 + TI_UWINMASK]        ! clear uwinmask
1037         beqi    r11, 4f;                /* Si !! 1253 4:
1038                                               !! 1254         wr      %o5, 0x0, %psr                  ! re-enable interrupts
1039         addik   r5, r1, 0;              /* Ar !! 1255         WRITE_PAUSE                             ! burn baby burn
1040         bralid  r15, do_notify_resume;  /* Ha !! 1256 3:
1041         addi  r6, r0, 0;        /* Arg 2: int !! 1257         retl                                    ! return
1042         bri     1b                            !! 1258          st     %g0, [%g6 + TI_W_SAVED]         ! no windows saved
1043                                               << 
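
The kuw_patch1 loop above retires each still-live user window by rotating the
window invalid mask one slot per "simulated save" instead of taking real window
traps. A C model of that loop (NWINDOWS and the function names are assumptions
of this sketch; kuw_patch1 itself is patched at boot for 7-window CPUs):

        #include <stdint.h>

        #define NWINDOWS 8

        /* One simulated save: rotate WIM right by one window, which is what
         * "srl %o3, 1, %o4 ; sll %o3, NWINDOWS-1, %o3 ; or %o4, %o3" computes.
         * The mask mirrors the hardware ignoring bits beyond NWINDOWS. */
        static uint32_t rotate_wim(uint32_t wim)
        {
                return ((wim >> 1) | (wim << (NWINDOWS - 1)))
                        & ((1u << NWINDOWS) - 1);
        }

        /* Keep rotating until every bit of the user-window mask is covered;
         * the result is then written to %wim and uwinmask is cleared. */
        static uint32_t kill_user_windows_model(uint32_t uwinmask, uint32_t wim)
        {
                while (uwinmask) {
                        wim = rotate_wim(wim);
                        uwinmask &= ~wim;       /* andncc %o0, %o3, %o0 */
                }
                return wim;
        }
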
1044 /* Finally, return to user state.  */         << 
1045 4:      swi     CURRENT_TASK, r0, PER_CPU(CUR << 
1046         VM_OFF;                               << 
1047         tophys(r1,r1);                        << 
1048         /* MS: Restore all regs */            << 
1049         RESTORE_REGS_RTBD                     << 
1050         addik   r1, r1, PT_SIZE  /* Clean up  << 
1051         lwi     r1, r1, PT_R1 - PT_SIZE; /* R << 
1052 DBTRAP_return_user: /* MS: Make global symbol << 
1053         rtbd    r16, 0; /* MS: Instructions t << 
1054         nop;                                  << 
1055                                               << 
1056 /* MS: Return to kernel state - kgdb */       << 
1057 2:      VM_OFF;                               << 
1058         tophys(r1,r1);                        << 
1059         /* MS: Restore all regs */            << 
1060         RESTORE_REGS_RTBD                     << 
1061         lwi     r14, r1, PT_R14;              << 
1062         lwi     r16, r1, PT_PC;               << 
1063         addik   r1, r1, PT_SIZE; /* MS: Clean << 
1064         tovirt(r1,r1);                        << 
1065 DBTRAP_return_kernel: /* MS: Make global symb << 
1066         rtbd    r16, 0; /* MS: Instructions t << 
1067         nop;                                  << 
1068                                               << 
1069                                               << 
1070 ENTRY(_switch_to)                             << 
1071         /* prepare return value */            << 
1072         addk    r3, r0, CURRENT_TASK          << 
1073                                               << 
1074         /* save registers in cpu_context */   << 
1075         /* use r11 and r12, volatile register << 
1076         /* give start of cpu_context for prev << 
1077         addik   r11, r5, TI_CPU_CONTEXT       << 
1078         swi     r1, r11, CC_R1                << 
1079         swi     r2, r11, CC_R2                << 
1080         /* skip volatile registers.           << 
1081          * they are saved on stack when we ju << 
1082         /* dedicated registers */             << 
1083         swi     r13, r11, CC_R13              << 
1084         swi     r14, r11, CC_R14              << 
1085         swi     r15, r11, CC_R15              << 
1086         swi     r16, r11, CC_R16              << 
1087         swi     r17, r11, CC_R17              << 
1088         swi     r18, r11, CC_R18              << 
1089         /* save non-volatile registers */     << 
1090         swi     r19, r11, CC_R19              << 
1091         swi     r20, r11, CC_R20              << 
1092         swi     r21, r11, CC_R21              << 
1093         swi     r22, r11, CC_R22              << 
1094         swi     r23, r11, CC_R23              << 
1095         swi     r24, r11, CC_R24              << 
1096         swi     r25, r11, CC_R25              << 
1097         swi     r26, r11, CC_R26              << 
1098         swi     r27, r11, CC_R27              << 
1099         swi     r28, r11, CC_R28              << 
1100         swi     r29, r11, CC_R29              << 
1101         swi     r30, r11, CC_R30              << 
1102         /* special purpose registers */       << 
1103         mfs     r12, rmsr                     << 
1104         swi     r12, r11, CC_MSR              << 
1105         mfs     r12, rear                     << 
1106         swi     r12, r11, CC_EAR              << 
1107         mfs     r12, resr                     << 
1108         swi     r12, r11, CC_ESR              << 
1109         mfs     r12, rfsr                     << 
1110         swi     r12, r11, CC_FSR              << 
1111                                               << 
1112         /* update r31, the current-give me po << 
1113         lwi     CURRENT_TASK, r6, TI_TASK     << 
1114         /* stored it to current_save too */   << 
1115         swi     CURRENT_TASK, r0, PER_CPU(CUR << 
1116                                               << 
1117         /* get new process' cpu context and r << 
1118         /* give me start where start context  << 
1119         addik   r11, r6, TI_CPU_CONTEXT       << 
1120                                               << 
1121         /* non-volatile registers */          << 
1122         lwi     r30, r11, CC_R30              << 
1123         lwi     r29, r11, CC_R29              << 
1124         lwi     r28, r11, CC_R28              << 
1125         lwi     r27, r11, CC_R27              << 
1126         lwi     r26, r11, CC_R26              << 
1127         lwi     r25, r11, CC_R25              << 
1128         lwi     r24, r11, CC_R24              << 
1129         lwi     r23, r11, CC_R23              << 
1130         lwi     r22, r11, CC_R22              << 
1131         lwi     r21, r11, CC_R21              << 
1132         lwi     r20, r11, CC_R20              << 
1133         lwi     r19, r11, CC_R19              << 
1134         /* dedicated registers */             << 
1135         lwi     r18, r11, CC_R18              << 
1136         lwi     r17, r11, CC_R17              << 
1137         lwi     r16, r11, CC_R16              << 
1138         lwi     r15, r11, CC_R15              << 
1139         lwi     r14, r11, CC_R14              << 
1140         lwi     r13, r11, CC_R13              << 
1141         /* skip volatile registers */         << 
1142         lwi     r2, r11, CC_R2                << 
1143         lwi     r1, r11, CC_R1                << 
1144                                               << 
1145         /* special purpose registers */       << 
1146         lwi     r12, r11, CC_FSR              << 
1147         mts     rfsr, r12                     << 
1148         lwi     r12, r11, CC_MSR              << 
1149         mts     rmsr, r12                     << 
1150                                                  1259 
1151         rtsd    r15, 8                        !! 1260         .align  4
1152         nop                                   !! 1261         .globl  restore_current
                                                   >> 1262 restore_current:
                                                   >> 1263         LOAD_CURRENT(g6, o0)
                                                   >> 1264         retl
                                                   >> 1265          nop
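
_switch_to receives the outgoing and incoming thread_info pointers in r5/r6,
spills the callee-saved context of the old task into its cpu_context, reloads
the new one, and returns the new CURRENT_TASK in r3. Its C-visible shape is
roughly the following (a sketch; the parameter names are inferred from the
register usage):

        struct task_struct;
        struct thread_info;

        /* r5 = previous thread_info, r6 = next thread_info,
         * r3 = task_struct of the task that is now current. */
        struct task_struct *_switch_to(struct thread_info *prev,
                                       struct thread_info *next);
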
                                                   >> 1266 
                                                   >> 1267 #ifdef CONFIG_PCIC_PCI
                                                   >> 1268 #include <asm/pcic.h>
                                                   >> 1269 
                                                   >> 1270         .align  4
                                                   >> 1271         .globl  linux_trap_ipi15_pcic
                                                   >> 1272 linux_trap_ipi15_pcic:
                                                   >> 1273         rd      %wim, %l3
                                                   >> 1274         SAVE_ALL
1153                                                  1275 
1154 #ifdef CONFIG_MB_MANAGER                      << 
1155 .global xmb_inject_err                        << 
1156 .section .text                                << 
1157 .align 2                                      << 
1158 .ent xmb_inject_err                           << 
1159 .type xmb_inject_err, @function               << 
1160 xmb_inject_err:                               << 
1161         addik   r1, r1, -PT_SIZE              << 
1162         SAVE_REGS                             << 
1163                                               << 
1164         /* Switch to real mode */             << 
1165         VM_OFF;                               << 
1166         set_bip;                              << 
1167         mbar    1                             << 
1168         mbar    2                             << 
1169         bralid  r15, XMB_INJECT_ERR_OFFSET    << 
1170         nop;                                  << 
1171                                               << 
1172         /* enable virtual mode */             << 
1173         set_vms;                              << 
1174         /* barrier for instructions and data  << 
1175         mbar    1                             << 
1176         mbar    2                             << 
1177         /*                                       1276         /*
1178          * Enable Interrupts, Virtual Protect !! 1277          * First deactivate NMI
1179          * initial state for all possible ent !! 1278          * or we cannot drop ET, cannot get window spill traps.
                                                   >> 1279          * The busy loop is necessary because the PIO error
                                                   >> 1280          * sometimes does not go away quickly and we trap again.
1180          */                                      1281          */
1181         rtbd    r0, 1f                        !! 1282         sethi   %hi(pcic_regs), %o1
1182         nop;                                  !! 1283         ld      [%o1 + %lo(pcic_regs)], %o2
1183 1:                                            << 
1184         RESTORE_REGS                          << 
1185         addik   r1, r1, PT_SIZE               << 
1186         rtsd    r15, 8;                       << 
1187         nop;                                  << 
1188 .end xmb_inject_err                           << 
1189                                               << 
1190 .section .data                                << 
1191 .global xmb_manager_dev                       << 
1192 .global xmb_manager_baseaddr                  << 
1193 .global xmb_manager_crval                     << 
1194 .global xmb_manager_callback                  << 
1195 .global xmb_manager_reset_callback            << 
1196 .global xmb_manager_stackpointer              << 
1197 .align 4                                      << 
1198 xmb_manager_dev:                              << 
1199         .long 0                               << 
1200 xmb_manager_baseaddr:                         << 
1201         .long 0                               << 
1202 xmb_manager_crval:                            << 
1203         .long 0                               << 
1204 xmb_manager_callback:                         << 
1205         .long 0                               << 
1206 xmb_manager_reset_callback:                   << 
1207         .long 0                               << 
1208 xmb_manager_stackpointer:                     << 
1209         .long 0                               << 
1210                                               << 
1211 /*                                            << 
1212  * When the break vector gets asserted becaus << 
1213  * the break signal must be blocked before ex << 
1214  * break handler, Below api updates the manag << 
1215  * control register and error count callback  << 
1216  * which will be used by the break handler to << 
1217  * break and call the callback function.      << 
1218  */                                           << 
1219 .global xmb_manager_register                  << 
1220 .section .text                                << 
1221 .align 2                                      << 
1222 .ent xmb_manager_register                     << 
1223 .type xmb_manager_register, @function         << 
1224 xmb_manager_register:                         << 
1225         swi     r5, r0, xmb_manager_baseaddr  << 
1226         swi     r6, r0, xmb_manager_crval     << 
1227         swi     r7, r0, xmb_manager_callback  << 
1228         swi     r8, r0, xmb_manager_dev       << 
1229         swi     r9, r0, xmb_manager_reset_cal << 
1230                                               << 
1231         rtsd    r15, 8;                       << 
1232         nop;                                  << 
1233 .end xmb_manager_register                     << 
1234 #endif                                        << 
1235                                                  1284 
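
Taken together with the comment above it, the r5..r9 stores in
xmb_manager_register suggest a C prototype along the following lines; the
parameter names and callback signatures are assumptions of this sketch, not
quoted from a header:

        #include <stdint.h>

        /* r5 -> xmb_manager_baseaddr, r6 -> xmb_manager_crval,
         * r7 -> xmb_manager_callback, r8 -> xmb_manager_dev,
         * r9 -> xmb_manager_reset_callback */
        void xmb_manager_register(uintptr_t baseaddr, uint32_t cr_val,
                                  void (*err_callback)(void *dev),
                                  void *dev,
                                  void (*reset_callback)(void *dev));
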
1236 ENTRY(_reset)                                 !! 1285         ! Get pending status for printouts later.
1237         VM_OFF                                !! 1286         ld      [%o2 + PCI_SYS_INT_PENDING], %o0
1238         brai    0; /* Jump to reset vector */ << 
1239                                               << 
1240         /* These are compiled and loaded into << 
1241          * copied into place in mach_early_se << 
1242         .section        .init.ivt, "ax"       << 
1243 #if CONFIG_MANUAL_RESET_VECTOR && !defined(CO << 
1244         .org    0x0                           << 
1245         brai    CONFIG_MANUAL_RESET_VECTOR    << 
1246 #elif defined(CONFIG_MB_MANAGER)              << 
1247         .org    0x0                           << 
1248         brai    TOPHYS(_xtmr_manager_reset);  << 
1249 #endif                                        << 
1250         .org    0x8                           << 
1251         brai    TOPHYS(_user_exception); /* s << 
1252         .org    0x10                          << 
1253         brai    TOPHYS(_interrupt);     /* In << 
1254 #ifdef CONFIG_MB_MANAGER                      << 
1255         .org    0x18                          << 
1256         brai    TOPHYS(_xmb_manager_break);   << 
1257 #else                                         << 
1258         .org    0x18                          << 
1259         brai    TOPHYS(_debug_exception);     << 
1260 #endif                                        << 
1261         .org    0x20                          << 
1262         brai    TOPHYS(_hw_exception_handler) << 
1263                                                  1287 
1264 #ifdef CONFIG_MB_MANAGER                      !! 1288         mov     PCI_SYS_INT_PENDING_CLEAR_ALL, %o1
1265         /*                                    !! 1289         stb     %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR]
1266          * For TMR Inject API which injects t !! 1290 1:
1267          * be executed from LMB.              !! 1291         ld      [%o2 + PCI_SYS_INT_PENDING], %o1
1268          * TMR Inject is programmed with addr !! 1292         andcc   %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0
1269          * when program counter matches with  !! 1293         bne     1b
1270          * be injected. 0x200 is expected to  !! 1294          nop
1271          * offset, hence used for this api.   !! 1295 
1272          */                                   !! 1296         or      %l0, PSR_PIL, %l4
1273         .org    XMB_INJECT_ERR_OFFSET         !! 1297         wr      %l4, 0x0, %psr
1274 xmb_inject_error:                             !! 1298         WRITE_PAUSE
                                                   >> 1299         wr      %l4, PSR_ET, %psr
                                                   >> 1300         WRITE_PAUSE
                                                   >> 1301 
                                                   >> 1302         call    pcic_nmi
                                                   >> 1303          add    %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs
                                                   >> 1304         RESTORE_ALL
                                                   >> 1305 
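
The deactivate-then-poll sequence above reads, in C, roughly as below: clear
every pending bit, then spin until the PIO/PCI error bits really drop, since
the PIO error can linger and would re-trap immediately. The PCI_SYS_INT_*
names come from asm/pcic.h; the pointer variables here are illustrative:

        #include <stdint.h>

        extern volatile uint8_t  *pcic_pending_clear;  /* ..._PENDING_CLEAR */
        extern volatile uint32_t *pcic_pending;        /* ..._PENDING       */

        static void pcic_nmi_ack(uint8_t clear_all, uint32_t pio_or_pci_mask)
        {
                *pcic_pending_clear = clear_all;        /* stb CLEAR_ALL     */
                while (*pcic_pending & pio_or_pci_mask) /* ld + andcc + bne  */
                        ;                               /* busy wait         */
        }
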
                                                   >> 1306         .globl  pcic_nmi_trap_patch
                                                   >> 1307 pcic_nmi_trap_patch:
                                                   >> 1308         sethi   %hi(linux_trap_ipi15_pcic), %l3
                                                   >> 1309         jmpl    %l3 + %lo(linux_trap_ipi15_pcic), %g0
                                                   >> 1310          rd     %psr, %l0
                                                   >> 1311         .word   0
                                                   >> 1312 
                                                   >> 1313 #endif /* CONFIG_PCIC_PCI */
                                                   >> 1314 
                                                   >> 1315         .globl  flushw_all
                                                   >> 1316 flushw_all:
                                                   >> 1317         save    %sp, -0x40, %sp
                                                   >> 1318         save    %sp, -0x40, %sp
                                                   >> 1319         save    %sp, -0x40, %sp
                                                   >> 1320         save    %sp, -0x40, %sp
                                                   >> 1321         save    %sp, -0x40, %sp
                                                   >> 1322         save    %sp, -0x40, %sp
                                                   >> 1323         save    %sp, -0x40, %sp
                                                   >> 1324         restore
                                                   >> 1325         restore
                                                   >> 1326         restore
                                                   >> 1327         restore
                                                   >> 1328         restore
                                                   >> 1329         restore
                                                   >> 1330         ret
                                                   >> 1331          restore
                                                   >> 1332 
                                                   >> 1333 #ifdef CONFIG_SMP
                                                   >> 1334 ENTRY(hard_smp_processor_id)
                                                   >> 1335 661:    rd              %tbr, %g1
                                                   >> 1336         srl             %g1, 12, %o0
                                                   >> 1337         and             %o0, 3, %o0
                                                   >> 1338         .section        .cpuid_patch, "ax"
                                                   >> 1339         /* Instruction location. */
                                                   >> 1340         .word           661b
                                                   >> 1341         /* SUN4D implementation. */
                                                   >> 1342         lda             [%g0] ASI_M_VIKING_TMP1, %o0
                                                   >> 1343         nop
1275         nop                                      1344         nop
1276         rtsd    r15, 8                        !! 1345         /* LEON implementation. */
                                                   >> 1346         rd              %asr17, %o0
                                                   >> 1347         srl             %o0, 0x1c, %o0
1277         nop                                      1348         nop
                                                   >> 1349         .previous
                                                   >> 1350         retl
                                                   >> 1351          nop
                                                   >> 1352 ENDPROC(hard_smp_processor_id)
1278 #endif                                           1353 #endif
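
Each record emitted into .cpuid_patch above is one instruction address followed
by two three-instruction replacement sequences, so that early setup code can
rewrite the generic %tbr-based sequence in place on SUN4D or LEON. The record
layout below is read off the assembly; the struct name and the walker are
hypothetical:

        #include <stdint.h>
        #include <string.h>

        struct cpuid_patch_record {
                uint32_t addr;          /* .word 661b                       */
                uint32_t sun4d[3];      /* lda [%g0] ASI_M_VIKING_TMP1, ... */
                uint32_t leon[3];       /* rd %asr17 ; srl ; nop            */
        };

        /* Hypothetical walker: copy the variant for the running CPU over the
         * generic instructions at addr (a real one would also flush icache). */
        static void apply_cpuid_patches(struct cpuid_patch_record *p,
                                        struct cpuid_patch_record *end,
                                        int is_leon)
        {
                for (; p < end; p++)
                        memcpy((void *)(uintptr_t)p->addr,
                               is_leon ? p->leon : p->sun4d, sizeof(p->leon));
        }
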
1279                                                  1354 
1280 .section .rodata,"a"                          !! 1355 /* End of entry.S */
1281 #include "syscall_table.S"                    << 
1282                                               << 
1283 syscall_table_size=(.-sys_call_table)         << 
1284                                               << 
1285 type_SYSCALL:                                 << 
1286         .ascii "SYSCALL\0"                    << 
1287 type_IRQ:                                     << 
1288         .ascii "IRQ\0"                        << 
1289 type_IRQ_PREEMPT:                             << 
1290         .ascii "IRQ (PREEMPTED)\0"            << 
1291 type_SYSCALL_PREEMPT:                         << 
1292         .ascii " SYSCALL (PREEMPTED)\0"       << 
1293                                               << 
1294         /*                                    << 
1295          * Trap decoding for stack unwinder   << 
1296          * Tuples are (start addr, end addr,  << 
1297          * If return address lies on [start a << 
1298          * unwinder displays 'string'         << 
1299          */                                   << 
1300                                               << 
1301         .align 4                              << 
1302 .global microblaze_trap_handlers              << 
1303 microblaze_trap_handlers:                     << 
1304         /* Exact matches come first */        << 
1305         .word ret_from_trap; .word ret_from_t << 
1306         .word ret_from_irq ; .word ret_from_i << 
1307         /* Fuzzy matches go here */           << 
1308         .word ret_from_irq ; .word no_intr_re << 
1309         .word ret_from_trap; .word TRAP_retur << 
1310         /* End of table */                    << 
1311         .word 0               ; .word 0       << 
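
microblaze_trap_handlers is consumed by the stack unwinder as an array of
(start addr, end addr, string) tuples terminated by a zero entry. One entry
and a lookup might look like this in C (a sketch; the struct and function
names are illustrative):

        #include <stddef.h>

        /* A return address inside [start, end] is reported with `name'. */
        struct trap_range {
                unsigned long start;
                unsigned long end;
                const char *name;       /* e.g. type_SYSCALL, type_IRQ */
        };

        static const char *decode_trap(const struct trap_range *tbl,
                                       unsigned long pc)
        {
                for (; tbl->start || tbl->end; tbl++)
                        if (pc >= tbl->start && pc <= tbl->end)
                                return tbl->name;
                return NULL;            /* not a trap return address */
        }
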
                                                      
