/* SPDX-License-Identifier: GPL-2.0 */
/* arch/sparc/kernel/entry.S:  Sparc trap low-level entry points.
 *
 * Copyright (C) 1995, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/pgtable.h>

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/contregs.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/psr.h>
#include <asm/vaddrs.h>
#include <asm/page.h>
#include <asm/winmacro.h>
#include <asm/signal.h>
#include <asm/obio.h>
#include <asm/mxcc.h>
#include <asm/thread_info.h>
#include <asm/param.h>
#include <asm/unistd.h>

#include <asm/asmmacro.h>

#define curptr      g6
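
/* %g6 is reserved by the sparc32 kernel for the current task's
 * thread_info pointer, so "curptr" below is effectively "current".
 */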

/* These are just handy. */
#define _SV	save	%sp, -STACKFRAME_SZ, %sp
#define _RS	restore

#define FLUSH_ALL_KERNEL_WINDOWS \
	_SV; _SV; _SV; _SV; _SV; _SV; _SV; \
	_RS; _RS; _RS; _RS; _RS; _RS; _RS;
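
/* FLUSH_ALL_KERNEL_WINDOWS leans on the window overflow trap: once
 * the CPU runs out of register windows, each further save should trap
 * and spill an older window to the stack, so after seven nested saves
 * (enough on the usual 8-window implementations) every kernel window
 * has been written out; the restores then simply unwind again.
 */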

	.text

#ifdef CONFIG_KGDB
	.align	4
	.globl		arch_kgdb_breakpoint
	.type		arch_kgdb_breakpoint,#function
arch_kgdb_breakpoint:
	ta		0x7d
	retl
	 nop
	.size		arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
#endif

#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
	.align	4
	.globl	floppy_hardint
floppy_hardint:
	/*
	 * This code cannot touch registers %l0 %l1 and %l2
	 * because SAVE_ALL depends on their values. It depends
	 * on %l3 also, but we regenerate it before a call.
	 * Other registers are:
	 * %l3 -- base address of fdc registers
	 * %l4 -- pdma_vaddr
	 * %l5 -- scratch for ld/st address
	 * %l6 -- pdma_size
	 * %l7 -- scratch [floppy byte, ld/st address, aux. data]
	 */

	/* Do we have work to do? */
	sethi	%hi(doing_pdma), %l7
	ld	[%l7 + %lo(doing_pdma)], %l7
	cmp	%l7, 0
	be	floppy_dosoftint
	 nop

	/* Load fdc register base */
	sethi	%hi(fdc_status), %l3
	ld	[%l3 + %lo(fdc_status)], %l3

	/* Setup register addresses */
	sethi	%hi(pdma_vaddr), %l5	! transfer buffer
	ld	[%l5 + %lo(pdma_vaddr)], %l4
	sethi	%hi(pdma_size), %l5	! bytes to go
	ld	[%l5 + %lo(pdma_size)], %l6
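
	/* The loop below implements pseudo-DMA: from fast-interrupt
	 * context the CPU itself shuffles bytes between the FDC fifo
	 * and the buffer at pdma_vaddr, counting pdma_size down until
	 * the controller leaves non-DMA mode or the fifo drains.
	 */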
next_byte:
	ldub	[%l3], %l7

	andcc	%l7, 0x80, %g0		! Does fifo still have data
	bz	floppy_fifo_emptied	! fifo has been emptied...
	 andcc	%l7, 0x20, %g0		! in non-dma mode still?
	bz	floppy_overrun		! nope, overrun
	 andcc	%l7, 0x40, %g0		! 0=write 1=read
	bz	floppy_write
	 sub	%l6, 0x1, %l6

	/* Ok, actually read this byte */
	ldub	[%l3 + 1], %l7
	orcc	%g0, %l6, %g0
	stb	%l7, [%l4]
	bne	next_byte
	 add	%l4, 0x1, %l4

	b	floppy_tdone
	 nop

floppy_write:
	/* Ok, actually write this byte */
	ldub	[%l4], %l7
	orcc	%g0, %l6, %g0
	stb	%l7, [%l3 + 1]
	bne	next_byte
	 add	%l4, 0x1, %l4

	/* fall through... */
floppy_tdone:
	sethi	%hi(pdma_vaddr), %l5
	st	%l4, [%l5 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %l5
	st	%l6, [%l5 + %lo(pdma_size)]
	/* Flip terminal count pin */
	set	auxio_register, %l7
	ld	[%l7], %l7

	ldub	[%l7], %l5

	or	%l5, 0xc2, %l5
	stb	%l5, [%l7]
	andn	%l5, 0x02, %l5

2:
	/* Kill some time so the bits set */
	WRITE_PAUSE
	WRITE_PAUSE

	stb	%l5, [%l7]

	/* Prevent recursion */
	sethi	%hi(doing_pdma), %l7
	b	floppy_dosoftint
	 st	%g0, [%l7 + %lo(doing_pdma)]

	/* We emptied the FIFO, but we haven't read everything
	 * as of yet.  Store the current transfer address and
	 * bytes left to read so we can continue when the next
	 * fast IRQ comes in.
	 */
floppy_fifo_emptied:
	sethi	%hi(pdma_vaddr), %l5
	st	%l4, [%l5 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %l7
	st	%l6, [%l7 + %lo(pdma_size)]

	/* Restore condition codes */
	wr	%l0, 0x0, %psr
	WRITE_PAUSE

	jmp	%l1
	rett	%l2

floppy_overrun:
	sethi	%hi(pdma_vaddr), %l5
	st	%l4, [%l5 + %lo(pdma_vaddr)]
	sethi	%hi(pdma_size), %l5
	st	%l6, [%l5 + %lo(pdma_size)]
	/* Prevent recursion */
	sethi	%hi(doing_pdma), %l7
	st	%g0, [%l7 + %lo(doing_pdma)]

	/* fall through... */
floppy_dosoftint:
	rd	%wim, %l3
	SAVE_ALL

	/* Set all IRQs off. */
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE

	mov	11, %o0			! floppy irq level (unused anyway)
	mov	%g0, %o1		! devid is not used in fast interrupts
	call	sparc_floppy_irq
	 add	%sp, STACKFRAME_SZ, %o2	! struct pt_regs *regs

	RESTORE_ALL

#endif /* (CONFIG_BLK_DEV_FD) */
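
/* A note on the pattern used by the handlers below: on a trap the
 * hardware leaves the trapped PC and NPC in %l1 and %l2 of the new
 * window, and the trap-table stubs read %psr into %l0 (and, where a
 * handler needs it, leave the trap number in %l7) before branching
 * here.  SAVE_ALL builds a struct pt_regs on the kernel stack from
 * those values; by convention an instruction written with a leading
 * space sits in a branch delay slot.
 */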

	/* Bad trap handler */
	.globl	bad_trap_handler
bad_trap_handler:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0	! pt_regs
	call	do_hw_interrupt
	 mov	%l7, %o1		! trap number

	RESTORE_ALL

	/* For now all IRQ's not registered get sent here. handler_irq() will
	 * see if a routine is registered to handle this interrupt and if not
	 * it will say so on the console.
	 */
	.align	4
	.globl	real_irq_entry, patch_handler_irq
real_irq_entry:
	SAVE_ALL

#ifdef CONFIG_SMP
	.globl	patchme_maybe_smp_msg

	cmp	%l7, 11
patchme_maybe_smp_msg:
	bgu	maybe_smp4m_msg
	 nop
#endif

real_irq_continue:
	or	%l0, PSR_PIL, %g2
	wr	%g2, 0x0, %psr
	WRITE_PAUSE
	wr	%g2, PSR_ET, %psr
	WRITE_PAUSE
	mov	%l7, %o0		! irq level
patch_handler_irq:
	call	handler_irq
	 add	%sp, STACKFRAME_SZ, %o1	! pt_regs ptr
	or	%l0, PSR_PIL, %g2	! restore PIL after handler_irq
	wr	%g2, PSR_ET, %psr	! keep ET up
	WRITE_PAUSE

	RESTORE_ALL
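
/* The paired writes to %psr above follow a fixed discipline: the
 * first write raises PIL to mask interrupts while traps are still
 * disabled, the second sets PSR_ET to re-enable traps.  Writes to
 * %psr take effect with a delay on sparc32, hence the WRITE_PAUSE
 * after each one.
 */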

#ifdef CONFIG_SMP
	/* SMP per-cpu ticker interrupts are handled specially. */
smp4m_ticker:
	bne	real_irq_continue+4
	 or	%l0, PSR_PIL, %g2
	wr	%g2, 0x0, %psr
	WRITE_PAUSE
	wr	%g2, PSR_ET, %psr
	WRITE_PAUSE
	call	smp4m_percpu_timer_interrupt
	 add	%sp, STACKFRAME_SZ, %o0
	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE
	RESTORE_ALL

#define GET_PROCESSOR4M_ID(reg)	\
	rd	%tbr, %reg;	\
	srl	%reg, 12, %reg;	\
	and	%reg, 3, %reg;

	/* Here is where we check for possible SMP IPI passed to us
	 * on some level other than 15 which is the NMI and only used
	 * for cross calls.  That has a separate entry point below.
	 *
	 * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*.
	 */
maybe_smp4m_msg:
	GET_PROCESSOR4M_ID(o3)
	sethi	%hi(sun4m_irq_percpu), %l5
	sll	%o3, 2, %o3
	or	%l5, %lo(sun4m_irq_percpu), %o5
	sethi	%hi(0x70000000), %o2	! Check all soft-IRQs
	ld	[%o5 + %o3], %o1
	ld	[%o1 + 0x00], %o3	! sun4m_irq_percpu[cpu]->pending
	andcc	%o3, %o2, %g0
	be,a	smp4m_ticker
	 cmp	%l7, 14
	/* Soft-IRQ IPI */
	st	%o2, [%o1 + 0x04]	! sun4m_irq_percpu[cpu]->clear=0x70000000
	WRITE_PAUSE
	ld	[%o1 + 0x00], %g0	! sun4m_irq_percpu[cpu]->pending
	WRITE_PAUSE
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	srl	%o3, 28, %o2		! shift for simpler checks below
maybe_smp4m_msg_check_single:
	andcc	%o2, 0x1, %g0
	beq,a	maybe_smp4m_msg_check_mask
	 andcc	%o2, 0x2, %g0
	call	smp_call_function_single_interrupt
	 nop
	andcc	%o2, 0x2, %g0
maybe_smp4m_msg_check_mask:
	beq,a	maybe_smp4m_msg_check_resched
	 andcc	%o2, 0x4, %g0
	call	smp_call_function_interrupt
	 nop
	andcc	%o2, 0x4, %g0
maybe_smp4m_msg_check_resched:
	/* rescheduling is done in RESTORE_ALL regardless, but incr stats */
	beq,a	maybe_smp4m_msg_out
	 nop
	call	smp_resched_interrupt
	 nop
maybe_smp4m_msg_out:
	RESTORE_ALL

	.align	4
	.globl	linux_trap_ipi15_sun4m
linux_trap_ipi15_sun4m:
	SAVE_ALL
	sethi	%hi(0x80000000), %o2
	GET_PROCESSOR4M_ID(o0)
	sethi	%hi(sun4m_irq_percpu), %l5
	or	%l5, %lo(sun4m_irq_percpu), %o5
	sll	%o0, 2, %o0
	ld	[%o5 + %o0], %o5
	ld	[%o5 + 0x00], %o3	! sun4m_irq_percpu[cpu]->pending
	andcc	%o3, %o2, %g0
	be	sun4m_nmi_error		! Must be an NMI async memory error
	 st	%o2, [%o5 + 0x04]	! sun4m_irq_percpu[cpu]->clear=0x80000000
	WRITE_PAUSE
	ld	[%o5 + 0x00], %g0	! sun4m_irq_percpu[cpu]->pending
	WRITE_PAUSE
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	call	smp4m_cross_call_irq
	 nop
	b	ret_trap_lockless_ipi
	 clr	%l6
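
/* Note the acknowledge sequence used above: the handler stores to the
 * per-cpu "clear" register and then loads "pending" back into %g0.
 * The dummy load should push the store all the way out to the
 * interrupt controller before traps and interrupts are re-enabled.
 */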

	.globl	smp4d_ticker
	/* SMP per-cpu ticker interrupts are handled specially. */
smp4d_ticker:
	SAVE_ALL
	or	%l0, PSR_PIL, %g2
	sethi	%hi(CC_ICLR), %o0
	sethi	%hi(1 << 14), %o1
	or	%o0, %lo(CC_ICLR), %o0
	stha	%o1, [%o0] ASI_M_MXCC	/* Clear PIL 14 in MXCC's ICLR */
	wr	%g2, 0x0, %psr
	WRITE_PAUSE
	wr	%g2, PSR_ET, %psr
	WRITE_PAUSE
	call	smp4d_percpu_timer_interrupt
	 add	%sp, STACKFRAME_SZ, %o0
	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE
	RESTORE_ALL

	.align	4
	.globl	linux_trap_ipi15_sun4d
linux_trap_ipi15_sun4d:
	SAVE_ALL
	sethi	%hi(CC_BASE), %o4
	sethi	%hi(MXCC_ERR_ME|MXCC_ERR_PEW|MXCC_ERR_ASE|MXCC_ERR_PEE), %o2
	or	%o4, (CC_EREG - CC_BASE), %o0
	ldda	[%o0] ASI_M_MXCC, %o0
	andcc	%o0, %o2, %g0
	bne	1f
	 sethi	%hi(BB_STAT2), %o2
	lduba	[%o2] ASI_M_CTL, %o2
	andcc	%o2, BB_STAT2_MASK, %g0
	bne	2f
	 or	%o4, (CC_ICLR - CC_BASE), %o0
	sethi	%hi(1 << 15), %o1
	stha	%o1, [%o0] ASI_M_MXCC	/* Clear PIL 15 in MXCC's ICLR */
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	call	smp4d_cross_call_irq
	 nop
	b	ret_trap_lockless_ipi
	 clr	%l6

1:	/* MXCC error */
2:	/* BB error */
	/* Disable PIL 15 */
	set	CC_IMSK, %l4
	lduha	[%l4] ASI_M_MXCC, %l5
	sethi	%hi(1 << 15), %l7
	or	%l5, %l7, %l5
	stha	%l5, [%l4] ASI_M_MXCC
	/* FIXME */
1:	b,a	1b

	.globl	smpleon_ipi
	.extern leon_ipi_interrupt
	/* SMP per-cpu IPI interrupts are handled specially. */
smpleon_ipi:
	SAVE_ALL
	or	%l0, PSR_PIL, %g2
	wr	%g2, 0x0, %psr
	WRITE_PAUSE
	wr	%g2, PSR_ET, %psr
	WRITE_PAUSE
	call	leonsmp_ipi_interrupt
	 add	%sp, STACKFRAME_SZ, %o1 ! pt_regs
	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE
	RESTORE_ALL

	.align	4
	.globl	linux_trap_ipi15_leon
linux_trap_ipi15_leon:
	SAVE_ALL
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	call	leon_cross_call_irq
	 nop
	b	ret_trap_lockless_ipi
	 clr	%l6

#endif /* CONFIG_SMP */
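
/* Most of the exception handlers below share one shape: SAVE_ALL to
 * build a pt_regs frame, a wr/WRITE_PAUSE pair to re-enable traps,
 * a call into the C handler with the frame address in %o0 and the
 * saved %pc/%npc/%psr as further arguments, then RESTORE_ALL to
 * return to the trapped context.
 */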

	/* This routine handles illegal instructions and privileged
	 * instruction attempts from user code.
	 */
	.align	4
	.globl	bad_instruction
bad_instruction:
	sethi	%hi(0xc1f80000), %l4	! mask: op and op3 fields
	ld	[%l1], %l5		! fetch the trapped instruction
	sethi	%hi(0x81d80000), %l7	! FLUSH opcode under that mask
	and	%l5, %l4, %l5
	cmp	%l5, %l7
	be	1f
	 SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	do_illegal_instruction
	 mov	%l0, %o3

	RESTORE_ALL

1:	/* unimplemented flush - just skip */
	jmpl	%l2, %g0
	 rett	%l2 + 4

	.align	4
	.globl	priv_instruction
priv_instruction:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	do_priv_instruction
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles unaligned data accesses. */
	.align	4
	.globl	mna_handler
mna_handler:
	andcc	%l0, PSR_PS, %g0
	be	mna_fromuser
	 nop

	SAVE_ALL

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE
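
	/* %l1 still holds the PC of the faulting instruction, so the
	 * load below hands the raw instruction word to the handler
	 * for decoding.
	 */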
	ld	[%l1], %o1
	call	kernel_unaligned_trap
	 add	%sp, STACKFRAME_SZ, %o0

	RESTORE_ALL

mna_fromuser:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	ld	[%l1], %o1
	call	user_unaligned_trap
	 add	%sp, STACKFRAME_SZ, %o0

	RESTORE_ALL

	/* This routine handles floating point disabled traps. */
	.align	4
	.globl	fpd_trap_handler
fpd_trap_handler:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	do_fpd_trap
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Floating Point Exceptions. */
	.align	4
	.globl	fpe_trap_handler
fpe_trap_handler:
	set	fpsave_magic, %l5	! did we trap inside fpsave?
	cmp	%l1, %l5
	be	1f
	 sethi	%hi(fpsave), %l5
	or	%l5, %lo(fpsave), %l5
	cmp	%l1, %l5
	bne	2f
	 sethi	%hi(fpsave_catch2), %l5
	or	%l5, %lo(fpsave_catch2), %l5
	wr	%l0, 0x0, %psr
	WRITE_PAUSE
	jmp	%l5
	 rett	%l5 + 4
1:
	sethi	%hi(fpsave_catch), %l5
	or	%l5, %lo(fpsave_catch), %l5
	wr	%l0, 0x0, %psr
	WRITE_PAUSE
	jmp	%l5
	 rett	%l5 + 4
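
	/* The trap did not come from inside fpsave itself, so handle
	 * a genuine FP exception the ordinary way.
	 */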
2:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	do_fpe_trap
	 mov	%l0, %o3

	RESTORE_ALL
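
	/* Tag overflow traps fire for the SPARC tagged-arithmetic
	 * instructions (taddcctv/tsubcctv), which some language
	 * runtimes use for dynamically typed integers.
	 */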
	/* This routine handles Tag Overflow Exceptions. */
	.align	4
	.globl	do_tag_overflow
do_tag_overflow:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_tag_overflow
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Watchpoint Exceptions. */
	.align	4
	.globl	do_watchpoint
do_watchpoint:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_watchpoint
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Register Access Exceptions. */
	.align	4
	.globl	do_reg_access
do_reg_access:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_reg_access
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Co-Processor Disabled Exceptions. */
	.align	4
	.globl	do_cp_disabled
do_cp_disabled:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_cp_disabled
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Co-Processor Exceptions. */
	.align	4
	.globl	do_cp_exception
do_cp_exception:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_cp_exception
	 mov	%l0, %o3

	RESTORE_ALL

	/* This routine handles Hardware Divide By Zero Exceptions. */
	.align	4
	.globl	do_hw_divzero
do_hw_divzero:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr		! re-enable traps
	WRITE_PAUSE

	add	%sp, STACKFRAME_SZ, %o0
	mov	%l1, %o1
	mov	%l2, %o2
	call	handle_hw_divzero
	 mov	%l0, %o3

	RESTORE_ALL
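
/* do_flush_windows backs the "flush windows" software trap that user
 * code (setjmp/longjmp, debuggers, garbage collectors) issues to get
 * its register windows written to the stack.  The trap does not
 * restart the trapping instruction, so the handler steps PC/NPC past
 * it by hand.
 */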

	.align	4
	.globl	do_flush_windows
do_flush_windows:
	SAVE_ALL

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	andcc	%l0, PSR_PS, %g0
	bne	dfw_kernel
	 nop

	call	flush_user_windows
	 nop

	/* Advance over the trap instruction. */
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1
	add	%l1, 0x4, %l2
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

	RESTORE_ALL

	.globl	flush_patch_one

	/* We get these for debugging routines using __builtin_return_address() */
dfw_kernel:
flush_patch_one:
	FLUSH_ALL_KERNEL_WINDOWS

	/* Advance over the trap instruction. */
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %l1
	add	%l1, 0x4, %l2
	st	%l1, [%sp + STACKFRAME_SZ + PT_PC]
	st	%l2, [%sp + STACKFRAME_SZ + PT_NPC]

	RESTORE_ALL

	/* The getcc software trap.  The user wants the condition codes from
	 * the %psr in register %g1.
	 */

	.align	4
	.globl	getcc_trap_handler
getcc_trap_handler:
	srl	%l0, 20, %g1	! give user
	and	%g1, 0xf, %g1	! only ICC bits in %psr
	jmp	%l2		! advance over trap instruction
	 rett	%l2 + 0x4	! like this...
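
	/* The integer condition codes live in bits 23:20 of the %psr
	 * (PSR_ICC), which is why getcc shifts right by 20 and masks
	 * with 0xf, and setcc below shifts the user value left by
	 * 0x14 (20) before merging it back in.
	 */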

	/* The setcc software trap.  The user has condition codes in %g1
	 * that it would like placed in the %psr.  Be careful not to flip
	 * any unintentional bits!
	 */

	.align	4
	.globl	setcc_trap_handler
setcc_trap_handler:
	sll	%g1, 0x14, %l4
	set	PSR_ICC, %l5
	andn	%l0, %l5, %l0	! clear ICC bits in %psr
	and	%l4, %l5, %l4	! clear non-ICC bits in user value
	or	%l4, %l0, %l4	! or them in... mix mix mix

	wr	%l4, 0x0, %psr	! set new %psr
	WRITE_PAUSE		! TI scumbags...

	jmp	%l2		! advance over trap instruction
	 rett	%l2 + 0x4	! like this...

sun4m_nmi_error:
	/* NMI async memory error handling. */
	sethi	%hi(0x80000000), %l4
	sethi	%hi(sun4m_irq_global), %o5
	ld	[%o5 + %lo(sun4m_irq_global)], %l5
	st	%l4, [%l5 + 0x0c]	! sun4m_irq_global->mask_set=0x80000000
	WRITE_PAUSE
	ld	[%l5 + 0x00], %g0	! sun4m_irq_global->pending
	WRITE_PAUSE
	or	%l0, PSR_PIL, %l4
	wr	%l4, 0x0, %psr
	WRITE_PAUSE
	wr	%l4, PSR_ET, %psr
	WRITE_PAUSE
	call	sun4m_nmi
	 nop
	st	%l4, [%l5 + 0x08]	! sun4m_irq_global->mask_clear=0x80000000
	WRITE_PAUSE
	ld	[%l5 + 0x00], %g0	! sun4m_irq_global->pending
	WRITE_PAUSE
	RESTORE_ALL

#ifndef CONFIG_SMP
	.align	4
	.globl	linux_trap_ipi15_sun4m
linux_trap_ipi15_sun4m:
	SAVE_ALL

	ba	sun4m_nmi_error
	 nop
#endif /* CONFIG_SMP */

	.align	4
	.globl	srmmu_fault
srmmu_fault:
	mov	0x400, %l5
	mov	0x300, %l4

LEON_PI(lda	[%l5] ASI_LEON_MMUREGS, %l6)	! read sfar first
SUN_PI_(lda	[%l5] ASI_M_MMUREGS, %l6)	! read sfar first

LEON_PI(lda	[%l4] ASI_LEON_MMUREGS, %l5)	! read sfsr last
SUN_PI_(lda	[%l4] ASI_M_MMUREGS, %l5)	! read sfsr last
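
	/* The read order matters: 0x400 is the fault address register
	 * and 0x300 the fault status register, and reading the status
	 * register presumably clears the latched fault in the SRMMU,
	 * so the address is sampled first and the status last, as the
	 * comments say.
	 */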

	andn	%l6, 0xfff, %l6
	srl	%l5, 6, %l5			! and encode all info into l7

	and	%l5, 2, %l5
	or	%l5, %l6, %l6

	or	%l6, %l7, %l7			! l7 = [addr,write,txtfault]

	SAVE_ALL

	mov	%l7, %o1
	mov	%l7, %o2
	and	%o1, 1, %o1		! arg2 = text_faultp
	mov	%l7, %o3
	and	%o2, 2, %o2		! arg3 = writep
	andn	%o3, 0xfff, %o3	! arg4 = faulting address

	wr	%l0, PSR_ET, %psr
	WRITE_PAUSE

	call	do_sparc_fault
	 add	%sp, STACKFRAME_SZ, %o0	! arg1 = pt_regs ptr

	RESTORE_ALL

	.align	4
sunos_execv:
	.globl	sunos_execv
	b	sys_execve
	 clr	%i2

	.align	4
	.globl	sys_sigstack
sys_sigstack:
	mov	%o7, %l5
	mov	%fp, %o2
	call	do_sys_sigstack
	 mov	%l5, %o7

	.align	4
	.globl	sys_sigreturn
sys_sigreturn:
	call	do_sigreturn
	 add	%sp, STACKFRAME_SZ, %o0

	ld	[%curptr + TI_FLAGS], %l5
	andcc	%l5, _TIF_SYSCALL_TRACE, %g0
	be	1f
	 nop

	call	syscall_trace
	 mov	1, %o1

1:
	/* We don't want to muck with user registers like a
	 * normal syscall, just return.
	 */
	RESTORE_ALL

	.align	4
	.globl	sys_rt_sigreturn
sys_rt_sigreturn:
	call	do_rt_sigreturn
841 add %sp, STACKFRAME_SZ, %o0 1509 * a1: used, stack pointer !! 842 1510 * a2: kernel stack pointer !! 843 ld [%curptr + TI_FLAGS], %l5 1511 * a3: available !! 844 andcc %l5, _TIF_SYSCALL_TRACE, %g0 1512 * depc: exception address !! 845 be 1f 1513 * excsave: exctable !! 846 nop 1514 * Note: This frame might be the same !! 847 1515 */ !! 848 add %sp, STACKFRAME_SZ, %o0 1516 !! 849 call syscall_trace 1517 /* Setup stack pointer. */ !! 850 mov 1, %o1 1518 << 1519 addi a2, a2, -PT_USER_SIZE << 1520 s32i a0, a2, PT_AREG0 << 1521 << 1522 /* Make sure we return to this fixup << 1523 << 1524 movi a3, fast_syscall_spill_regist << 1525 s32i a3, a2, PT_DEPC # set << 1526 << 1527 /* Jump to the exception handler. */ << 1528 << 1529 rsr a3, excsave1 << 1530 rsr a0, exccause << 1531 addx4 a0, a0, a3 << 1532 l32i a0, a0, EXC_TABLE_FAST_USER << 1533 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE << 1534 jx a0 << 1535 << 1536 ENDPROC(fast_syscall_spill_registers_fixup) << 1537 << 1538 ENTRY(fast_syscall_spill_registers_fixup_retu << 1539 << 1540 /* When we return here, all registers << 1541 << 1542 wsr a2, depc # exc << 1543 << 1544 /* Restore fixup handler. */ << 1545 << 1546 rsr a2, excsave1 << 1547 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE << 1548 movi a3, fast_syscall_spill_regist << 1549 s32i a3, a2, EXC_TABLE_FIXUP << 1550 rsr a3, windowbase << 1551 s32i a3, a2, EXC_TABLE_PARAM << 1552 l32i a2, a2, EXC_TABLE_KSTK << 1553 << 1554 /* Load WB at the time the exception << 1555 << 1556 rsr a3, sar # WB << 1557 neg a3, a3 << 1558 wsr a3, windowbase << 1559 rsync << 1560 << 1561 rsr a3, excsave1 << 1562 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE << 1563 << 1564 rfde << 1565 << 1566 ENDPROC(fast_syscall_spill_registers_fixup_re << 1567 << 1568 #else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS << 1569 << 1570 ENTRY(fast_syscall_spill_registers) << 1571 << 1572 l32i a0, a2, PT_AREG0 # res << 1573 movi a2, -ENOSYS << 1574 rfe << 1575 << 1576 ENDPROC(fast_syscall_spill_registers) << 1577 << 1578 #endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS << 1579 << 1580 #ifdef CONFIG_MMU << 1581 /* << 1582 * We should never get here. Bail out! << 1583 */ << 1584 << 1585 ENTRY(fast_second_level_miss_double_kernel) << 1586 851 1587 1: 852 1: 1588 call0 unrecoverable_exception !! 853 /* We are returning to a signal handler. */ 1589 1: j 1b !! 854 RESTORE_ALL 1590 << 1591 ENDPROC(fast_second_level_miss_double_kernel) << 1592 << 1593 /* First-level entry handler for user, kernel << 1594 * TLB miss exceptions. Note that for now, u << 1595 * exceptions share the same entry point and << 1596 * << 1597 * An old, less-efficient C version of this f << 1598 * We include it below, interleaved as commen << 1599 * << 1600 * Entry condition: << 1601 * << 1602 * a0: trashed, original value saved << 1603 * a1: a1 << 1604 * a2: new stack pointer, original i << 1605 * a3: a3 << 1606 * depc: a2, original value saved on s << 1607 * excsave_1: dispatch table << 1608 * << 1609 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRES << 1610 * < VALID_DOUBLE_EXCEPTION_ADDRES << 1611 */ << 1612 << 1613 ENTRY(fast_second_level_miss) << 1614 855 1615 /* Save a1 and a3. Note: we don't exp !! 856 /* Now that we have a real sys_clone, sys_fork() is 1616 !! 857 * implemented in terms of it. Our _real_ implementation 1617 s32i a1, a2, PT_AREG1 !! 858 * of SunOS vfork() will use sys_vfork(). 1618 s32i a3, a2, PT_AREG3 << 1619 << 1620 /* We need to map the page of PTEs fo << 1621 * the pointer to that page. Also, i << 1622 * to be NULL while tsk->active_mm is << 1623 * a vmalloc address. 
In that rare c << 1624 * active_mm instead to avoid a fault << 1625 * << 1626 * http://mail.nl.linux.org/linux-mm/ << 1627 * (or search Internet on "mm vs. a << 1628 * << 1629 * if (!mm) << 1630 * mm = tsk->active_mm; << 1631 * pgd = pgd_offset (mm, regs->e << 1632 * pmd = pmd_offset (pgd, regs-> << 1633 * pmdval = *pmd; << 1634 */ << 1635 << 1636 GET_CURRENT(a1,a2) << 1637 l32i a0, a1, TASK_MM # tsk << 1638 beqz a0, .Lfast_second_level_miss_ << 1639 << 1640 .Lfast_second_level_miss_continue: << 1641 rsr a3, excvaddr # fau << 1642 _PGD_OFFSET(a0, a3, a1) << 1643 l32i a0, a0, 0 # rea << 1644 beqz a0, .Lfast_second_level_miss_ << 1645 << 1646 /* Read ptevaddr and convert to top o << 1647 * 859 * 1648 * vpnval = read_ptevaddr_regist !! 860 * XXX These three should be consolidated into mostly shared 1649 * vpnval += DTLB_WAY_PGTABLE; !! 861 * XXX code just like on sparc64... -DaveM 1650 * pteval = mk_pte (virt_to_page << 1651 * write_dtlb_entry (pteval, vpn << 1652 * << 1653 * The messy computation for 'pteval' << 1654 * into the following: << 1655 * << 1656 * pteval = ((pmdval - PAGE_OFFSET + << 1657 * | PAGE_DIRECTORY << 1658 */ << 1659 << 1660 movi a1, (PHYS_OFFSET - PAGE_OFFSE << 1661 add a0, a0, a1 # pmd << 1662 extui a1, a0, 0, PAGE_SHIFT # ... << 1663 xor a0, a0, a1 << 1664 << 1665 movi a1, _PAGE_DIRECTORY << 1666 or a0, a0, a1 # ... << 1667 << 1668 /* << 1669 * We utilize all three wired-ways (7 << 1670 * Memory regions are mapped to the D << 1671 * This allows to map the three most << 1672 * DTLBs: << 1673 * 0,1 -> way 7 program (0040 << 1674 * 2 -> way 8 shared libari << 1675 * 3 -> way 0 stack (3000.0 << 1676 */ 862 */ >> 863 .align 4 >> 864 .globl sys_fork, flush_patch_two >> 865 sys_fork: >> 866 mov %o7, %l5 >> 867 flush_patch_two: >> 868 FLUSH_ALL_KERNEL_WINDOWS; >> 869 ld [%curptr + TI_TASK], %o4 >> 870 rd %psr, %g4 >> 871 WRITE_PAUSE >> 872 rd %wim, %g5 >> 873 WRITE_PAUSE >> 874 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] >> 875 add %sp, STACKFRAME_SZ, %o0 >> 876 call sparc_fork >> 877 mov %l5, %o7 >> 878 >> 879 /* Whee, kernel threads! */ >> 880 .globl sys_clone, flush_patch_three >> 881 sys_clone: >> 882 mov %o7, %l5 >> 883 flush_patch_three: >> 884 FLUSH_ALL_KERNEL_WINDOWS; >> 885 ld [%curptr + TI_TASK], %o4 >> 886 rd %psr, %g4 >> 887 WRITE_PAUSE >> 888 rd %wim, %g5 >> 889 WRITE_PAUSE >> 890 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] >> 891 add %sp, STACKFRAME_SZ, %o0 >> 892 call sparc_clone >> 893 mov %l5, %o7 >> 894 >> 895 /* Whee, real vfork! */ >> 896 .globl sys_vfork, flush_patch_four >> 897 sys_vfork: >> 898 flush_patch_four: >> 899 FLUSH_ALL_KERNEL_WINDOWS; >> 900 ld [%curptr + TI_TASK], %o4 >> 901 rd %psr, %g4 >> 902 WRITE_PAUSE >> 903 rd %wim, %g5 >> 904 WRITE_PAUSE >> 905 std %g4, [%o4 + AOFF_task_thread + AOFF_thread_fork_kpsr] >> 906 sethi %hi(sparc_vfork), %l1 >> 907 jmpl %l1 + %lo(sparc_vfork), %g0 >> 908 add %sp, STACKFRAME_SZ, %o0 >> 909 >> 910 .align 4 >> 911 linux_sparc_ni_syscall: >> 912 sethi %hi(sys_ni_syscall), %l7 >> 913 b do_syscall >> 914 or %l7, %lo(sys_ni_syscall), %l7 >> 915 >> 916 linux_syscall_trace: >> 917 add %sp, STACKFRAME_SZ, %o0 >> 918 call syscall_trace >> 919 mov 0, %o1 >> 920 cmp %o0, 0 >> 921 bne 3f >> 922 mov -ENOSYS, %o0 >> 923 >> 924 /* Syscall tracing can modify the registers. 
*/ >> 925 ld [%sp + STACKFRAME_SZ + PT_G1], %g1 >> 926 sethi %hi(sys_call_table), %l7 >> 927 ld [%sp + STACKFRAME_SZ + PT_I0], %i0 >> 928 or %l7, %lo(sys_call_table), %l7 >> 929 ld [%sp + STACKFRAME_SZ + PT_I1], %i1 >> 930 ld [%sp + STACKFRAME_SZ + PT_I2], %i2 >> 931 ld [%sp + STACKFRAME_SZ + PT_I3], %i3 >> 932 ld [%sp + STACKFRAME_SZ + PT_I4], %i4 >> 933 ld [%sp + STACKFRAME_SZ + PT_I5], %i5 >> 934 cmp %g1, NR_syscalls >> 935 bgeu 3f >> 936 mov -ENOSYS, %o0 >> 937 >> 938 sll %g1, 2, %l4 >> 939 mov %i0, %o0 >> 940 ld [%l7 + %l4], %l7 >> 941 mov %i1, %o1 >> 942 mov %i2, %o2 >> 943 mov %i3, %o3 >> 944 b 2f >> 945 mov %i4, %o4 >> 946 >> 947 .globl ret_from_fork >> 948 ret_from_fork: >> 949 call schedule_tail >> 950 ld [%g3 + TI_TASK], %o0 >> 951 b ret_sys_call >> 952 ld [%sp + STACKFRAME_SZ + PT_I0], %o0 >> 953 >> 954 .globl ret_from_kernel_thread >> 955 ret_from_kernel_thread: >> 956 call schedule_tail >> 957 ld [%g3 + TI_TASK], %o0 >> 958 ld [%sp + STACKFRAME_SZ + PT_G1], %l0 >> 959 call %l0 >> 960 ld [%sp + STACKFRAME_SZ + PT_G2], %o0 >> 961 rd %psr, %l1 >> 962 ld [%sp + STACKFRAME_SZ + PT_PSR], %l0 >> 963 andn %l0, PSR_CWP, %l0 >> 964 nop >> 965 and %l1, PSR_CWP, %l1 >> 966 or %l0, %l1, %l0 >> 967 st %l0, [%sp + STACKFRAME_SZ + PT_PSR] >> 968 b ret_sys_call >> 969 mov 0, %o0 1677 970 1678 extui a3, a3, 28, 2 # add !! 971 /* Linux native system calls enter here... */ 1679 rsr a1, ptevaddr !! 972 .align 4 1680 addx2 a3, a3, a3 # -> !! 973 .globl linux_sparc_syscall 1681 srli a1, a1, PAGE_SHIFT !! 974 linux_sparc_syscall: 1682 extui a3, a3, 2, 2 # -> !! 975 sethi %hi(PSR_SYSCALL), %l4 1683 slli a1, a1, PAGE_SHIFT # pte !! 976 or %l0, %l4, %l0 1684 addi a3, a3, DTLB_WAY_PGD !! 977 /* Direct access to user regs, must faster. */ 1685 add a1, a1, a3 # ... !! 978 cmp %g1, NR_syscalls 1686 !! 979 bgeu linux_sparc_ni_syscall 1687 .Lfast_second_level_miss_wdtlb: !! 980 sll %g1, 2, %l4 1688 wdtlb a0, a1 !! 981 ld [%l7 + %l4], %l7 1689 dsync !! 982 1690 !! 983 do_syscall: 1691 /* Exit critical section. */ !! 984 SAVE_ALL_HEAD 1692 .Lfast_second_level_miss_skip_wdtlb: !! 985 rd %wim, %l3 1693 rsr a3, excsave1 !! 986 1694 movi a0, 0 !! 987 wr %l0, PSR_ET, %psr 1695 s32i a0, a3, EXC_TABLE_FIXUP !! 988 mov %i0, %o0 1696 !! 989 mov %i1, %o1 1697 /* Restore the working registers, and !! 990 mov %i2, %o2 1698 !! 991 1699 l32i a0, a2, PT_AREG0 !! 992 ld [%curptr + TI_FLAGS], %l5 1700 l32i a1, a2, PT_AREG1 !! 993 mov %i3, %o3 1701 l32i a3, a2, PT_AREG3 !! 994 andcc %l5, _TIF_SYSCALL_TRACE, %g0 1702 l32i a2, a2, PT_DEPC !! 995 mov %i4, %o4 1703 !! 996 bne linux_syscall_trace 1704 bgeui a2, VALID_DOUBLE_EXCEPTION_AD !! 997 mov %i0, %l6 1705 !! 998 2: 1706 /* Restore excsave1 and return. */ !! 999 call %l7 1707 !! 1000 mov %i5, %o5 1708 rsr a2, depc << 1709 rfe << 1710 << 1711 /* Return from double exception. */ << 1712 << 1713 1: xsr a2, depc << 1714 esync << 1715 rfde << 1716 << 1717 .Lfast_second_level_miss_no_mm: << 1718 l32i a0, a1, TASK_ACTIVE_MM # unl << 1719 bnez a0, .Lfast_second_level_miss_ << 1720 << 1721 /* Even more unlikely case active_mm << 1722 * We can get here with NMI in the mi << 1723 * touches vmalloc area. << 1724 */ << 1725 movi a0, init_mm << 1726 j .Lfast_second_level_miss_cont << 1727 1001 1728 .Lfast_second_level_miss_no_pmd: !! 1002 3: 1729 #if (DCACHE_WAY_SIZE > PAGE_SIZE) !! 1003 st %o0, [%sp + STACKFRAME_SZ + PT_I0] 1730 1004 1731 /* Special case for cache aliasing. !! 1005 ret_sys_call: 1732 * We (should) only get here if a cle !! 
1006 ld [%curptr + TI_FLAGS], %l5 1733 * or the aliased cache flush functio !! 1007 cmp %o0, -ERESTART_RESTARTBLOCK 1734 * by another task. Re-establish temp !! 1008 ld [%sp + STACKFRAME_SZ + PT_PSR], %g3 1735 * TLBTEMP_BASE areas. !! 1009 set PSR_C, %g2 >> 1010 bgeu 1f >> 1011 andcc %l5, _TIF_SYSCALL_TRACE, %g0 >> 1012 >> 1013 /* System call success, clear Carry condition code. */ >> 1014 andn %g3, %g2, %g3 >> 1015 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] >> 1016 bne linux_syscall_trace2 >> 1017 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ >> 1018 add %l1, 0x4, %l2 /* npc = npc+4 */ >> 1019 st %l1, [%sp + STACKFRAME_SZ + PT_PC] >> 1020 b ret_trap_entry >> 1021 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] >> 1022 1: >> 1023 /* System call failure, set Carry condition code. >> 1024 * Also, get abs(errno) to return to the process. 1736 */ 1025 */ >> 1026 sub %g0, %o0, %o0 >> 1027 or %g3, %g2, %g3 >> 1028 st %o0, [%sp + STACKFRAME_SZ + PT_I0] >> 1029 st %g3, [%sp + STACKFRAME_SZ + PT_PSR] >> 1030 bne linux_syscall_trace2 >> 1031 ld [%sp + STACKFRAME_SZ + PT_NPC], %l1 /* pc = npc */ >> 1032 add %l1, 0x4, %l2 /* npc = npc+4 */ >> 1033 st %l1, [%sp + STACKFRAME_SZ + PT_PC] >> 1034 b ret_trap_entry >> 1035 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] >> 1036 >> 1037 linux_syscall_trace2: >> 1038 add %sp, STACKFRAME_SZ, %o0 >> 1039 mov 1, %o1 >> 1040 call syscall_trace >> 1041 add %l1, 0x4, %l2 /* npc = npc+4 */ >> 1042 st %l1, [%sp + STACKFRAME_SZ + PT_PC] >> 1043 b ret_trap_entry >> 1044 st %l2, [%sp + STACKFRAME_SZ + PT_NPC] 1737 1045 1738 /* We shouldn't be in a double except << 1739 << 1740 l32i a0, a2, PT_DEPC << 1741 bgeui a0, VALID_DOUBLE_EXCEPTION_AD << 1742 << 1743 /* Make sure the exception originated << 1744 << 1745 movi a0, __tlbtemp_mapping_start << 1746 rsr a3, epc1 << 1747 bltu a3, a0, .Lfast_second_level_m << 1748 movi a0, __tlbtemp_mapping_end << 1749 bgeu a3, a0, .Lfast_second_level_m << 1750 << 1751 /* Check if excvaddr was in one of th << 1752 << 1753 movi a3, TLBTEMP_BASE_1 << 1754 rsr a0, excvaddr << 1755 bltu a0, a3, .Lfast_second_level_m << 1756 << 1757 addi a1, a0, -TLBTEMP_SIZE << 1758 bgeu a1, a3, .Lfast_second_level_m << 1759 << 1760 /* Check if we have to restore an ITL << 1761 << 1762 movi a1, __tlbtemp_mapping_itlb << 1763 rsr a3, epc1 << 1764 sub a3, a3, a1 << 1765 << 1766 /* Calculate VPN */ << 1767 << 1768 movi a1, PAGE_MASK << 1769 and a1, a1, a0 << 1770 << 1771 /* Jump for ITLB entry */ << 1772 << 1773 bgez a3, 1f << 1774 << 1775 /* We can use up to two TLBTEMP areas << 1776 << 1777 extui a3, a0, PAGE_SHIFT + DCACHE_A << 1778 add a1, a3, a1 << 1779 << 1780 /* PPN is in a6 for the first TLBTEMP << 1781 << 1782 mov a0, a6 << 1783 movnez a0, a7, a3 << 1784 j .Lfast_second_level_miss_wdtl << 1785 << 1786 /* ITLB entry. We only use dst in a6. << 1787 << 1788 1: witlb a6, a1 << 1789 isync << 1790 j .Lfast_second_level_miss_skip << 1791 << 1792 << 1793 #endif // DCACHE_WAY_SIZE > PAGE_SIZE << 1794 << 1795 /* Invalid PGD, default exception han << 1796 .Lfast_second_level_miss_slow: << 1797 << 1798 rsr a1, depc << 1799 s32i a1, a2, PT_AREG2 << 1800 mov a1, a2 << 1801 << 1802 rsr a2, ps << 1803 bbsi.l a2, PS_UM_BIT, 1f << 1804 call0 _kernel_exception << 1805 1: call0 _user_exception << 1806 << 1807 ENDPROC(fast_second_level_miss) << 1808 1046 1809 /* !! 1047 /* Saving and restoring the FPU state is best done from lowlevel code. 1810 * StoreProhibitedException << 1811 * 1048 * 1812 * Update the pte and invalidate the itlb map !! 
1049 * void fpsave(unsigned long *fpregs, unsigned long *fsr, 1813 * !! 1050 * void *fpqueue, unsigned long *fpqdepth) 1814 * Entry condition: << 1815 * << 1816 * a0: trashed, original value saved << 1817 * a1: a1 << 1818 * a2: new stack pointer, original i << 1819 * a3: a3 << 1820 * depc: a2, original value saved on s << 1821 * excsave_1: dispatch table << 1822 * << 1823 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRES << 1824 * < VALID_DOUBLE_EXCEPTION_ADDRES << 1825 */ 1051 */ 1826 1052 1827 ENTRY(fast_store_prohibited) !! 1053 .globl fpsave 1828 !! 1054 fpsave: 1829 /* Save a1 and a3. */ !! 1055 st %fsr, [%o1] ! this can trap on us if fpu is in bogon state >> 1056 ld [%o1], %g1 >> 1057 set 0x2000, %g4 >> 1058 andcc %g1, %g4, %g0 >> 1059 be 2f >> 1060 mov 0, %g2 1830 1061 1831 s32i a1, a2, PT_AREG1 !! 1062 /* We have an fpqueue to save. */ 1832 s32i a3, a2, PT_AREG3 !! 1063 1: >> 1064 std %fq, [%o2] >> 1065 fpsave_magic: >> 1066 st %fsr, [%o1] >> 1067 ld [%o1], %g3 >> 1068 andcc %g3, %g4, %g0 >> 1069 add %g2, 1, %g2 >> 1070 bne 1b >> 1071 add %o2, 8, %o2 1833 1072 1834 GET_CURRENT(a1,a2) !! 1073 2: 1835 l32i a0, a1, TASK_MM # tsk !! 1074 st %g2, [%o3] 1836 beqz a0, .Lfast_store_no_mm << 1837 << 1838 .Lfast_store_continue: << 1839 rsr a1, excvaddr # fau << 1840 _PGD_OFFSET(a0, a1, a3) << 1841 l32i a0, a0, 0 << 1842 beqz a0, .Lfast_store_slow << 1843 1075 1844 /* !! 1076 std %f0, [%o0 + 0x00] 1845 * Note that we test _PAGE_WRITABLE_B !! 1077 std %f2, [%o0 + 0x08] 1846 * and is not PAGE_NONE. See pgtable. !! 1078 std %f4, [%o0 + 0x10] >> 1079 std %f6, [%o0 + 0x18] >> 1080 std %f8, [%o0 + 0x20] >> 1081 std %f10, [%o0 + 0x28] >> 1082 std %f12, [%o0 + 0x30] >> 1083 std %f14, [%o0 + 0x38] >> 1084 std %f16, [%o0 + 0x40] >> 1085 std %f18, [%o0 + 0x48] >> 1086 std %f20, [%o0 + 0x50] >> 1087 std %f22, [%o0 + 0x58] >> 1088 std %f24, [%o0 + 0x60] >> 1089 std %f26, [%o0 + 0x68] >> 1090 std %f28, [%o0 + 0x70] >> 1091 retl >> 1092 std %f30, [%o0 + 0x78] >> 1093 >> 1094 /* Thanks for Theo Deraadt and the authors of the Sprite/netbsd/openbsd >> 1095 * code for pointing out this possible deadlock, while we save state >> 1096 * above we could trap on the fsr store so our low level fpu trap >> 1097 * code has to know how to deal with this. >> 1098 */ >> 1099 fpsave_catch: >> 1100 b fpsave_magic + 4 >> 1101 st %fsr, [%o1] >> 1102 >> 1103 fpsave_catch2: >> 1104 b fpsave + 4 >> 1105 st %fsr, [%o1] >> 1106 >> 1107 /* void fpload(unsigned long *fpregs, unsigned long *fsr); */ >> 1108 >> 1109 .globl fpload >> 1110 fpload: >> 1111 ldd [%o0 + 0x00], %f0 >> 1112 ldd [%o0 + 0x08], %f2 >> 1113 ldd [%o0 + 0x10], %f4 >> 1114 ldd [%o0 + 0x18], %f6 >> 1115 ldd [%o0 + 0x20], %f8 >> 1116 ldd [%o0 + 0x28], %f10 >> 1117 ldd [%o0 + 0x30], %f12 >> 1118 ldd [%o0 + 0x38], %f14 >> 1119 ldd [%o0 + 0x40], %f16 >> 1120 ldd [%o0 + 0x48], %f18 >> 1121 ldd [%o0 + 0x50], %f20 >> 1122 ldd [%o0 + 0x58], %f22 >> 1123 ldd [%o0 + 0x60], %f24 >> 1124 ldd [%o0 + 0x68], %f26 >> 1125 ldd [%o0 + 0x70], %f28 >> 1126 ldd [%o0 + 0x78], %f30 >> 1127 ld [%o1], %fsr >> 1128 retl >> 1129 nop >> 1130 >> 1131 /* __ndelay and __udelay take two arguments: >> 1132 * 0 - nsecs or usecs to delay >> 1133 * 1 - per_cpu udelay_val (loops per jiffy) >> 1134 * >> 1135 * Note that ndelay gives HZ times higher resolution but has a 10ms >> 1136 * limit. udelay can handle up to 1s. 1847 */ 1137 */ >> 1138 .globl __ndelay >> 1139 __ndelay: >> 1140 save %sp, -STACKFRAME_SZ, %sp >> 1141 mov %i0, %o0 ! round multiplier up so large ns ok >> 1142 mov 0x1ae, %o1 ! 
2**32 / (1 000 000 000 / HZ) >> 1143 umul %o0, %o1, %o0 >> 1144 rd %y, %o1 >> 1145 mov %i1, %o1 ! udelay_val >> 1146 umul %o0, %o1, %o0 >> 1147 rd %y, %o1 >> 1148 ba delay_continue >> 1149 mov %o1, %o0 ! >>32 later for better resolution >> 1150 >> 1151 .globl __udelay >> 1152 __udelay: >> 1153 save %sp, -STACKFRAME_SZ, %sp >> 1154 mov %i0, %o0 >> 1155 sethi %hi(0x10c7), %o1 ! round multiplier up so large us ok >> 1156 or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000 >> 1157 umul %o0, %o1, %o0 >> 1158 rd %y, %o1 >> 1159 mov %i1, %o1 ! udelay_val >> 1160 umul %o0, %o1, %o0 >> 1161 rd %y, %o1 >> 1162 sethi %hi(0x028f4b62), %l0 ! Add in rounding constant * 2**32, >> 1163 or %g0, %lo(0x028f4b62), %l0 >> 1164 addcc %o0, %l0, %o0 ! 2**32 * 0.009 999 >> 1165 bcs,a 3f >> 1166 add %o1, 0x01, %o1 >> 1167 3: >> 1168 mov HZ, %o0 ! >>32 earlier for wider range >> 1169 umul %o0, %o1, %o0 >> 1170 rd %y, %o1 1848 1171 1849 _PTE_OFFSET(a0, a1, a3) !! 1172 delay_continue: 1850 l32i a3, a0, 0 # rea !! 1173 cmp %o0, 0x0 1851 movi a1, _PAGE_CA_INVALID << 1852 ball a3, a1, .Lfast_store_slow << 1853 bbci.l a3, _PAGE_WRITABLE_BIT, .Lfas << 1854 << 1855 movi a1, _PAGE_ACCESSED | _PAGE_DI << 1856 or a3, a3, a1 << 1857 rsr a1, excvaddr << 1858 s32i a3, a0, 0 << 1859 << 1860 /* We need to flush the cache if we h << 1861 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DC << 1862 dhwb a0, 0 << 1863 #endif << 1864 pdtlb a0, a1 << 1865 wdtlb a3, a0 << 1866 << 1867 /* Exit critical section. */ << 1868 << 1869 movi a0, 0 << 1870 rsr a3, excsave1 << 1871 s32i a0, a3, EXC_TABLE_FIXUP << 1872 << 1873 /* Restore the working registers, and << 1874 << 1875 l32i a3, a2, PT_AREG3 << 1876 l32i a1, a2, PT_AREG1 << 1877 l32i a0, a2, PT_AREG0 << 1878 l32i a2, a2, PT_DEPC << 1879 << 1880 bgeui a2, VALID_DOUBLE_EXCEPTION_AD << 1881 rsr a2, depc << 1882 rfe << 1883 << 1884 /* Double exception. Restore FIXUP ha << 1885 << 1886 1: xsr a2, depc << 1887 esync << 1888 rfde << 1889 << 1890 .Lfast_store_no_mm: << 1891 l32i a0, a1, TASK_ACTIVE_MM # unl << 1892 j .Lfast_store_continue << 1893 << 1894 /* If there was a problem, handle fau << 1895 .Lfast_store_slow: << 1896 rsr a1, excvaddr << 1897 pdtlb a0, a1 << 1898 bbci.l a0, DTLB_HIT_BIT, 1f << 1899 idtlb a0 << 1900 1: 1174 1: 1901 rsr a3, depc # still holds !! 1175 bne 1b 1902 s32i a3, a2, PT_AREG2 !! 1176 subcc %o0, 1, %o0 1903 mov a1, a2 !! 1177 1904 !! 1178 ret 1905 rsr a2, ps !! 1179 restore 1906 bbsi.l a2, PS_UM_BIT, 1f !! 1180 EXPORT_SYMBOL(__udelay) 1907 call0 _kernel_exception !! 1181 EXPORT_SYMBOL(__ndelay) 1908 1: call0 _user_exception << 1909 << 1910 ENDPROC(fast_store_prohibited) << 1911 << 1912 #endif /* CONFIG_MMU */ << 1913 << 1914 .text << 1915 /* << 1916 * System Calls. 
<< 1917 * << 1918 * void system_call (struct pt_regs* regs, in << 1919 * a2 << 1920 */ << 1921 .literal_position << 1922 << 1923 ENTRY(system_call) << 1924 << 1925 #if defined(__XTENSA_WINDOWED_ABI__) << 1926 abi_entry_default << 1927 #elif defined(__XTENSA_CALL0_ABI__) << 1928 abi_entry(12) << 1929 << 1930 s32i a0, sp, 0 << 1931 s32i abi_saved0, sp, 4 << 1932 s32i abi_saved1, sp, 8 << 1933 mov abi_saved0, a2 << 1934 #else << 1935 #error Unsupported Xtensa ABI << 1936 #endif << 1937 << 1938 /* regs->syscall = regs->areg[2] */ << 1939 << 1940 l32i a7, abi_saved0, PT_AREG2 << 1941 s32i a7, abi_saved0, PT_SYSCALL << 1942 << 1943 GET_THREAD_INFO(a4, a1) << 1944 l32i abi_saved1, a4, TI_FLAGS << 1945 movi a4, _TIF_WORK_MASK << 1946 and abi_saved1, abi_saved1, a4 << 1947 beqz abi_saved1, 1f << 1948 << 1949 mov abi_arg0, abi_saved0 << 1950 abi_call do_syscall_trace_ente << 1951 beqz abi_rv, .Lsyscall_exit << 1952 l32i a7, abi_saved0, PT_SYSCALL << 1953 1182 1954 1: !! 1183 /* Handle a software breakpoint */ 1955 /* syscall = sys_call_table[syscall_n !! 1184 /* We have to inform parent that child has stopped */ >> 1185 .align 4 >> 1186 .globl breakpoint_trap >> 1187 breakpoint_trap: >> 1188 rd %wim,%l3 >> 1189 SAVE_ALL >> 1190 wr %l0, PSR_ET, %psr >> 1191 WRITE_PAUSE >> 1192 >> 1193 st %i0, [%sp + STACKFRAME_SZ + PT_G0] ! for restarting syscalls >> 1194 call sparc_breakpoint >> 1195 add %sp, STACKFRAME_SZ, %o0 >> 1196 >> 1197 RESTORE_ALL >> 1198 >> 1199 #ifdef CONFIG_KGDB >> 1200 ENTRY(kgdb_trap_low) >> 1201 rd %wim,%l3 >> 1202 SAVE_ALL >> 1203 wr %l0, PSR_ET, %psr >> 1204 WRITE_PAUSE >> 1205 >> 1206 mov %l7, %o0 ! trap_level >> 1207 call kgdb_trap >> 1208 add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs 1956 1209 1957 movi a4, sys_call_table !! 1210 RESTORE_ALL 1958 movi a5, __NR_syscalls !! 1211 ENDPROC(kgdb_trap_low) 1959 movi abi_rv, -ENOSYS << 1960 bgeu a7, a5, 1f << 1961 << 1962 addx4 a4, a7, a4 << 1963 l32i abi_tmp0, a4, 0 << 1964 << 1965 /* Load args: arg0 - arg5 are passed << 1966 << 1967 l32i abi_arg0, abi_saved0, PT_AREG << 1968 l32i abi_arg1, abi_saved0, PT_AREG << 1969 l32i abi_arg2, abi_saved0, PT_AREG << 1970 l32i abi_arg3, abi_saved0, PT_AREG << 1971 l32i abi_arg4, abi_saved0, PT_AREG << 1972 l32i abi_arg5, abi_saved0, PT_AREG << 1973 << 1974 abi_callx abi_tmp0 << 1975 << 1976 1: /* regs->areg[2] = return_value */ << 1977 << 1978 s32i abi_rv, abi_saved0, PT_AREG2 << 1979 bnez abi_saved1, 1f << 1980 .Lsyscall_exit: << 1981 #if defined(__XTENSA_WINDOWED_ABI__) << 1982 abi_ret_default << 1983 #elif defined(__XTENSA_CALL0_ABI__) << 1984 l32i a0, sp, 0 << 1985 l32i abi_saved0, sp, 4 << 1986 l32i abi_saved1, sp, 8 << 1987 abi_ret(12) << 1988 #else << 1989 #error Unsupported Xtensa ABI << 1990 #endif 1212 #endif 1991 1213 1992 1: << 1993 mov abi_arg0, abi_saved0 << 1994 abi_call do_syscall_trace_leav << 1995 j .Lsyscall_exit << 1996 << 1997 ENDPROC(system_call) << 1998 << 1999 /* << 2000 * Spill live registers on the kernel stack m << 2001 * << 2002 * Entry condition: ps.woe is set, ps.excm is << 2003 * Exit condition: windowstart has single bit << 2004 * May clobber: a12, a13 << 2005 */ << 2006 .macro spill_registers_kernel << 2007 << 2008 #if XCHAL_NUM_AREGS > 16 << 2009 call12 1f << 2010 _j 2f << 2011 retw << 2012 .align 4 1214 .align 4 2013 1: !! 1215 .globl flush_patch_exception 2014 _entry a1, 48 !! 1216 flush_patch_exception: 2015 addi a12, a0, 3 !! 1217 FLUSH_ALL_KERNEL_WINDOWS; 2016 #if XCHAL_NUM_AREGS > 32 !! 1218 ldd [%o0], %o6 2017 .rept (XCHAL_NUM_AREGS - 32) / 12 !! 
1219 jmpl %o7 + 0xc, %g0 ! see asm-sparc/processor.h 2018 _entry a1, 48 !! 1220 mov 1, %g1 ! signal EFAULT condition 2019 mov a12, a0 << 2020 .endr << 2021 #endif << 2022 _entry a1, 16 << 2023 #if XCHAL_NUM_AREGS % 12 == 0 << 2024 mov a8, a8 << 2025 #elif XCHAL_NUM_AREGS % 12 == 4 << 2026 mov a12, a12 << 2027 #elif XCHAL_NUM_AREGS % 12 == 8 << 2028 mov a4, a4 << 2029 #endif << 2030 retw << 2031 2: << 2032 #else << 2033 mov a12, a12 << 2034 #endif << 2035 .endm << 2036 << 2037 /* << 2038 * Task switch. << 2039 * << 2040 * struct task* _switch_to (struct task* pre << 2041 * a2 a2 << 2042 */ << 2043 << 2044 ENTRY(_switch_to) << 2045 << 2046 #if defined(__XTENSA_WINDOWED_ABI__) << 2047 abi_entry(XTENSA_SPILL_STACK_RESERVE) << 2048 #elif defined(__XTENSA_CALL0_ABI__) << 2049 abi_entry(16) << 2050 << 2051 s32i a12, sp, 0 << 2052 s32i a13, sp, 4 << 2053 s32i a14, sp, 8 << 2054 s32i a15, sp, 12 << 2055 #else << 2056 #error Unsupported Xtensa ABI << 2057 #endif << 2058 mov a11, a3 # and << 2059 << 2060 l32i a4, a2, TASK_THREAD_INFO << 2061 l32i a5, a3, TASK_THREAD_INFO << 2062 << 2063 save_xtregs_user a4 a6 a8 a9 a12 a13 << 2064 << 2065 #if THREAD_RA > 1020 || THREAD_SP > 1020 << 2066 addi a10, a2, TASK_THREAD << 2067 s32i a0, a10, THREAD_RA - TASK_THR << 2068 s32i a1, a10, THREAD_SP - TASK_THR << 2069 #else << 2070 s32i a0, a2, THREAD_RA # sav << 2071 s32i a1, a2, THREAD_SP # sav << 2072 #endif << 2073 << 2074 #if defined(CONFIG_STACKPROTECTOR) && !define << 2075 movi a6, __stack_chk_guard << 2076 l32i a8, a3, TASK_STACK_CANARY << 2077 s32i a8, a6, 0 << 2078 #endif << 2079 1221 2080 /* Disable ints while we manipulate t !! 1222 .align 4 2081 !! 1223 .globl kill_user_windows, kuw_patch1_7win 2082 irq_save a14, a3 !! 1224 .globl kuw_patch1 2083 rsync !! 1225 kuw_patch1_7win: sll %o3, 6, %o3 2084 !! 1226 2085 /* Switch CPENABLE */ !! 1227 /* No matter how much overhead this routine has in the worst 2086 !! 1228 * case scenario, it is several times better than taking the 2087 #if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_ !! 1229 * traps with the old method of just doing flush_user_windows(). 2088 l32i a3, a5, THREAD_CPENABLE !! 1230 */ 2089 #ifdef CONFIG_SMP !! 1231 kill_user_windows: 2090 beqz a3, 1f !! 1232 ld [%g6 + TI_UWINMASK], %o0 ! get current umask 2091 memw # pairs with !! 1233 orcc %g0, %o0, %g0 ! if no bits set, we are done 2092 l32i a6, a5, THREAD_CP_OWNER_CPU !! 1234 be 3f ! nothing to do 2093 l32i a7, a5, THREAD_CPU !! 1235 rd %psr, %o5 ! must clear interrupts 2094 beq a6, a7, 1f # load 0 into !! 1236 or %o5, PSR_PIL, %o4 ! or else that could change 2095 movi a3, 0 !! 1237 wr %o4, 0x0, %psr ! the uwinmask state >> 1238 WRITE_PAUSE ! burn them cycles 2096 1: 1239 1: 2097 #endif !! 1240 ld [%g6 + TI_UWINMASK], %o0 ! get consistent state 2098 wsr a3, cpenable !! 1241 orcc %g0, %o0, %g0 ! did an interrupt come in? 2099 #endif !! 1242 be 4f ! yep, we are done >> 1243 rd %wim, %o3 ! get current wim >> 1244 srl %o3, 1, %o4 ! simulate a save >> 1245 kuw_patch1: >> 1246 sll %o3, 7, %o3 ! compute next wim >> 1247 or %o4, %o3, %o3 ! result >> 1248 andncc %o0, %o3, %o0 ! clean this bit in umask >> 1249 bne kuw_patch1 ! not done yet >> 1250 srl %o3, 1, %o4 ! begin another save simulation >> 1251 wr %o3, 0x0, %wim ! set the new wim >> 1252 st %g0, [%g6 + TI_UWINMASK] ! clear uwinmask >> 1253 4: >> 1254 wr %o5, 0x0, %psr ! re-enable interrupts >> 1255 WRITE_PAUSE ! burn baby burn >> 1256 3: >> 1257 retl ! return >> 1258 st %g0, [%g6 + TI_W_SAVED] ! 
no windows saved 2100 1259 2101 #if XCHAL_HAVE_EXCLUSIVE !! 1260 .align 4 2102 l32i a3, a5, THREAD_ATOMCTL8 !! 1261 .globl restore_current 2103 getex a3 !! 1262 restore_current: 2104 s32i a3, a4, THREAD_ATOMCTL8 !! 1263 LOAD_CURRENT(g6, o0) 2105 #endif !! 1264 retl 2106 !! 1265 nop 2107 /* Flush register file. */ << 2108 << 2109 #if defined(__XTENSA_WINDOWED_ABI__) << 2110 spill_registers_kernel << 2111 #endif << 2112 << 2113 /* Set kernel stack (and leave critic << 2114 * Note: It's save to set it here. Th << 2115 * because the kernel stack wil << 2116 * we return from kernel space. << 2117 */ << 2118 << 2119 rsr a3, excsave1 # exc << 2120 addi a7, a5, PT_REGS_OFFSET << 2121 s32i a7, a3, EXC_TABLE_KSTK << 2122 << 2123 /* restore context of the task 'next' << 2124 << 2125 l32i a0, a11, THREAD_RA # res << 2126 l32i a1, a11, THREAD_SP # res << 2127 << 2128 load_xtregs_user a5 a6 a8 a9 a12 a13 << 2129 << 2130 wsr a14, ps << 2131 rsync << 2132 << 2133 #if defined(__XTENSA_WINDOWED_ABI__) << 2134 abi_ret(XTENSA_SPILL_STACK_RESERVE) << 2135 #elif defined(__XTENSA_CALL0_ABI__) << 2136 l32i a12, sp, 0 << 2137 l32i a13, sp, 4 << 2138 l32i a14, sp, 8 << 2139 l32i a15, sp, 12 << 2140 abi_ret(16) << 2141 #else << 2142 #error Unsupported Xtensa ABI << 2143 #endif << 2144 1266 2145 ENDPROC(_switch_to) !! 1267 #ifdef CONFIG_PCIC_PCI >> 1268 #include <asm/pcic.h> 2146 1269 2147 ENTRY(ret_from_fork) !! 1270 .align 4 >> 1271 .globl linux_trap_ipi15_pcic >> 1272 linux_trap_ipi15_pcic: >> 1273 rd %wim, %l3 >> 1274 SAVE_ALL 2148 1275 2149 /* void schedule_tail (struct task_st !! 1276 /* 2150 * Note: prev is still in abi_arg0 (r !! 1277 * First deactivate NMI >> 1278 * or we cannot drop ET, cannot get window spill traps. >> 1279 * The busy loop is necessary because the PIO error >> 1280 * sometimes does not go away quickly and we trap again. 2151 */ 1281 */ 2152 abi_call schedule_tail !! 1282 sethi %hi(pcic_regs), %o1 >> 1283 ld [%o1 + %lo(pcic_regs)], %o2 2153 1284 2154 mov abi_arg0, a1 !! 1285 ! Get pending status for printouts later. 2155 abi_call do_syscall_trace_leav !! 1286 ld [%o2 + PCI_SYS_INT_PENDING], %o0 2156 j common_exception_retu << 2157 << 2158 ENDPROC(ret_from_fork) << 2159 << 2160 /* << 2161 * Kernel thread creation helper << 2162 * On entry, set up by copy_thread: abi_saved << 2163 * abi_saved1 = thread_fn arg. Left from _swi << 2164 */ << 2165 ENTRY(ret_from_kernel_thread) << 2166 1287 2167 abi_call schedule_tail !! 1288 mov PCI_SYS_INT_PENDING_CLEAR_ALL, %o1 2168 mov abi_arg0, abi_saved1 !! 1289 stb %o1, [%o2 + PCI_SYS_INT_PENDING_CLEAR] 2169 abi_callx abi_saved0 !! 1290 1: 2170 j common_exception_retu !! 1291 ld [%o2 + PCI_SYS_INT_PENDING], %o1 2171 !! 1292 andcc %o1, ((PCI_SYS_INT_PENDING_PIO|PCI_SYS_INT_PENDING_PCI)>>24), %g0 2172 ENDPROC(ret_from_kernel_thread) !! 1293 bne 1b 2173 !! 1294 nop 2174 #ifdef CONFIG_HIBERNATION !! 1295 2175 !! 1296 or %l0, PSR_PIL, %l4 2176 .section .bss, "aw" !! 1297 wr %l4, 0x0, %psr 2177 .align 4 !! 1298 WRITE_PAUSE 2178 .Lsaved_regs: !! 1299 wr %l4, PSR_ET, %psr 2179 #if defined(__XTENSA_WINDOWED_ABI__) !! 1300 WRITE_PAUSE 2180 .fill 2, 4 !! 1301 2181 #elif defined(__XTENSA_CALL0_ABI__) !! 1302 call pcic_nmi 2182 .fill 6, 4 !! 1303 add %sp, STACKFRAME_SZ, %o1 ! struct pt_regs *regs 2183 #else !! 1304 RESTORE_ALL 2184 #error Unsupported Xtensa ABI !! 1305 2185 #endif !! 1306 .globl pcic_nmi_trap_patch 2186 .align XCHAL_NCP_SA_ALIGN !! 1307 pcic_nmi_trap_patch: 2187 .Lsaved_user_regs: !! 
1308 sethi %hi(linux_trap_ipi15_pcic), %l3 2188 .fill XTREGS_USER_SIZE, 1 !! 1309 jmpl %l3 + %lo(linux_trap_ipi15_pcic), %g0 >> 1310 rd %psr, %l0 >> 1311 .word 0 >> 1312 >> 1313 #endif /* CONFIG_PCIC_PCI */ >> 1314 >> 1315 .globl flushw_all >> 1316 flushw_all: >> 1317 save %sp, -0x40, %sp >> 1318 save %sp, -0x40, %sp >> 1319 save %sp, -0x40, %sp >> 1320 save %sp, -0x40, %sp >> 1321 save %sp, -0x40, %sp >> 1322 save %sp, -0x40, %sp >> 1323 save %sp, -0x40, %sp >> 1324 restore >> 1325 restore >> 1326 restore >> 1327 restore >> 1328 restore >> 1329 restore >> 1330 ret >> 1331 restore 2189 1332 >> 1333 #ifdef CONFIG_SMP >> 1334 ENTRY(hard_smp_processor_id) >> 1335 661: rd %tbr, %g1 >> 1336 srl %g1, 12, %o0 >> 1337 and %o0, 3, %o0 >> 1338 .section .cpuid_patch, "ax" >> 1339 /* Instruction location. */ >> 1340 .word 661b >> 1341 /* SUN4D implementation. */ >> 1342 lda [%g0] ASI_M_VIKING_TMP1, %o0 >> 1343 nop >> 1344 nop >> 1345 /* LEON implementation. */ >> 1346 rd %asr17, %o0 >> 1347 srl %o0, 0x1c, %o0 >> 1348 nop 2190 .previous 1349 .previous 2191 !! 1350 retl 2192 ENTRY(swsusp_arch_suspend) !! 1351 nop 2193 !! 1352 ENDPROC(hard_smp_processor_id) 2194 abi_entry_default << 2195 << 2196 movi a2, .Lsaved_regs << 2197 movi a3, .Lsaved_user_regs << 2198 s32i a0, a2, 0 << 2199 s32i a1, a2, 4 << 2200 save_xtregs_user a3 a4 a5 a6 a7 a8 0 << 2201 #if defined(__XTENSA_WINDOWED_ABI__) << 2202 spill_registers_kernel << 2203 #elif defined(__XTENSA_CALL0_ABI__) << 2204 s32i a12, a2, 8 << 2205 s32i a13, a2, 12 << 2206 s32i a14, a2, 16 << 2207 s32i a15, a2, 20 << 2208 #else << 2209 #error Unsupported Xtensa ABI << 2210 #endif << 2211 abi_call swsusp_save << 2212 mov a2, abi_rv << 2213 abi_ret_default << 2214 << 2215 ENDPROC(swsusp_arch_suspend) << 2216 << 2217 ENTRY(swsusp_arch_resume) << 2218 << 2219 abi_entry_default << 2220 << 2221 #if defined(__XTENSA_WINDOWED_ABI__) << 2222 spill_registers_kernel << 2223 #endif 1353 #endif 2224 1354 2225 movi a2, restore_pblist !! 1355 /* End of entry.S */ 2226 l32i a2, a2, 0 << 2227 << 2228 .Lcopy_pbe: << 2229 l32i a3, a2, PBE_ADDRESS << 2230 l32i a4, a2, PBE_ORIG_ADDR << 2231 << 2232 __loopi a3, a9, PAGE_SIZE, 16 << 2233 l32i a5, a3, 0 << 2234 l32i a6, a3, 4 << 2235 l32i a7, a3, 8 << 2236 l32i a8, a3, 12 << 2237 addi a3, a3, 16 << 2238 s32i a5, a4, 0 << 2239 s32i a6, a4, 4 << 2240 s32i a7, a4, 8 << 2241 s32i a8, a4, 12 << 2242 addi a4, a4, 16 << 2243 __endl a3, a9 << 2244 << 2245 l32i a2, a2, PBE_NEXT << 2246 bnez a2, .Lcopy_pbe << 2247 << 2248 movi a2, .Lsaved_regs << 2249 movi a3, .Lsaved_user_regs << 2250 l32i a0, a2, 0 << 2251 l32i a1, a2, 4 << 2252 load_xtregs_user a3 a4 a5 a6 a7 a8 0 << 2253 #if defined(__XTENSA_CALL0_ABI__) << 2254 l32i a12, a2, 8 << 2255 l32i a13, a2, 12 << 2256 l32i a14, a2, 16 << 2257 l32i a15, a2, 20 << 2258 #endif << 2259 movi a2, 0 << 2260 abi_ret_default << 2261 << 2262 ENDPROC(swsusp_arch_resume) << 2263 << 2264 #endif <<
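The number-check-and-dispatch sequence in linux_sparc_syscall/do_syscall above is compact enough to obscure its shape: the syscall number arrives in %g1, is bounds-checked (unsigned) against NR_syscalls, scaled by 4 to index the word-sized pointers of sys_call_table, and the six potential arguments are copied from %i0..%i5 into %o0..%o5 before the indirect call. Below is a minimal C sketch of the same logic; the table size and the function-pointer typedef are illustrative stand-ins, not the kernel's actual declarations.

#include <errno.h>

typedef long (*syscall_fn)(long, long, long, long, long, long);

extern syscall_fn sys_call_table[];     /* real table lives elsewhere */
#define NR_SYSCALLS 256                 /* stand-in for NR_syscalls */

/* Mirrors: cmp %g1, NR_syscalls; bgeu linux_sparc_ni_syscall;
 * sll %g1, 2, %l4; ld [%l7 + %l4], %l7; call %l7 with %i0..%i5
 * copied into %o0..%o5. */
static long dispatch(unsigned long nr, long a0, long a1, long a2,
                     long a3, long a4, long a5)
{
        if (nr >= NR_SYSCALLS)          /* unsigned compare, like bgeu */
                return -ENOSYS;         /* what sys_ni_syscall returns */
        return sys_call_table[nr](a0, a1, a2, a3, a4, a5);
}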
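ret_sys_call encodes success or failure the way SPARC userspace expects: a return value in the error range sets the PSR carry bit and hands the process abs(errno) in the result register, while anything else clears carry and is returned verbatim. A C rendering of that decision follows, assuming PSR_C is the integer-condition-code carry bit (0x00100000 per asm/psr.h) and using a simplified stand-in for struct pt_regs.

#define PSR_C                 0x00100000  /* icc carry, per asm/psr.h */
#define ERESTART_RESTARTBLOCK 516         /* kernel-internal errno value */

struct regs_model { unsigned long psr; long i0; };

/* Mirrors: cmp %o0, -ERESTART_RESTARTBLOCK; bgeu -> failure path */
static void set_result(struct regs_model *regs, long ret)
{
        if ((unsigned long)ret >= (unsigned long)-ERESTART_RESTARTBLOCK) {
                regs->i0 = -ret;            /* sub %g0, %o0, %o0: abs(errno) */
                regs->psr |= PSR_C;         /* or %g3, %g2, %g3 */
        } else {
                regs->i0 = ret;
                regs->psr &= ~PSR_C;        /* andn %g3, %g2, %g3 */
        }
}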
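The constants in __udelay are fixed-point reciprocals: 0x10c7 is 2**32/1000000 rounded up, so multiplying microseconds by it and then taking the high 32 bits of a widening multiply by udelay_val (loops per jiffy) yields a loop count without any division; the final multiply by HZ converts jiffy-relative loops into the spin count consumed at delay_continue. In 64-bit C the arithmetic collapses to a few lines; HZ is a stand-in constant here, and the rounding term 0x028f4b62 is omitted for clarity.

#include <stdint.h>

#define HZ 100                          /* stand-in; config-dependent */

/* loops ~= usecs * lpj * HZ / 1000000, valid for delays up to 1s */
static unsigned long udelay_loops(uint32_t usecs, uint32_t lpj)
{
        uint32_t prod = usecs * 0x10c7u;        /* low word; fits for <= 1s */
        uint32_t hi = (uint32_t)(((uint64_t)prod * lpj) >> 32);
        return (unsigned long)hi * HZ;          /* spun down in delay_continue */
}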
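kill_user_windows avoids taking window-overflow traps by simulating the saves in software: each trip through kuw_patch1 rotates WIM right by one window (the shift-left-by-7 wraps bit 0 around on an 8-window CPU; boot code patches in the kuw_patch1_7win variant, a shift by 6, on 7-window machines) and knocks the corresponding bit out of the user-window mask until none remain. A C model of that rotation loop, with NWINDOWS standing in for the patched constant and WIM assumed to have at least one bit set, as the hardware maintains:

#define NWINDOWS 8                      /* 7 on kuw_patch1_7win machines */
#define WIN_MASK ((1u << NWINDOWS) - 1)

/* One simulated 'save': (wim >> 1) | (wim << (NWINDOWS - 1)) */
static unsigned int next_wim(unsigned int wim)
{
        return ((wim >> 1) | (wim << (NWINDOWS - 1))) & WIN_MASK;
}

static unsigned int kill_user_windows_model(unsigned int uwinmask,
                                            unsigned int wim)
{
        while (uwinmask) {              /* andncc %o0, %o3, %o0; bne */
                wim = next_wim(wim);
                uwinmask &= ~wim;
        }
        return wim;                     /* written back: wr %o3, 0x0, %wim */
}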
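fpsave has to cope with the FPU trap queue: bit 0x2000 in the FSR says the queue is non-empty, so the loop at 1: stores one %fq doubleword at a time, re-reads the FSR, and counts entries until the bit clears (fpsave_catch exists because, as the comment credits Theo de Raadt and the Sprite/NetBSD/OpenBSD authors for pointing out, the FSR store itself can trap mid-loop). A C-shaped model of the drain, where read_fsr() and read_fq() are hypothetical stand-ins for the privileged st %fsr / std %fq accesses:

#define FSR_QNE 0x2000u                 /* queue-not-empty, the %g4 bit above */

extern unsigned long read_fsr(void);            /* models st %fsr, [%o1] */
extern unsigned long long read_fq(void);        /* models std %fq, [%o2] */

static unsigned long drain_fpqueue(unsigned long *fsr,
                                   unsigned long long *fpqueue)
{
        unsigned long depth = 0;

        *fsr = read_fsr();
        while (*fsr & FSR_QNE) {        /* queue not yet empty */
                fpqueue[depth++] = read_fq();
                *fsr = read_fsr();
        }
        return depth;                   /* stored via st %g2, [%o3] */
}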
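hard_smp_processor_id shows the kernel's boot-time patching idiom: the generic three-instruction sequence at 661: derives the CPU id from %tbr, and the .cpuid_patch section records that instruction's address alongside per-platform replacements (SUN4D's ASI load, LEON's %asr17 read) for early setup code to copy over it. The sketch below shows what such a patch pass looks like; the struct layout, section symbols, and flush helper are all hypothetical illustrations, not the kernel's actual definitions.

struct cpuid_patch {
        unsigned int *insn;             /* instruction location (.word 661b) */
        unsigned int sun4d[3];          /* lda [%g0] ASI_M_VIKING_TMP1; nop; nop */
        unsigned int leon[3];           /* rd %asr17; srl %o0, 0x1c; nop */
};

extern struct cpuid_patch __cpuid_begin[], __cpuid_end[];       /* hypothetical */
extern void local_flush_icache(unsigned int *addr, int nwords); /* hypothetical */

static void apply_cpuid_patches(int is_leon)
{
        struct cpuid_patch *p;
        int i;

        for (p = __cpuid_begin; p < __cpuid_end; p++) {
                const unsigned int *repl = is_leon ? p->leon : p->sun4d;
                for (i = 0; i < 3; i++)         /* overwrite in place */
                        p->insn[i] = repl[i];
                local_flush_icache(p->insn, 3); /* drop stale decoded insns */
        }
}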