/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007  Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)
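/*
 * Dispatch above, roughly, as an illustrative C sketch (the table and
 * its indexing are real, the C itself is only an approximation):
 *
 *	extern void (*exception_handlers[32])(void);
 *
 *	offset = cause & 0x7c;		// Cause.ExcCode << 2
 *	handler = *(void **)((char *)exception_handlers + offset);
 *	handler();
 *
 * On 64-bit kernels the dsll doubles the offset because the table
 * holds 8-byte pointers while ExcCode comes pre-scaled for 4-byte ones.
 */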
/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	 nop
	 nop
	 nop
#ifdef CONFIG_CPU_MICROMIPS
	 nop
	 nop
	 nop
	 nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	 nop
	.set	pop
	END(__r4k_wait)
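/*
 * Why a "rollback region": an interrupt may arrive between the
 * _TIF_NEED_RESCHED test above and the wait instruction, and the CPU
 * would then sleep through the wakeup it just missed.  The
 * BUILD_ROLLBACK_PROLOGUE macro below closes the race: if an
 * interrupt's EPC falls anywhere inside the 32-byte region, EPC is
 * rounded down to the start of __r4k_wait so the flags are re-tested
 * before wait can be reached again.
 */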
	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	MTC0	k0, CP0_EPC
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	 rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)
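/*
 * The IRQ stack switch above, as an illustrative C sketch (old_sp is
 * a pseudo-variable standing for the sp preserved in s1):
 *
 *	void *base = irq_stack[smp_processor_id()];
 *
 *	if ((sp & ~(_THREAD_SIZE - 1)) != base) {	// not on it yet
 *		sp = base + _IRQ_STACK_START;
 *		*(unsigned long *)sp = old_sp;		// for the unwinder
 *	}
 *	plat_irq_dispatch();
 *	sp = old_sp;
 */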
	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at the initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, zero, 0		/* Offset in vi_handlers[] */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
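/*
 * How the prototype above is used (illustrative; see the vectored
 * interrupt setup code in arch/mips/kernel/traps.c): the bytes between
 * except_vec_vi and except_vec_vi_end are copied to each vector entry
 * at ebase + n*IntCtl.VS, and the immediate of the ori at
 * except_vec_vi_ori is patched so that v0 carries the byte offset of
 * that vector's vi_handlers[] slot into except_vec_vi_handler below.
 */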
/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler, $v0 holds the
 * offset into vi_handlers[]
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	PTR_L	v0, vi_handlers(v0)
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)
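/*
 * Dispatch above, roughly: handler = *(void **)((char *)vi_handlers + v0);
 * handler();  v0 arrives from except_vec_vi already scaled as a byte
 * offset, so no further shifting is needed before indexing the table.
 */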
/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	andi	k0, k0, MIPS_DEBUG_DBP	# Check for SDBBP.
	beqz	k0, ejtag_return

#ifdef CONFIG_SMP
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous
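/*
 * SMP locking note: the ll/sc loop in the handler above is a minimal
 * hand-rolled test-and-set spinlock, roughly (illustrative C sketch;
 * the real atomicity comes from ll/sc retrying until the claiming
 * store succeeds):
 *
 *	while (ejtag_debug_buffer_spinlock)
 *		;			// spin while owned
 *	ejtag_debug_buffer_spinlock = 1;
 *
 * It serialises staging k1 through the shared ejtag_debug_buffer
 * before it is copied into this CPU's slot of
 * ejtag_debug_buffer_per_cpu; the sw of zero releases the lock.
 */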
	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/ \
	.set	mips1
	.set	hardfloat
	cfc1	a1, fcr31
	.set	pop
	.endm

	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__build_clear_gsexc
	.set	push
	/*
	 * We need to specify a selector to access the CP0.Diag1 (GSCause)
	 * register. All GSExc-equipped processors have MIPS32.
	 */
	.set	mips32
	mfc0	a1, CP0_DIAGNOSTIC1
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__BUILD_silent exception
	.endm
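/*
 * Expansion example for the templates below (illustrative):
 * "BUILD_HANDLER ov ov sti silent" emits a handle_ov entry point that
 * saves all registers, runs __build_clear_sti (TRACE_IRQS_ON; STI),
 * then hands off to C:
 *
 *	move	a0, sp			# struct pt_regs argument
 *	jal	do_ov			# C-level handler
 *	j	ret_from_exception
 */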
	/* Gas tries to parse the ASM_PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes to
	   recognize an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ...  */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	ASM_PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0,exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0,exception_count_\exception
	.comm	exception_count\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER gsexc gsexc gsexc silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)
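/*
 * Why the probe above: handle_ri_rdhwr below must fetch the faulting
 * instruction word from the EPC address while still running in the
 * exception handler, so tlbp first verifies that the TLB actually
 * maps EPC.  A negative Index (no match) sends us to the generic
 * handle_ri slow path instead of risking a nested TLB fault on the
 * fetch.
 */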
	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	 xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER	daddi_ov daddi_ov none silent	/* #12 */
#endif