/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007  Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)
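/*
 * Note on the dispatch above: "andi k1, k1, 0x7c" isolates
 * CP0_CAUSE.ExcCode (bits 6..2), which is already a ready-made index
 * into a table of 4-byte entries; the extra dsll on 64-bit kernels
 * rescales it for the 8-byte pointers in exception_handlers[].
 */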
/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	nop
	.set	pop
	END(__r4k_wait)
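/*
 * If an interrupt is taken inside the rollback region above (i.e.
 * between the TIF_NEED_RESCHED test and the wait instruction), the
 * prologue emitted by the macro below rounds EPC back down to the start
 * of __r4k_wait, so the need-resched flag is re-tested before the CPU
 * is allowed to go to sleep.
 */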
	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	MTC0	k0, CP0_EPC
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
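	/*
	 * The check below relies on irq_stack[cpu] holding the base address
	 * of the IRQ stack: masking sp with ~(_THREAD_SIZE-1) yields the
	 * base of whatever stack we are currently on, so equality means we
	 * are already on the IRQ stack and must not switch again.
	 */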
	move	s1, sp	# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
	jr	v1
FEXPORT(except_vec_vi_ori)
	ori	v0, zero, 0	/* Offset in vi_handlers[] */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
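/*
 * Note: the "ori v0, zero, 0" above is patched at boot with this
 * vector's byte offset into vi_handlers[]; except_vec_vi_handler then
 * loads the real handler address from that table before calling it.
 */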
/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler, $v0 holds
 * offset into vi_handlers[]
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
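	/*
	 * SAVE_SOME/SAVE_AT already ran in except_vec_vi, so SAVE_TEMP and
	 * SAVE_STATIC above complete the register save that SAVE_ALL would
	 * otherwise have done; the stack switch below then mirrors the one
	 * in handle_int.
	 */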
	move	s1, sp	# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	PTR_L	v0, vi_handlers(v0)
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	andi	k0, k0, MIPS_DEBUG_DBP	# Check for SDBBP.
	beqz	k0, ejtag_return
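	/*
	 * Note: only k0/k1 are safe to use this early, so on SMP the code
	 * below serializes access to ejtag_debug_buffer with a hand-rolled
	 * ll/sc spinlock rather than a regular spinlock_t, then parks the
	 * saved k1 in a per-CPU slot so the lock can be dropped before any
	 * C code runs.
	 */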
#ifdef CONFIG_SMP
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)
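/*
 * Note on the handler above: k0 is parked in CP0_DESAVE for the whole
 * handler and only restored right before deret, while k1 is preserved
 * through ejtag_debug_buffer, so the interrupted context's registers
 * survive the debug exception.
 */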
/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/ \
	.set	mips1
	.set	hardfloat
	cfc1	a1, fcr31
	.set	pop
	.endm

	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__build_clear_gsexc
	.set	push
	/*
	 * We need to specify a selector to access the CP0.Diag1 (GSCause)
	 * register. All GSExc-equipped processors have MIPS32.
	 */
	.set	mips32
	mfc0	a1, CP0_DIAGNOSTIC1
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm
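/*
 * Each __build_clear_<x> macro above provides the exception-specific
 * prologue (IRQ state, saved fault address, FPU/MSA control registers)
 * that __BUILD_HANDLER below splices in between SAVE_ALL and the call
 * to the C-level do_<handler> routine.
 */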
	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the ASM_PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ...  */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	ASM_PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm
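	/*
	 * For example, "BUILD_HANDLER ri ri sti silent" below expands to a
	 * handle_ri entry point that performs SAVE_ALL, re-enables
	 * interrupts via __build_clear_sti, calls do_ri(regs) and returns
	 * through ret_from_exception; the trailing comments name the CP0
	 * ExcCode each handler serves.
	 */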
	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER gsexc gsexc gsexc silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)
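	/*
	 * Note: handle_ri_rdhwr below is the fast path behind the tlbp
	 * probe above: it fetches the faulting instruction and emulates
	 * only the exact "rdhwr v1, $29" encoding (userland reading the
	 * TLS pointer); anything else is punted to the generic handle_ri
	 * slow path.
	 */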
	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif