/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
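	/*
	 * Editorial note: CAUSE.ExcCode occupies bits 6:2, so masking with
	 * 0x7c yields the exception code already scaled by 4 - a ready-made
	 * byte offset into an array of 32-bit words.  The dsll above doubles
	 * it on 64-bit kernels, where exception_handlers[] holds 8-byte
	 * pointers.
	 */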
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4			# Is this ...
	and	k0, k1			# ... really needed?
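	/*
	 * Editorial note: with TagLo zeroed, Index_Store_Tag_D invalidates
	 * the primary dcache line outright, and Hit_Writeback_Inv_SD then
	 * writes the secondary line back to memory and invalidates it,
	 * resolving the aliased dirty copies.
	 */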
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	 nop
	 nop
	 nop
#ifdef CONFIG_CPU_MICROMIPS
	 nop
	 nop
	 nop
	 nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	 nop
	.set	pop
	END(__r4k_wait)

	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
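	/*
	 * Editorial note: the ori/xori pair rounds EPC down to the
	 * enclosing 32-byte boundary.  Any EPC inside the rollback region
	 * therefore rounds to __r4k_wait itself; when the two match, EPC is
	 * rewritten so the need_resched test is replayed before the wait
	 * instruction is reached again.
	 */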
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	MTC0	k0, CP0_EPC
	.set	pop
	.endm

	.align	5
	BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	 rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp		# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
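	/*
	 * Editorial note: on 64-bit kernels the lui/daddiu/dsll chain above
	 * assembles the upper 48 bits of &irq_stack piecewise (a single lui
	 * suffices for 32-bit); the %lo() half is folded into the load
	 * below.  k0 holds the CPU number, scaled next into an offset into
	 * the irq_stack[] pointer array.
	 */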
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
	BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, zero, 0	/* Offset in vi_handlers[] */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
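
/*
 * Editorial note: except_vec_vi_end marks how many bytes the prototype
 * above occupies, so the code that copies it into each vectored
 * interrupt slot knows how much to duplicate.
 */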

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler, $v0 holds
 * offset into vi_handlers[]
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp		# Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	PTR_L	v0, vi_handlers(v0)
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	andi	k0, k0, MIPS_DEBUG_DBP	# Check for SDBBP.
	beqz	k0, ejtag_return

#ifdef CONFIG_SMP
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif
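	/*
	 * Editorial note: the ll/sc sequence above is a hand-rolled
	 * spinlock.  With only k0/k1 usable this early, sc deposits the
	 * lock's own address as the "held" value, so a spinning reader
	 * always reloads through a valid pointer.  The lock serialises the
	 * single-word ejtag_debug_buffer until k1 has been copied to this
	 * CPU's private slot.
	 */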

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0	k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU	k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	ASM_CPUID_MFC0	k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU	k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
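	/*
	 * Editorial note: setting EXL first keeps the CPU in kernel mode
	 * with interrupts masked while ERL is dropped; the _ehb below
	 * clears the resulting CP0 execution hazard before the new Status
	 * value is relied on.
	 */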
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/
	.set	mips1
	.set	hardfloat
	cfc1	a1, fcr31
	.set	pop
	.endm

	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__build_clear_gsexc
	.set	push
	/*
	 * We need to specify a selector to access the CP0.Diag1 (GSCause)
	 * register. All GSExc-equipped processors have MIPS32.
	 */
	.set	mips32
	mfc0	a1, CP0_DIAGNOSTIC1
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the ASM_PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes to
	   recognize an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	ASM_PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0, exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0, exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm
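
/*
 * Editorial note, as a worked example: "BUILD_HANDLER ov ov sti silent"
 * below becomes handle_ov, which saves all registers, re-enables
 * interrupts (__build_clear_sti), calls do_ov with a0 = sp (the saved
 * pt_regs) and leaves through ret_from_exception.  The extra
 * handle_ov_int label enters past the register save, for callers that
 * have already done SAVE_ALL.
 */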

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER gsexc gsexc gsexc silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)

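/*
 * Editorial note: handle_ri_rdhwr_tlbp falls through only once the
 * probe has confirmed a TLB entry for EPC, so the instruction fetches
 * in handle_ri_rdhwr below cannot themselves take a TLB miss while
 * running on k0/k1.
 */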
LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	 xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif