/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007  Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)
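
/*
 * Informal sketch of the dispatch above; in C it is roughly
 *
 *	handler = exception_handlers[(read_c0_cause() & 0x7c) >> 2];
 *	handler();
 *
 * Cause bits 6..2 hold ExcCode, so "andi 0x7c" yields ExcCode * 4, a
 * ready-made byte offset into a table of 32-bit pointers; the extra
 * "dsll k1, 1" on 64-bit kernels doubles it for 8-byte pointers.
 */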

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1				# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	 nop
	 nop
	 nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	 nop
	.set	pop
	END(__r4k_wait)
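
/*
 * How the rollback trick works (informal): BUILD_ROLLBACK_PROLOGUE
 * below rounds the interrupted EPC down to a 32-byte boundary,
 *
 *	k0 = (epc | 0x1f) ^ 0x1f;
 *	if (k0 == __r4k_wait)
 *		write_c0_epc(k0);
 *
 * so an interrupt taken anywhere inside the region above resumes at
 * its first instruction and re-checks _TIF_NEED_RESCHED, instead of
 * dropping into "wait" after a wakeup has already been posted.
 */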

	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	MTC0	k0, CP0_EPC
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	 rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)
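
	/*
	 * Informally, the stack switch that follows is:
	 *
	 *	if ((sp & ~(_THREAD_SIZE - 1)) != irq_stack[cpu])
	 *		sp = irq_stack[cpu] + _IRQ_STACK_START;
	 *
	 * The TI_REGS juggling above keeps the old thread_info->regs
	 * value in s0 so the return path can put it back; sp is stored
	 * there meanwhile so nested code can find this trap frame.
	 */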

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)
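
/*
 * Aside on the %highest/%higher/%hi/%lo sequence above: a 64-bit
 * static address is built 16 bits at a time, roughly
 *
 *	k1 = ((((%highest << 16) + %higher) << 16) + %hi) << 16;
 *	t0 = *(long *)(k1 + %lo);
 *
 * the usual pattern for reaching a symbol that may lie outside the
 * 32-bit sign-extended address range.
 */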

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at the initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, zero, 0		/* Offset in vi_handlers[] */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
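
/*
 * The stub above is copied once per vector, and the "ori" at
 * except_vec_vi_ori appears to be patched at initialization so that
 * v0 carries that vector's byte offset into vi_handlers[].  The
 * shared except_vec_vi_handler below then needs only one table load
 * to reach the right handler.
 */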

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler, $v0 holds
 * offset into vi_handlers[]
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	PTR_L	v0, vi_handlers(v0)
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)
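
/*
 * A constraint worth noting for the debug handler below: on entry
 * nothing has been saved yet, so only k0/k1 are usable until a frame
 * exists.  k0 is parked in the EJTAG DESAVE register and k1 in
 * ejtag_debug_buffer; only then can SAVE_ALL build a trap frame.
 */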

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	andi	k0, k0, MIPS_DEBUG_DBP	# Check for SDBBP.
	beqz	k0, ejtag_return

#ifdef CONFIG_SMP
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous
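
/*
 * Rationale for the SMP dance above (informal): the debug exception
 * has no per-CPU scratch left once DESAVE holds k0, so k1 is staged
 * through the shared ejtag_debug_buffer under a hand-rolled ll/sc
 * spinlock, then moved to this CPU's ejtag_debug_buffer_per_cpu slot,
 * where it survives until the RESTORE_ALL path reloads it.
 */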

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/
	.set	mips1
	.set	hardfloat
	cfc1	a1, fcr31
	.set	pop
	.endm

	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__build_clear_gsexc
	.set	push
	/*
	 * We need to specify a selector to access the CP0.Diag1 (GSCause)
	 * register. All GSExc-equipped processors have MIPS32.
	 */
	.set	mips32
	mfc0	a1, CP0_DIAGNOSTIC1
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm
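
/*
 * Descriptive note: each __build_clear_* variant above runs right
 * after SAVE_ALL, while .set noat is still in effect, and captures
 * any auxiliary state its exception needs before it can be clobbered
 * (FCSR into a1 for fpe, MSACSR for msa_fpe, BadVAddr for ade,
 * GSCause for gsexc).  The sti and gsexc flavours then re-enable
 * interrupts; cli, fpe and msa_fpe leave them disabled.
 */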

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the ASM_PRINT argument as a string containing
	   string escapes and emits bogus warnings if it thinks it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose	nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	ASM_PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0,exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0,exception_count_\exception
	.comm	exception_count\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm
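
/*
 * For reference, a rough expansion: "BUILD_HANDLER ri ri sti silent"
 * produces approximately
 *
 *	NESTED(handle_ri, PT_SIZE, sp)
 *		SAVE_ALL
 *	FEXPORT(handle_ri_int)
 *		TRACE_IRQS_ON
 *		STI
 *		move	a0, sp
 *		jal	do_ri
 *		j	ret_from_exception
 *	END(handle_ri)
 *
 * i.e. every handler built here funnels into a C-level
 * do_<handler>(struct pt_regs *) and returns via ret_from_exception.
 */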

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER gsexc gsexc gsexc silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)
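
/*
 * Why the probe above: the fast path below must fetch the faulting
 * instruction word from EPC.  If the TLB holds no entry for that
 * page, the load would itself fault inside an exception handler, so
 * a negative Index from tlbp sends us to the generic handle_ri slow
 * path instead.
 */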

	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	 xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER	daddi_ov daddi_ov none silent	/* #12 */
#endif