/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007  Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)
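/*
 * Note: the andi above extracts Cause.ExcCode (bits 6..2) already
 * scaled by four, i.e. a byte offset into a table of 32-bit pointers;
 * ExcCode 2 (TLB load miss), say, gives offset 8 and thus
 * exception_handlers[2].  The dsll on 64-bit kernels doubles the
 * offset again for 64-bit pointers.
 */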

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	nop
	.set	pop
	END(__r4k_wait)
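/*
 * The wait above sits at the end of a 32-byte aligned rollback region.
 * BUILD_ROLLBACK_PROLOGUE below rounds EPC down to the start of this
 * region for any interrupt taken inside it, so the _TIF_NEED_RESCHED
 * test is re-run on return.  This closes the race where the flag is
 * set after the test but before the wait executes.
 */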

	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	MTC0	k0, CP0_EPC
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set	pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the task.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

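	/*
	 * Rounding sp down to a _THREAD_SIZE boundary gives the base of
	 * whatever stack we are currently running on.  If it matches the
	 * irq_stack pointer just loaded into t0, this is a nested
	 * interrupt and we must keep the current stack.
	 */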
	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
	jr	v1
FEXPORT(except_vec_vi_ori)
	ori	v0, zero, 0		/* Offset in vi_handlers[] */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
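/*
 * Each vectored interrupt uses its own copy of the prototype above,
 * placed at ebase + n*IntCtl.VS.  The immediate of the "ori v0, zero, 0"
 * marked by except_vec_vi_ori is rewritten per copy (presumably by the
 * vectored-interrupt setup code in traps.c, which is why the label is
 * exported), so that except_vec_vi_handler receives the right offset
 * into vi_handlers[] in v0.
 */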

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler, $v0 holds
 * offset into vi_handlers[]
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the task.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD	sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	PTR_L	v0, vi_handlers(v0)
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	andi	k0, k0, MIPS_DEBUG_DBP	# Check for SDBBP.
	beqz	k0, ejtag_return

#ifdef CONFIG_SMP
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous
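/*
 * Only k0/k1 are safe to touch before SAVE_ALL runs, so k1 has to be
 * parked in memory while the handler sets up.  On SMP the shared
 * handoff word is guarded by the hand-rolled ll/sc spinlock above, and
 * the saved value is immediately copied into this CPU's slot of
 * ejtag_debug_buffer_per_cpu so that a debug exception taken
 * concurrently on another CPU cannot clobber it.
 */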

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/
	.set	mips1
	.set	hardfloat
	cfc1	a1, fcr31
	.set	pop
	.endm

	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro	__build_clear_gsexc
	.set	push
	/*
	 * We need to specify a selector to access the CP0.Diag1 (GSCause)
	 * register. All GSExc-equipped processors have MIPS32.
	 */
	.set	mips32
	mfc0	a1, CP0_DIAGNOSTIC1
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm
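/*
 * The __build_clear_* variants above are selected by the third argument
 * of BUILD_HANDLER below.  Each one fixes up the IRQ enable and tracing
 * state for its exception class and, where needed, captures auxiliary
 * state for the C handler first: FCSR into a1 for fpe, the MSA CSR for
 * msa_fpe, GSCause for gsexc, and BadVAddr into PT_BVADDR for the
 * address error handlers.
 */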

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the ASM_PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	ASM_PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0,exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0,exception_count_\exception
	.comm	exception_count_\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm
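/*
 * For example, "BUILD_HANDLER adel ade ade silent" below expands to a
 * handle_adel entry that performs SAVE_ALL, exports handle_adel_int as
 * a second entry point just past the register saves (usable by paths
 * that enter with registers already saved), runs __build_clear_ade,
 * then calls do_ade() and returns through ret_from_exception.
 */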

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER gsexc gsexc gsexc silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)

	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER	daddi_ov daddi_ov none silent	/* #12 */
#endif