/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/* Make the addition of cfi info a little easier. */
	.macro	cfi_rel_offset reg offset=0 docfi=0
	.if	\docfi
	.cfi_rel_offset	\reg, \offset
	.endif
	.endm

	.macro	cfi_st reg offset=0 docfi=0
	LONG_S	\reg, \offset(sp)
	cfi_rel_offset \reg, \offset, \docfi
	.endm

	.macro	cfi_restore reg offset=0 docfi=0
	.if	\docfi
	.cfi_restore	\reg
	.endif
	.endm

	.macro	cfi_ld reg offset=0 docfi=0
	LONG_L	\reg, \offset(sp)
	cfi_restore \reg \offset \docfi
	.endm

#if defined(CONFIG_CPU_R3000)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

		.macro	SAVE_AT docfi=0
		.set	push
		.set	noat
		cfi_st	$1, PT_R1, \docfi
		.set	pop
		.endm

		.macro	SAVE_TEMP docfi=0
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
		mfhi	v1
#endif
#ifdef CONFIG_32BIT
		cfi_st	$8, PT_R8, \docfi
		cfi_st	$9, PT_R9, \docfi
#endif
		cfi_st	$10, PT_R10, \docfi
		cfi_st	$11, PT_R11, \docfi
		cfi_st	$12, PT_R12, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_HI(sp)
		mflo	v1
#endif
		cfi_st	$13, PT_R13, \docfi
		cfi_st	$14, PT_R14, \docfi
		cfi_st	$15, PT_R15, \docfi
		cfi_st	$24, PT_R24, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/*
		 * The Octeon multiplier state is affected by general
		 * multiply instructions.  It must be saved here, before
		 * kernel code has a chance to corrupt it.
		 */
		jal	octeon_mult_save
#endif
		.endm

		.macro	SAVE_STATIC docfi=0
		cfi_st	$16, PT_R16, \docfi
		cfi_st	$17, PT_R17, \docfi
		cfi_st	$18, PT_R18, \docfi
		cfi_st	$19, PT_R19, \docfi
		cfi_st	$20, PT_R20, \docfi
		cfi_st	$21, PT_R21, \docfi
		cfi_st	$22, PT_R22, \docfi
		cfi_st	$23, PT_R23, \docfi
		cfi_st	$30, PT_R30, \docfi
		.endm

/*
 * get_saved_sp returns the SP for the current CPU by looking in the
 * kernelsp array for it.  If tosp is set, it stores the current sp in
 * k0 and loads the new value in sp.  If not, it clobbers k0 and
 * stores the new value in k1, leaving sp unaffected.
 */
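/*
 * Usage sketch (illustrative only; the actual call sites live elsewhere
 * in the kernel): bring-up code publishes each CPU's kernel stack with
 * set_saved_sp, and the exception entry path later switches to it with
 * get_saved_sp, along the lines of
 *
 *	set_saved_sp	sp, t0, t1		# record this CPU's kernel sp
 *	...
 *	get_saved_sp	docfi=0 tosp=1		# old sp -> k0, kernel sp -> sp
 */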
#ifdef CONFIG_SMP

/* SMP variation */
	.macro	get_saved_sp docfi=0 tosp=0
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	.if	\tosp
	move	k0, sp
	.if	\docfi
	.cfi_register	sp, k0
	.endif
	LONG_L	sp, %lo(kernelsp)(k1)
	.else
	LONG_L	k1, %lo(kernelsp)(k1)
	.endif
	.endm

	.macro	set_saved_sp stackp temp temp2
	ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG
	LONG_SRL	\temp, SMP_CPUID_PTRSHIFT
	LONG_S	\stackp, kernelsp(\temp)
	.endm
#else /* !CONFIG_SMP */
/* Uniprocessor variation */
	.macro	get_saved_sp docfi=0 tosp=0
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
	/*
	 * Clear BTB (branch target buffer), forbid RAS (return address
	 * stack) to work around the out-of-order issue in Loongson2F
	 * via its diagnostic register.
	 */
	move	k0, ra
	jal	1f
	 nop
1:	jal	1f
	 nop
1:	jal	1f
	 nop
1:	jal	1f
	 nop
1:	move	ra, k0
	li	k0, 3
	mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, k1, 16
#endif
	.if	\tosp
	move	k0, sp
	.if	\docfi
	.cfi_register	sp, k0
	.endif
	LONG_L	sp, %lo(kernelsp)(k1)
	.else
	LONG_L	k1, %lo(kernelsp)(k1)
	.endif
	.endm

	.macro	set_saved_sp stackp temp temp2
	LONG_S	\stackp, kernelsp
	.endm
#endif

		.macro	SAVE_SOME docfi=0
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		 move	k0, sp
		.if	\docfi
		.cfi_register	sp, k0
		.endif
#ifdef CONFIG_EVA
		/*
		 * Flush interAptiv's Return Prediction Stack (RPS) by
		 * writing EntryHi.  Toggling Config7.RPS is slower and
		 * less portable.
		 *
		 * The RPS isn't automatically flushed when exceptions are
		 * taken, which can result in kernel mode speculative
		 * accesses to user addresses if the RPS mispredicts.
		 * That's harmless when user and kernel share the same
		 * address space, but with EVA the same user segments may
		 * be unmapped to kernel mode, even containing sensitive
		 * MMIO regions or invalid memory.
		 *
		 * This can happen when the kernel sets the return address
		 * to ret_from_* and jr's to the exception handler, which
		 * looks more like a tail call than a function call.  If
		 * nested calls don't evict the last user address in the
		 * RPS, it will mispredict the return and fetch from a
		 * user controlled address into the icache.
		 *
		 * More recent EVA-capable cores with MAAR to restrict
		 * speculative accesses aren't affected.
		 */
		MFC0	k0, CP0_ENTRYHI
		MTC0	k0, CP0_ENTRYHI
#endif
		.set	reorder
		/* Called from user mode, new stack. */
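		/*
		 * get_saved_sp with tosp=1 switches to this CPU's kernel
		 * stack while preserving the pre-exception sp in k0.
		 */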
		get_saved_sp docfi=\docfi tosp=1
8:
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
		.set	at=k1
#endif
		PTR_SUBU sp, PT_SIZE
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
		.set	noat
#endif
		.if	\docfi
		.cfi_def_cfa	sp, 0
		.endif
		cfi_st	k0, PT_R29, \docfi
		cfi_rel_offset	sp, PT_R29, \docfi
		cfi_st	v1, PT_R3, \docfi
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly.
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		cfi_st	v0, PT_R2, \docfi
		LONG_S	v1, PT_STATUS(sp)
		cfi_st	$4, PT_R4, \docfi
		mfc0	v1, CP0_CAUSE
		cfi_st	$5, PT_R5, \docfi
		LONG_S	v1, PT_CAUSE(sp)
		cfi_st	$6, PT_R6, \docfi
		cfi_st	ra, PT_R31, \docfi
		MFC0	ra, CP0_EPC
		cfi_st	$7, PT_R7, \docfi
#ifdef CONFIG_64BIT
		cfi_st	$8, PT_R8, \docfi
		cfi_st	$9, PT_R9, \docfi
#endif
		LONG_S	ra, PT_EPC(sp)
		.if	\docfi
		.cfi_rel_offset	ra, PT_EPC
		.endif
		cfi_st	$25, PT_R25, \docfi
		cfi_st	$28, PT_R28, \docfi

		/* Set thread_info if we're coming from user mode */
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		bltz	k0, 9f

		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		.set	mips64
		pref	0, 0($28)	/* Prefetch the current pointer */
#endif
9:
		.set	pop
		.endm

		.macro	SAVE_ALL docfi=0
		SAVE_SOME \docfi
		SAVE_AT \docfi
		SAVE_TEMP \docfi
		SAVE_STATIC \docfi
		.endm

		.macro	RESTORE_AT docfi=0
		.set	push
		.set	noat
		cfi_ld	$1, PT_R1, \docfi
		.set	pop
		.endm

		.macro	RESTORE_TEMP docfi=0
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/* Restore the Octeon multiplier state */
		jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		LONG_L	$14, PT_ACX(sp)
		LONG_L	$24, PT_LO(sp)
		LONG_L	$15, PT_HI(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
		LONG_L	$24, PT_LO(sp)
		LONG_L	$15, PT_HI(sp)
#endif
#ifdef CONFIG_32BIT
		cfi_ld	$8, PT_R8, \docfi
		cfi_ld	$9, PT_R9, \docfi
#endif
		cfi_ld	$10, PT_R10, \docfi
		cfi_ld	$11, PT_R11, \docfi
		cfi_ld	$12, PT_R12, \docfi
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mtlhx	$14
		mtlhx	$15
		mtlhx	$24
#elif !defined(CONFIG_CPU_MIPSR6)
		mtlo	$24
		mthi	$15
#endif
		cfi_ld	$13, PT_R13, \docfi
		cfi_ld	$14, PT_R14, \docfi
		cfi_ld	$15, PT_R15, \docfi
		cfi_ld	$24, PT_R24, \docfi
		.endm

		.macro	RESTORE_STATIC docfi=0
		cfi_ld	$16, PT_R16, \docfi
		cfi_ld	$17, PT_R17, \docfi
		cfi_ld	$18, PT_R18, \docfi
		cfi_ld	$19, PT_R19, \docfi
		cfi_ld	$20, PT_R20, \docfi
		cfi_ld	$21, PT_R21, \docfi
		cfi_ld	$22, PT_R22, \docfi
		cfi_ld	$23, PT_R23, \docfi
		cfi_ld	$30, PT_R30, \docfi
		.endm

		.macro	RESTORE_SP docfi=0
		cfi_ld	sp, PT_R29, \docfi
		.endm

#if defined(CONFIG_CPU_R3000)

		.macro	RESTORE_SOME docfi=0
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		li	v1, ST0_CU1 | ST0_IM
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		cfi_ld	$31, PT_R31, \docfi
		cfi_ld	$28, PT_R28, \docfi
		cfi_ld	$25, PT_R25, \docfi
		cfi_ld	$7, PT_R7, \docfi
		cfi_ld	$6, PT_R6, \docfi
		cfi_ld	$5, PT_R5, \docfi
		cfi_ld	$4, PT_R4, \docfi
		cfi_ld	$3, PT_R3, \docfi
		cfi_ld	$2, PT_R2, \docfi
		.set	pop
		.endm

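/*
 * Note: the ori/xori pair in RESTORE_SOME above first forces the
 * STATMASK bits of Status to 1, then flips exactly those bits back to
 * 0, clearing them without disturbing the rest of the register.
 */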
		.macro	RESTORE_SP_AND_RET docfi=0
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		RESTORE_SP \docfi
		jr	k0
		 rfe
		.set	pop
		.endm

#else
		.macro	RESTORE_SOME docfi=0
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		li	v1, ST0_CU1 | ST0_FR | ST0_IM
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		cfi_ld	$31, PT_R31, \docfi
		cfi_ld	$28, PT_R28, \docfi
		cfi_ld	$25, PT_R25, \docfi
#ifdef CONFIG_64BIT
		cfi_ld	$8, PT_R8, \docfi
		cfi_ld	$9, PT_R9, \docfi
#endif
		cfi_ld	$7, PT_R7, \docfi
		cfi_ld	$6, PT_R6, \docfi
		cfi_ld	$5, PT_R5, \docfi
		cfi_ld	$4, PT_R4, \docfi
		cfi_ld	$3, PT_R3, \docfi
		cfi_ld	$2, PT_R2, \docfi
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET docfi=0
		RESTORE_SP \docfi
#if defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
		eretnc
#else
		.set	push
		.set	arch=r4000
		eret
		.set	pop
#endif
		.endm

#endif

		.macro	RESTORE_ALL docfi=0
		RESTORE_TEMP \docfi
		RESTORE_STATIC \docfi
		RESTORE_AT \docfi
		RESTORE_SOME \docfi
		RESTORE_SP \docfi
		.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	CLI
		mfc0	t0, CP0_STATUS
		li	t1, ST0_KERNEL_CUMASK | STATMASK
		or	t0, t1
		xori	t0, STATMASK
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	STI
		mfc0	t0, CP0_STATUS
		li	t1, ST0_KERNEL_CUMASK | STATMASK
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_enable_hazard
		.endm

/*
 * Just move to kernel mode and leave interrupts as they are.  Note that
 * for the R3000 this means copying the previous interrupt enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	KMODE
		mfc0	t0, CP0_STATUS
		li	t1, ST0_KERNEL_CUMASK | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000)
		andi	t2, t0, ST0_IEP
		srl	t2, 2
		or	t0, t2
#endif
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm

#endif /* _ASM_STACKFRAME_H */
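/*
 * Usage sketch (illustrative only; real handlers live in genex.S and
 * friends, and handle_example is a hypothetical name): an exception
 * handler built from these macros saves the register file on entry and
 * unwinds it on the way out, along the lines of
 *
 *	NESTED(handle_example, PT_SIZE, sp)
 *		SAVE_ALL			# build the pt_regs frame
 *		CLI				# kernel mode, interrupts off
 *		...				# exception-specific work
 *		RESTORE_TEMP
 *		RESTORE_STATIC
 *		RESTORE_AT
 *		RESTORE_SOME
 *		RESTORE_SP_AND_RET		# reload sp, eret/rfe back
 *	END(handle_example)
 */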