/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code, for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <linux/linkage.h>

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
#include <asm/interrupt.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context
 * synchronising (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE
 * without additional synchronisation instructions.
 */

/*
 * Align to 4k in order to ensure that all functions modifying srr0/srr1
 * fit into one page in order to not encounter a TLB miss between the
 * modification of srr0/srr1 and the associated rfi.
 */
	.align	12

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
	.globl	prepare_transfer_to_handler
prepare_transfer_to_handler:
	/* if from kernel, check interrupted DOZE/NAP mode */
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
	blr

4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	REST_GPR(2, r11)
	b	fast_exception_return
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */

#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
SYM_FUNC_START(__kuep_lock)
	lwz	r9, THREAD+THSR0(r2)
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_lock)

SYM_FUNC_START_LOCAL(__kuep_unlock)
	lwz	r9, THREAD+THSR0(r2)
	rlwinm	r9,r9,0,~SR_NX
	update_user_segments_by_4 r9, r10, r11, r12
	blr
SYM_FUNC_END(__kuep_unlock)

.macro	kuep_lock
	bl	__kuep_lock
.endm
.macro	kuep_unlock
	bl	__kuep_unlock
.endm
#else
.macro	kuep_lock
.endm
.macro	kuep_unlock
.endm
#endif

	.globl	transfer_to_syscall
transfer_to_syscall:
	stw	r3, ORIG_GPR3(r1)
	stw	r11, GPR1(r1)
	stw	r11, 0(r1)
	mflr	r12
	stw	r12, _LINK(r1)
#ifdef CONFIG_BOOKE
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#endif
	lis	r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	SAVE_GPR(2, r1)
	addi	r12,r12,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r1)
	li	r2, INTERRUPT_SYSCALL
	stw	r12,STACK_INT_FRAME_MARKER(r1)
	stw	r2,_TRAP(r1)
	SAVE_GPR(0, r1)
	SAVE_GPRS(3, 8, r1)
	addi	r2,r10,-THREAD
	SAVE_NVGPRS(r1)
	kuep_lock

	/* Calling convention has r3 = regs, r4 = orig r0 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	mr	r4,r0
	bl	system_call_exception

ret_from_syscall:
	addi	r4,r1,STACK_INT_FRAME_REGS
	li	r5,0
	bl	syscall_exit_prepare
#ifdef CONFIG_PPC_47x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	.L44x_icache_flush
#endif /* CONFIG_PPC_47x */
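/*
 * Note on the CONFIG_PPC_47x check above (a descriptive sketch, not
 * from the original file): icache_44x_need_flush is set by the
 * 440-family TLB-miss code outside this file when a freshly written
 * mapping may leave stale instructions in the icache. When the flag is
 * seen here, the branch takes the .L44x_icache_flush stub further down,
 * which uses iccci to invalidate the icache before returning to user
 * mode, then clears the flag.
 */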
.L44x_icache_flush_return:
	kuep_unlock
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	cmpwi	r3,0
	REST_GPR(3, r1)
syscall_exit_finish:
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8

	bne	3f
	mtcr	r5

1:	REST_GPR(2, r1)
	REST_GPR(1, r1)
	rfi

3:	mtcr	r5
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	REST_NVGPRS(r1)
	mtctr	r4
	mtxer	r5
	REST_GPR(0, r1)
	REST_GPRS(3, 12, r1)
	b	1b

#ifdef CONFIG_44x
.L44x_icache_flush:
	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	.L44x_icache_flush_return
#endif  /* CONFIG_44x */

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0	/* fork() return value */
	b	ret_from_syscall

	.globl	ret_from_kernel_user_thread
ret_from_kernel_user_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	li	r3,0
	b	ret_from_syscall

	.globl	start_kernel_thread
start_kernel_thread:
	bl	schedule_tail
	mtctr	r14
	mr	r3,r15
	PPC440EP_ERR42
	bctrl
	/*
	 * This must not return. We actually want to BUG here, not WARN,
	 * because BUG will exit the process which is what the kernel thread
	 * should have done, which may give some hope of continuing.
	 */
100:	trap
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0

	.globl	fast_exception_return
fast_exception_return:
#ifndef CONFIG_BOOKE
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	3f			/* if not, we've got problems */
#endif

2:	lwz	r10,_CCR(r11)
	REST_GPRS(1, 6, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	REST_GPR(11, r11)
	rfi
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	prepare_transfer_to_handler
	bl	unrecoverable_exception
	trap	/* should not get here */

	.globl interrupt_return
interrupt_return:
	lwz	r4,_MSR(r1)
	addi	r3,r1,STACK_INT_FRAME_REGS
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	bl	interrupt_exit_user_prepare
	cmpwi	r3,0
	kuep_unlock
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_CCR(r1)
	lwz	r4,_LINK(r1)
	lwz	r5,_CTR(r1)
	lwz	r6,_XER(r1)
	li	r0,0

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)
	REST_GPRS(7, 12, r1)

	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	REST_GPRS(2, 6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi

.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

.Lkernel_interrupt_return:
	bl	interrupt_exit_kernel_prepare

.Lfast_kernel_interrupt_return:
	cmpwi	cr1,r3,0
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

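	/*
	 * As in the user return path above, the feature section below
	 * deals with any lwarx/stwcx. reservation the interrupted context
	 * may still hold (a descriptive note, not from the original):
	 * CPUs whose stwcx. checks the reservation address can stwcx. to
	 * the stack to clear it; the others get a lwarx patched in
	 * instead, selected at boot via CPU_FTR_STCX_CHECKS_ADDRESS.
	 */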
BEGIN_FTR_SECTION
	stwcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	lwarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	lwz	r3,_LINK(r1)
	lwz	r4,_CTR(r1)
	lwz	r5,_XER(r1)
	lwz	r6,_CCR(r1)
	li	r0,0

	REST_GPRS(7, 12, r1)

	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	stw	r0,8(r1)

	REST_GPRS(2, 5, r1)

	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	rfi

1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * SPRG Scratch0 as temporary storage to hold the store
	 * data, as interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
#ifdef CONFIG_BOOKE
	mtspr	SPRN_SPRG_WSCRATCH0, r9
#else
	mtspr	SPRN_SPRG_SCRATCH0, r9
#endif
	addi	r9,r1,INT_FRAME_SIZE	/* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	stw	r9,0(r1)	/* perform store component of stwu */
#ifdef CONFIG_BOOKE
	mfspr	r9, SPRN_SPRG_RSCRATCH0
#else
	mfspr	r9, SPRN_SPRG_SCRATCH0
#endif
	rfi
_ASM_NOKPROBE_SYMBOL(interrupt_return)

#ifdef CONFIG_BOOKE

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
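/*
 * A descriptive note (not from the original): each Book-E exception
 * level has its own save/restore SPR pair and return instruction.
 * Critical interrupts use CSRR0/CSRR1 and rfci, debug interrupts use
 * DSRR0/DSRR1 and rfdi, and machine checks use MCSRR0/MCSRR1 and rfmci.
 * The RESTORE_xSRR helper below builds register names by token pasting;
 * for example, RESTORE_xSRR(CSRR0,CSRR1) expands to:
 *
 *	lwz	r9,_CSRR0(r1)
 *	lwz	r10,_CSRR1(r1)
 *	mtspr	SPRN_CSRR0,r9
 *	mtspr	SPRN_CSRR1,r10
 */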
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	bne	interrupt_return;					\
	REST_GPR(0, r1);						\
	REST_GPRS(2, 8, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	REST_GPRS(9, 12, r1);						\
	REST_GPR(1, r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_E500)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
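/*
 * A closing note on the handlers above (not from the original): the
 * deeper the exception level, the more state has to be put back.
 * ret_from_debug_exc restores SRR0/1 and CSRR0/1, and
 * ret_from_mcheck_exc additionally restores DSRR0/1, because a debug
 * interrupt or machine check may arrive while a lower-level handler is
 * still using those SPR pairs.
 */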