/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Common Low Level Interrupts/Traps/Exceptions
 * (included from entry-<isa>.S)
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

/*------------------------------------------------------------------
 *    Function                            ABI
 *------------------------------------------------------------------
 *
 *  Arguments                           r0 - r7
 *  Caller Saved Registers              r0 - r12
 *  Callee Saved Registers              r13 - r25
 *  Global Pointer (gp)                 r26
 *  Frame Pointer (fp)                  r27
 *  Stack Pointer (sp)                  r28
 *  Branch link register (blink)        r31
 *------------------------------------------------------------------
 */

;################### Special Sys Call Wrappers #########################

ENTRY(sys_clone_wrapper)
	SAVE_CALLEE_SAVED_USER
	bl  @sys_clone
	DISCARD_CALLEE_SAVED_USER

	GET_CURR_THR_INFO_FLAGS   r10
	and.f  0, r10, _TIF_SYSCALL_WORK
	bnz    tracesys_exit

	b      .Lret_from_system_call
END(sys_clone_wrapper)

ENTRY(sys_clone3_wrapper)
	SAVE_CALLEE_SAVED_USER
	bl  @sys_clone3
	DISCARD_CALLEE_SAVED_USER

	GET_CURR_THR_INFO_FLAGS   r10
	and.f  0, r10, _TIF_SYSCALL_WORK
	bnz    tracesys_exit

	b      .Lret_from_system_call
END(sys_clone3_wrapper)

ENTRY(ret_from_fork)
	; when the forked child comes here from the __switch_to function
	; r0 has the last task pointer.
	; put last task in scheduler queue
	jl   @schedule_tail

	ld   r9, [sp, PT_status32]
	brne r9, 0, 1f

	jl.d [r14]		; kernel thread entry point
	mov  r0, r13		; (see PF_KTHREAD block in copy_thread)

1:
	; Return to user space
	; 1. Any forked task (Reach here via BRne above)
	; 2. First ever init task (Reach here via return from JL above)
	;    This is the historic "kernel_execve" use-case: getting to
	;    user mode, in a round about way, since init starts life as
	;    a kernel thread which is executed via the JL above and only
	;    drops out to user mode when kernel_execve is involved
	b    ret_from_exception
END(ret_from_fork)
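
; In C terms, ret_from_fork above amounts to roughly the following
; (an illustrative sketch only -- kthread_fn/kthread_arg stand for the
; function and argument that copy_thread() parked in r14/r13, and the
; real user/kernel decision is made on the saved STATUS32 word):
;
;	void ret_from_fork_sketch(struct task_struct *prev)
;	{
;		schedule_tail(prev);		/* finish context-switch bookkeeping */
;		if (saved_status32 == 0)	/* no user context: kernel thread */
;			kthread_fn(kthread_arg);
;		/* otherwise (or once the fn returns) exit via ret_from_exception */
;	}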

;################### Non TLB Exception Handling #######################

; ---------------------------------------------
; Instruction Error Exception Handler
; ---------------------------------------------

ENTRY(instr_service)

	EXCEPTION_PROLOGUE

	bl  do_insterror_or_kprobe
	b   ret_from_exception
END(instr_service)

; ---------------------------------------------
; Machine Check Exception Handler
; ---------------------------------------------

ENTRY(EV_MachineCheck)

	EXCEPTION_PROLOGUE_KEEP_AE	; ECR returned in r10

	lr  r0, [efa]
	mov r1, sp

	; MC exceptions disable MMU
	ARC_MMU_REENABLE r3

	lsr  r3, r10, 8
	bmsk r3, r3, 7
	brne r3, ECR_C_MCHK_DUP_TLB, 1f

	bl   do_tlb_overlap_fault
	b    ret_from_exception
1:
	; DEAD END: can't do much, display Regs and HALT
	SAVE_CALLEE_SAVED_USER

	GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r10
	st  sp, [r10, THREAD_CALLEE_REG]

	j  do_machine_check_fault

END(EV_MachineCheck)

; ---------------------------------------------
; Privilege Violation Exception Handler
; ---------------------------------------------
ENTRY(EV_PrivilegeV)

	EXCEPTION_PROLOGUE

	bl  do_privilege_fault
	b   ret_from_exception
END(EV_PrivilegeV)

; ---------------------------------------------
; Extension Instruction Exception Handler
; ---------------------------------------------
ENTRY(EV_Extension)

	EXCEPTION_PROLOGUE

	bl  do_extension_fault
	b   ret_from_exception
END(EV_Extension)
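
; The stubs above all share one shape: EXCEPTION_PROLOGUE builds the
; pt_regs frame, a C handler inspects it, and everything unwinds through
; ret_from_exception. A minimal sketch of the C side, assuming the usual
; (address, regs) handler signature used by ARC's traps.c:
;
;	void do_privilege_fault(unsigned long address, struct pt_regs *regs)
;	{
;		/* typically: queue SIGILL for a user-mode fault,
;		 * or die() if the fault happened in kernel mode */
;	}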

;################ Trap Handling (Syscall, Breakpoint) ##################

; ---------------------------------------------
; syscall Tracing
; ---------------------------------------------
tracesys:
	; safekeep EFA (r12) if syscall tracer wanted to peek at it
	; for traps, ERET is pre-commit so points to next-PC
	GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r11
	st  r12, [r11, THREAD_FAULT_ADDR]

	; PRE syscall trace hook
	mov r0, sp
	bl  @syscall_trace_enter

	; Tracing code now returns the syscall num (orig or modified)
	mov r8, r0

	; Do the Sys Call as we normally would.
	cmp     r8, NR_syscalls - 1
	mov.hi  r0, -ENOSYS
	bhi     tracesys_exit

	; Restore the sys-call args. Mere invocation of the hook above could
	; clobbered them (since they are in scratch regs). The tracer could also
	; have deliberately changed the syscall args: r0-r7
	ld  r0, [sp, PT_r0]
	ld  r1, [sp, PT_r1]
	ld  r2, [sp, PT_r2]
	ld  r3, [sp, PT_r3]
	ld  r4, [sp, PT_r4]
	ld  r5, [sp, PT_r5]
	ld  r6, [sp, PT_r6]
	ld  r7, [sp, PT_r7]
	ld.as   r9, [sys_call_table, r8]
	jl      [r9]

tracesys_exit:
	st  r0, [sp, PT_r0]

	; POST syscall trace hook
	mov r0, sp		; pt_regs needed as arg
	bl  @syscall_trace_exit

	; don't call ret_from_system_call as it saves r0, already done above
	b   ret_from_exception

; ---------------------------------------------
; Breakpoint TRAP
; ---------------------------------------------
trap_with_param:
	mov r0, r12	; EFA in case ptracer/gdb wants stop_pc
	mov r1, sp	; pt_regs

	; save callee regs in case tracer/gdb wants to peek at them
	SAVE_CALLEE_SAVED_USER

	; safekeep ref to callee regs
	GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r10
	st  sp, [r10, THREAD_CALLEE_REG]

	; call the non syscall trap handler
	bl  do_non_swi_trap

	; unwind stack to discard callee regs
	DISCARD_CALLEE_SAVED_USER

	b   ret_from_exception

; ---------------------------------------------
; syscall TRAP
; ABI: (r0-r7) up to 8 args, (r8) syscall number
; ---------------------------------------------

ENTRY(EV_Trap)

	EXCEPTION_PROLOGUE_KEEP_AE

	lr  r12, [efa]

	FAKE_RET_FROM_EXCPN

	;============ TRAP N : breakpoints, kprobes etc
	bmsk.f 0, r10, 7
	bnz    trap_with_param

	;============ TRAP 0 (no param): syscall

	; syscall tracing ongoing, invoke pre-syscall trace hook
	GET_CURR_THR_INFO_FLAGS   r10
	and.f 0, r10, _TIF_SYSCALL_WORK
	bnz   tracesys	; this never comes back

	;============ Normal syscall case

	cmp     r8, NR_syscalls - 1
	mov.hi  r0, -ENOSYS
	bhi     .Lret_from_system_call

	ld.as   r9, [sys_call_table, r8]
	jl      [r9]

.Lret_from_system_call:
	st  r0, [sp, PT_r0]	; sys call return value in pt_regs

	; fall through to ret_from_exception
END(EV_Trap)
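
; In C terms the TRAP0 fast path above boils down to the following
; (a sketch, not the kernel's actual code; "dispatch_syscall" is a
; hypothetical name, and table entries take their args per the ABI
; note above):
;
;	long dispatch_syscall(struct pt_regs *regs)
;	{
;		unsigned int nr = regs->r8;	/* syscall number */
;
;		if (nr >= NR_syscalls)
;			return -ENOSYS;		/* mov.hi r0, -ENOSYS */
;		return sys_call_table[nr](regs->r0, regs->r1, regs->r2,
;					  regs->r3, regs->r4, regs->r5,
;					  regs->r6, regs->r7);
;	}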

;############# Return from Intr/Excp/Trap (Linux Specifics) ##############
;
; If ret to user mode do we need to handle signals, schedule() et al.

ENTRY(ret_from_exception)

	; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
	ld  r8, [sp, PT_status32]	; returning to User/Kernel Mode

	bbit0  r8, STATUS_U_BIT, resume_kernel_mode

	; Before returning to User mode check-for-and-complete any pending work
	; such as rescheduling/signal-delivery etc.
resume_user_mode_begin:

	; Disable IRQs to ensures that chk for pending work itself is atomic
	; (and we don't end up missing a NEED_RESCHED/SIGPENDING due to an
	; interim IRQ).
	IRQ_DISABLE	r10

	; Fast Path return to user mode if no pending work
	GET_CURR_THR_INFO_FLAGS   r9
	and.f  0, r9, _TIF_WORK_MASK
	bz     .Lrestore_regs

	; --- (Slow Path #1) task preemption ---
	bbit0  r9, TIF_NEED_RESCHED, .Lchk_pend_signals
	mov    blink, resume_user_mode_begin
	j      @schedule 	; BTST+Bnz causes relo error in link

.Lchk_pend_signals:
	IRQ_ENABLE	r10

	; --- (Slow Path #2) pending signal ---
	mov r0, sp	; pt_regs for arg to do_signal()/do_notify_resume()

	GET_CURR_THR_INFO_FLAGS   r9
	and.f  0, r9, _TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL
	bz .Lchk_notify_resume

	; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
	; in pt_reg since the "C" ABI (kernel code) guarantees callee to
	; save/restore callee-saved regs.
	;
	; However, here we need to explicitly save callee regs because
	; (i)  If this signal causes coredump - full regfile needed
	; (ii) If signal is SIGTRAP/SIGSTOP, task is being debugged thus
	;      tracer might call PEEKUSR(CALLEE reg)
	;
	; NOTE: SP will grow up by size of CALLEE Reg-File
	SAVE_CALLEE_SAVED_USER

	; save location of saved Callee Regs @ thread_struct->callee
	GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r10
	st  sp, [r10, THREAD_CALLEE_REG]

	bl  @do_signal

	; Ideally we want to discard the Callee regs above, however if this was
	; a tracing signal, tracer could have done a POKEUSR(CALLEE reg)
	RESTORE_CALLEE_SAVED_USER

	b      resume_user_mode_begin	; loop back to start of U mode checks

	; --- (Slow Path #3) notify_resume ---
.Lchk_notify_resume:
	btst   r9, TIF_NOTIFY_RESUME
	blnz   @do_notify_resume
	b      resume_user_mode_begin	; unconditionally back to U mode checks
					; for single exit point from this block
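
; The user-mode resume path above is the classic "pending work" loop;
; restated as C pseudo-code (sketch only -- the real loop is the set of
; branches back to resume_user_mode_begin, with IRQs re-disabled and
; the flags re-read on every pass):
;
;	for (;;) {
;		local_irq_disable();
;		flags = current_thread_info()->flags;
;		if (!(flags & _TIF_WORK_MASK))
;			break;			/* .Lrestore_regs */
;		if (flags & _TIF_NEED_RESCHED) {
;			schedule();
;			continue;
;		}
;		local_irq_enable();
;		if (flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
;			do_signal(regs);
;		else if (flags & _TIF_NOTIFY_RESUME)
;			do_notify_resume(regs);
;	}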

resume_kernel_mode:

	; Disable Interrupts from this point on
	; CONFIG_PREEMPTION: This is a must for preempt_schedule_irq()
	; !CONFIG_PREEMPTION: To ensure restore_regs is intr safe
	IRQ_DISABLE	r9

#ifdef CONFIG_PREEMPTION

	; Can't preempt if preemption disabled
	GET_CURR_THR_INFO_FROM_SP   r10
	ld  r8, [r10, THREAD_INFO_PREEMPT_COUNT]
	brne  r8, 0, .Lrestore_regs

	; check if this task's NEED_RESCHED flag set
	ld  r9, [r10, THREAD_INFO_FLAGS]
	bbit0  r9, TIF_NEED_RESCHED, .Lrestore_regs

	; Invoke PREEMPTION
	jl      preempt_schedule_irq

	; preempt_schedule_irq() always returns with IRQ disabled
#endif
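
; Condensed to C, the kernel-mode resume path only ever preempts when
; all three conditions hold (a sketch of the logic above, not a real
; kernel function):
;
;	if (IS_ENABLED(CONFIG_PREEMPTION) &&
;	    preempt_count() == 0 &&
;	    (current_thread_info()->flags & _TIF_NEED_RESCHED))
;		preempt_schedule_irq();	/* returns with IRQs still disabled */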

	b	.Lrestore_regs

##### DONT ADD CODE HERE - .Lrestore_regs actually follows in entry-<isa>.S