1 /* SPDX-License-Identifier: GPL-2.0-or-later * !! 1 /* SPDX-License-Identifier: GPL-2.0-or-later 2 /* !! 2 * -*- mode: asm -*- 3 * OpenRISC entry.S << 4 * 3 * 5 * Linux architectural port borrowing liberall !! 4 * linux/arch/m68k/kernel/entry.S 6 * others. All original copyrights apply as p !! 5 * 7 * declaration. !! 6 * Copyright (C) 1991, 1992 Linus Torvalds >> 7 * >> 8 * Linux/m68k support by Hamish Macdonald >> 9 * >> 10 * 68060 fixes by Jesper Skov 8 * 11 * 9 * Modifications for the OpenRISC architecture << 10 * Copyright (C) 2003 Matjaz Breskvar <phoenix@ << 11 * Copyright (C) 2005 Gyorgy Jeney <nog@bsemi.c << 12 * Copyright (C) 2010-2011 Jonas Bonn <jonas@so << 13 */ << 14 << 15 #include <linux/linkage.h> << 16 #include <linux/pgtable.h> << 17 << 18 #include <asm/processor.h> << 19 #include <asm/unistd.h> << 20 #include <asm/thread_info.h> << 21 #include <asm/errno.h> << 22 #include <asm/spr_defs.h> << 23 #include <asm/page.h> << 24 #include <asm/mmu.h> << 25 #include <asm/asm-offsets.h> << 26 << 27 #define DISABLE_INTERRUPTS(t1,t2) << 28 l.mfspr t2,r0,SPR_SR << 29 l.movhi t1,hi(~(SPR_SR_IEE|SPR_SR_TEE) << 30 l.ori t1,t1,lo(~(SPR_SR_IEE|SPR_SR_T << 31 l.and t2,t2,t1 << 32 l.mtspr r0,t2,SPR_SR << 33 << 34 #define ENABLE_INTERRUPTS(t1) << 35 l.mfspr t1,r0,SPR_SR << 36 l.ori t1,t1,lo(SPR_SR_IEE|SPR_SR_TEE << 37 l.mtspr r0,t1,SPR_SR << 38 << 39 /* =========================================== << 40 << 41 #ifdef CONFIG_TRACE_IRQFLAGS << 42 /* << 43 * Trace irq on/off creating a stack frame. << 44 */ << 45 #define TRACE_IRQS_OP(trace_op) << 46 l.sw -8(r1),r2 /* store frame << 47 l.sw -4(r1),r9 /* store retur << 48 l.addi r2,r1,0 /* move sp to << 49 l.jal trace_op << 50 l.addi r1,r1,-8 << 51 l.ori r1,r2,0 /* restore sp << 52 l.lwz r9,-4(r1) /* restore ret << 53 l.lwz r2,-8(r1) /* restore fp << 54 /* << 55 * Trace irq on/off and save registers we need << 56 * clobbered. << 57 */ 12 */ 58 #define TRACE_IRQS_SAVE(t1,trace_op) << 59 l.sw -12(r1),t1 /* save extra << 60 l.sw -8(r1),r2 /* store frame << 61 l.sw -4(r1),r9 /* store retur << 62 l.addi r2,r1,0 /* move sp to << 63 l.jal trace_op << 64 l.addi r1,r1,-12 << 65 l.ori r1,r2,0 /* restore sp << 66 l.lwz r9,-4(r1) /* restore ret << 67 l.lwz r2,-8(r1) /* restore fp << 68 l.lwz t1,-12(r1) /* restore ext << 69 << 70 #define TRACE_IRQS_OFF TRACE_IRQS_OP(trace_ha << 71 #define TRACE_IRQS_ON TRACE_IRQS_OP(trace_ha << 72 #define TRACE_IRQS_ON_SYSCALL << 73 TRACE_IRQS_SAVE(r10,trace_hardirqs_on) << 74 l.lwz r3,PT_GPR3(r1) << 75 l.lwz r4,PT_GPR4(r1) << 76 l.lwz r5,PT_GPR5(r1) << 77 l.lwz r6,PT_GPR6(r1) << 78 l.lwz r7,PT_GPR7(r1) << 79 l.lwz r8,PT_GPR8(r1) << 80 l.lwz r11,PT_GPR11(r1) << 81 #define TRACE_IRQS_OFF_ENTRY << 82 l.lwz r5,PT_SR(r1) << 83 l.andi r3,r5,(SPR_SR_IEE|SPR_SR_TEE) << 84 l.sfeq r5,r0 /* skip trace << 85 l.bf 1f << 86 l.nop << 87 TRACE_IRQS_SAVE(r4,trace_hardirqs_off) << 88 1: << 89 #else << 90 #define TRACE_IRQS_OFF << 91 #define TRACE_IRQS_ON << 92 #define TRACE_IRQS_OFF_ENTRY << 93 #define TRACE_IRQS_ON_SYSCALL << 94 #endif << 95 13 96 /* 14 /* 97 * We need to disable interrupts at beginning !! 15 * entry.S contains the system-call and fault low-level handling routines. 98 * since interrupt might come in after we've l !! 16 * This also contains the timer-interrupt handler, as well as all interrupts 99 * and overwrite EPC with address somewhere in !! 17 * and faults that can result in a task-switch. 100 * which is of course wrong! !! 
18 * >> 19 * NOTE: This code handles signal-recognition, which happens every time >> 20 * after a timer-interrupt and after each system call. >> 21 * 101 */ 22 */ 102 23 103 #define RESTORE_ALL << 104 DISABLE_INTERRUPTS(r3,r4) << 105 l.lwz r3,PT_PC(r1) << 106 l.mtspr r0,r3,SPR_EPCR_BASE << 107 l.lwz r3,PT_SR(r1) << 108 l.mtspr r0,r3,SPR_ESR_BASE << 109 l.lwz r2,PT_GPR2(r1) << 110 l.lwz r3,PT_GPR3(r1) << 111 l.lwz r4,PT_GPR4(r1) << 112 l.lwz r5,PT_GPR5(r1) << 113 l.lwz r6,PT_GPR6(r1) << 114 l.lwz r7,PT_GPR7(r1) << 115 l.lwz r8,PT_GPR8(r1) << 116 l.lwz r9,PT_GPR9(r1) << 117 l.lwz r10,PT_GPR10(r1) << 118 l.lwz r11,PT_GPR11(r1) << 119 l.lwz r12,PT_GPR12(r1) << 120 l.lwz r13,PT_GPR13(r1) << 121 l.lwz r14,PT_GPR14(r1) << 122 l.lwz r15,PT_GPR15(r1) << 123 l.lwz r16,PT_GPR16(r1) << 124 l.lwz r17,PT_GPR17(r1) << 125 l.lwz r18,PT_GPR18(r1) << 126 l.lwz r19,PT_GPR19(r1) << 127 l.lwz r20,PT_GPR20(r1) << 128 l.lwz r21,PT_GPR21(r1) << 129 l.lwz r22,PT_GPR22(r1) << 130 l.lwz r23,PT_GPR23(r1) << 131 l.lwz r24,PT_GPR24(r1) << 132 l.lwz r25,PT_GPR25(r1) << 133 l.lwz r26,PT_GPR26(r1) << 134 l.lwz r27,PT_GPR27(r1) << 135 l.lwz r28,PT_GPR28(r1) << 136 l.lwz r29,PT_GPR29(r1) << 137 l.lwz r30,PT_GPR30(r1) << 138 l.lwz r31,PT_GPR31(r1) << 139 l.lwz r1,PT_SP(r1) << 140 l.rfe << 141 << 142 << 143 #define EXCEPTION_ENTRY(handler) << 144 .global handler << 145 handler: << 146 /* r1, EPCR, ESR a already saved */ << 147 l.sw PT_GPR2(r1),r2 << 148 l.sw PT_GPR3(r1),r3 << 149 /* r4 already save */ << 150 l.sw PT_GPR5(r1),r5 << 151 l.sw PT_GPR6(r1),r6 << 152 l.sw PT_GPR7(r1),r7 << 153 l.sw PT_GPR8(r1),r8 << 154 l.sw PT_GPR9(r1),r9 << 155 /* r10 already saved */ << 156 l.sw PT_GPR11(r1),r11 << 157 /* r12 already saved */ << 158 l.sw PT_GPR13(r1),r13 << 159 l.sw PT_GPR14(r1),r14 << 160 l.sw PT_GPR15(r1),r15 << 161 l.sw PT_GPR16(r1),r16 << 162 l.sw PT_GPR17(r1),r17 << 163 l.sw PT_GPR18(r1),r18 << 164 l.sw PT_GPR19(r1),r19 << 165 l.sw PT_GPR20(r1),r20 << 166 l.sw PT_GPR21(r1),r21 << 167 l.sw PT_GPR22(r1),r22 << 168 l.sw PT_GPR23(r1),r23 << 169 l.sw PT_GPR24(r1),r24 << 170 l.sw PT_GPR25(r1),r25 << 171 l.sw PT_GPR26(r1),r26 << 172 l.sw PT_GPR27(r1),r27 << 173 l.sw PT_GPR28(r1),r28 << 174 l.sw PT_GPR29(r1),r29 << 175 /* r30 already save */ << 176 l.sw PT_GPR31(r1),r31 << 177 TRACE_IRQS_OFF_ENTRY << 178 /* Store -1 in orig_gpr11 for non-sysc << 179 l.addi r30,r0,-1 << 180 l.sw PT_ORIG_GPR11(r1),r30 << 181 << 182 #define UNHANDLED_EXCEPTION(handler,vector) << 183 .global handler << 184 handler: << 185 /* r1, EPCR, ESR already saved */ << 186 l.sw PT_GPR2(r1),r2 << 187 l.sw PT_GPR3(r1),r3 << 188 l.sw PT_GPR5(r1),r5 << 189 l.sw PT_GPR6(r1),r6 << 190 l.sw PT_GPR7(r1),r7 << 191 l.sw PT_GPR8(r1),r8 << 192 l.sw PT_GPR9(r1),r9 << 193 /* r10 already saved */ << 194 l.sw PT_GPR11(r1),r11 << 195 /* r12 already saved */ << 196 l.sw PT_GPR13(r1),r13 << 197 l.sw PT_GPR14(r1),r14 << 198 l.sw PT_GPR15(r1),r15 << 199 l.sw PT_GPR16(r1),r16 << 200 l.sw PT_GPR17(r1),r17 << 201 l.sw PT_GPR18(r1),r18 << 202 l.sw PT_GPR19(r1),r19 << 203 l.sw PT_GPR20(r1),r20 << 204 l.sw PT_GPR21(r1),r21 << 205 l.sw PT_GPR22(r1),r22 << 206 l.sw PT_GPR23(r1),r23 << 207 l.sw PT_GPR24(r1),r24 << 208 l.sw PT_GPR25(r1),r25 << 209 l.sw PT_GPR26(r1),r26 << 210 l.sw PT_GPR27(r1),r27 << 211 l.sw PT_GPR28(r1),r28 << 212 l.sw PT_GPR29(r1),r29 << 213 /* r30 already saved */ << 214 l.sw PT_GPR31(r1),r31 << 215 /* Store -1 in orig_gpr11 for non-sysc << 216 l.addi r30,r0,-1 << 217 l.sw PT_ORIG_GPR11(r1),r30 << 218 l.addi r3,r1,0 << 219 /* r4 is exception EA */ << 220 l.addi 
r5,r0,vector << 221 l.jal unhandled_exception << 222 l.nop << 223 l.j _ret_from_exception << 224 l.nop << 225 << 226 /* clobbers 'reg' */ << 227 #define CLEAR_LWA_FLAG(reg) \ << 228 l.movhi reg,hi(lwa_flag) ;\ << 229 l.ori reg,reg,lo(lwa_flag) ;\ << 230 l.sw 0(reg),r0 << 231 /* 24 /* 232 * NOTE: one should never assume that SPR_EPC, !! 25 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so 233 * contain the same values as when excep !! 26 * all pointers that used to be 'current' are now entry 234 * occured. in fact they never do. if yo !! 27 * number 0 in the 'current_set' list. 235 * values saved on stack (for SPR_EPC, S !! 28 * 236 * of r4 (for SPR_EEAR). for details loo !! 29 * 6/05/00 RZ: addedd writeback completion after return from sighandler 237 * in 'arch/openrisc/kernel/head.S' !! 30 * for 68040 238 */ 31 */ 239 32 240 /* =========================================== !! 33 #include <linux/linkage.h> >> 34 #include <asm/errno.h> >> 35 #include <asm/setup.h> >> 36 #include <asm/traps.h> >> 37 #include <asm/unistd.h> >> 38 #include <asm/asm-offsets.h> >> 39 #include <asm/entry.h> 241 40 242 /* ---[ 0x100: RESET exception ]-------------- !! 41 .globl system_call, buserr, trap, resume >> 42 .globl sys_call_table >> 43 .globl __sys_fork, __sys_clone, __sys_vfork >> 44 .globl bad_interrupt >> 45 .globl auto_irqhandler_fixup >> 46 .globl user_irqvec_fixup 243 47 244 EXCEPTION_ENTRY(_tng_kernel_start) !! 48 .text 245 l.jal _start !! 49 ENTRY(__sys_fork) 246 l.andi r0,r0,0 !! 50 SAVE_SWITCH_STACK 247 !! 51 jbsr sys_fork 248 /* ---[ 0x200: BUS exception ]---------------- !! 52 lea %sp@(24),%sp 249 !! 53 rts 250 EXCEPTION_ENTRY(_bus_fault_handler) << 251 CLEAR_LWA_FLAG(r3) << 252 /* r4: EA of fault (set by EXCEPTION_H << 253 l.jal do_bus_fault << 254 l.addi r3,r1,0 /* pt_regs */ << 255 << 256 l.j _ret_from_exception << 257 l.nop << 258 << 259 /* ---[ 0x300: Data Page Fault exception ]---- << 260 EXCEPTION_ENTRY(_dtlb_miss_page_fault_handler) << 261 CLEAR_LWA_FLAG(r3) << 262 l.and r5,r5,r0 << 263 l.j 1f << 264 l.nop << 265 << 266 EXCEPTION_ENTRY(_data_page_fault_handler) << 267 CLEAR_LWA_FLAG(r3) << 268 /* set up parameters for do_page_fault << 269 l.ori r5,r0,0x300 // << 270 1: << 271 l.addi r3,r1,0 // << 272 /* r4 set be EXCEPTION_HANDLE */ // << 273 54 274 #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX !! 55 ENTRY(__sys_clone) 275 l.lwz r6,PT_PC(r3) // !! 56 SAVE_SWITCH_STACK 276 l.lwz r6,0(r6) // !! 57 pea %sp@(SWITCH_STACK_SIZE) 277 !! 58 jbsr m68k_clone 278 l.srli r6,r6,26 // !! 59 lea %sp@(28),%sp 279 l.sfeqi r6,0 // !! 60 rts 280 l.bf 8f !! 61 281 l.sfeqi r6,1 // !! 62 ENTRY(__sys_vfork) 282 l.bf 8f !! 63 SAVE_SWITCH_STACK 283 l.sfeqi r6,3 // !! 64 jbsr sys_vfork 284 l.bf 8f !! 65 lea %sp@(24),%sp 285 l.sfeqi r6,4 // !! 66 rts 286 l.bf 8f << 287 l.sfeqi r6,0x11 // << 288 l.bf 8f << 289 l.sfeqi r6,0x12 // << 290 l.bf 8f << 291 l.nop << 292 << 293 l.j 9f << 294 l.nop << 295 << 296 8: // offending insn is in delay slot << 297 l.lwz r6,PT_PC(r3) // << 298 l.addi r6,r6,4 << 299 l.lwz r6,0(r6) // << 300 l.srli r6,r6,26 // << 301 9: // offending instruction opcode loaded in r << 302 67 303 #else !! 
68 ENTRY(__sys_clone3) >> 69 SAVE_SWITCH_STACK >> 70 pea %sp@(SWITCH_STACK_SIZE) >> 71 jbsr m68k_clone3 >> 72 lea %sp@(28),%sp >> 73 rts >> 74 >> 75 ENTRY(sys_sigreturn) >> 76 SAVE_SWITCH_STACK >> 77 movel %sp,%a1 | switch_stack pointer >> 78 lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer >> 79 lea %sp@(-84),%sp | leave a gap >> 80 movel %a1,%sp@- >> 81 movel %a0,%sp@- >> 82 jbsr do_sigreturn >> 83 jra 1f | shared with rt_sigreturn() 304 84 305 l.mfspr r6,r0,SPR_SR // !! 85 ENTRY(sys_rt_sigreturn) 306 l.andi r6,r6,SPR_SR_DSX // !! 86 SAVE_SWITCH_STACK 307 l.sfne r6,r0 // !! 87 movel %sp,%a1 | switch_stack pointer 308 l.bnf 7f !! 88 lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer 309 l.lwz r6,PT_PC(r3) // !! 89 lea %sp@(-84),%sp | leave a gap 310 !! 90 movel %a1,%sp@- 311 l.addi r6,r6,4 // !! 91 movel %a0,%sp@- 312 7: !! 92 | stack contents: 313 l.lwz r6,0(r6) // !! 93 | [original pt_regs address] [original switch_stack address] 314 l.srli r6,r6,26 // !! 94 | [gap] [switch_stack] [pt_regs] [exception frame] 315 #endif !! 95 jbsr do_rt_sigreturn 316 96 317 l.sfgeui r6,0x33 // << 318 l.bnf 1f << 319 l.sfleui r6,0x37 << 320 l.bnf 1f << 321 l.ori r6,r0,0x1 // << 322 l.j 2f << 323 l.nop << 324 1: l.ori r6,r0,0x0 // << 325 2: << 326 << 327 /* call fault.c handler in openrisc/mm << 328 l.jal do_page_fault << 329 l.nop << 330 l.j _ret_from_exception << 331 l.nop << 332 << 333 /* ---[ 0x400: Insn Page Fault exception ]---- << 334 EXCEPTION_ENTRY(_itlb_miss_page_fault_handler) << 335 CLEAR_LWA_FLAG(r3) << 336 l.and r5,r5,r0 << 337 l.j 1f << 338 l.nop << 339 << 340 EXCEPTION_ENTRY(_insn_page_fault_handler) << 341 CLEAR_LWA_FLAG(r3) << 342 /* set up parameters for do_page_fault << 343 l.ori r5,r0,0x400 // << 344 1: 97 1: 345 l.addi r3,r1,0 // !! 98 | stack contents now: 346 /* r4 set be EXCEPTION_HANDLE */ // !! 99 | [original pt_regs address] [original switch_stack address] 347 l.ori r6,r0,0x0 // !! 100 | [unused part of the gap] [moved switch_stack] [moved pt_regs] 348 !! 101 | [replacement exception frame] 349 /* call fault.c handler in openrisc/mm !! 102 | return value of do_{rt_,}sigreturn() points to moved switch_stack. 350 l.jal do_page_fault !! 103 351 l.nop !! 104 movel %d0,%sp | discard the leftover junk 352 l.j _ret_from_exception !! 105 RESTORE_SWITCH_STACK 353 l.nop !! 106 | stack contents now is just [syscall return address] [pt_regs] [frame] 354 !! 107 | return pt_regs.d0 355 !! 108 movel %sp@(PT_OFF_D0+4),%d0 356 /* ---[ 0x500: Timer exception ]-------------- !! 109 rts 357 !! 110 358 EXCEPTION_ENTRY(_timer_handler) !! 111 ENTRY(buserr) 359 CLEAR_LWA_FLAG(r3) !! 112 SAVE_ALL_INT 360 l.jal timer_interrupt !! 113 GET_CURRENT(%d0) 361 l.addi r3,r1,0 /* pt_regs */ !! 114 movel %sp,%sp@- | stack frame pointer argument 362 !! 115 jbsr buserr_c 363 l.j _ret_from_intr !! 116 addql #4,%sp 364 l.nop !! 117 jra ret_from_exception 365 !! 118 366 /* ---[ 0x600: Alignment exception ]---------- !! 119 ENTRY(trap) 367 !! 120 SAVE_ALL_INT 368 EXCEPTION_ENTRY(_alignment_handler) !! 121 GET_CURRENT(%d0) 369 CLEAR_LWA_FLAG(r3) !! 122 movel %sp,%sp@- | stack frame pointer argument 370 /* r4: EA of fault (set by EXCEPTION_H !! 123 jbsr trap_c 371 l.jal do_unaligned_access !! 124 addql #4,%sp 372 l.addi r3,r1,0 /* pt_regs */ !! 125 jra ret_from_exception 373 !! 126 374 l.j _ret_from_exception !! 127 | After a fork we jump here directly from resume, 375 l.nop !! 128 | so that %d1 contains the previous task 376 !! 129 | schedule_tail now used regardless of CONFIG_SMP 377 #if 0 !! 
130 ENTRY(ret_from_fork) 378 EXCEPTION_ENTRY(_alignment_handler) !! 131 movel %d1,%sp@- 379 // l.mfspr r2,r0,SPR_EEAR_BASE /* L !! 132 jsr schedule_tail 380 l.addi r2,r4,0 !! 133 addql #4,%sp 381 // l.mfspr r5,r0,SPR_EPCR_BASE /* L !! 134 jra ret_from_exception 382 l.lwz r5,PT_PC(r1) !! 135 383 !! 136 ENTRY(ret_from_kernel_thread) 384 l.lwz r3,0(r5) /* Loa !! 137 | a3 contains the kernel thread payload, d7 - its argument 385 l.srli r4,r3,26 /* Shi !! 138 movel %d1,%sp@- 386 !! 139 jsr schedule_tail 387 l.sfeqi r4,0x00 /* Che !! 140 movel %d7,(%sp) 388 l.bf jmp !! 141 jsr %a3@ 389 l.sfeqi r4,0x01 !! 142 addql #4,%sp 390 l.bf jmp !! 143 jra ret_from_exception 391 l.sfeqi r4,0x03 !! 144 392 l.bf jmp !! 145 #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU) 393 l.sfeqi r4,0x04 !! 146 394 l.bf jmp !! 147 #ifdef TRAP_DBG_INTERRUPT 395 l.sfeqi r4,0x11 !! 148 396 l.bf jr !! 149 .globl dbginterrupt 397 l.sfeqi r4,0x12 !! 150 ENTRY(dbginterrupt) 398 l.bf jr !! 151 SAVE_ALL_INT 399 l.nop !! 152 GET_CURRENT(%d0) 400 l.j 1f !! 153 movel %sp,%sp@- /* stack frame pointer argument */ 401 l.addi r5,r5,4 /* Inc !! 154 jsr dbginterrupt_c 402 !! 155 addql #4,%sp 403 jmp: !! 156 jra ret_from_exception 404 l.slli r4,r3,6 /* Get !! 157 #endif 405 l.srai r4,r4,4 << 406 << 407 l.lwz r3,4(r5) /* Loa << 408 << 409 l.add r5,r5,r4 /* Cal << 410 << 411 l.j 1f << 412 l.srli r4,r3,26 /* Shi << 413 << 414 jr: << 415 l.slli r4,r3,9 /* Shi << 416 l.andi r4,r4,0x7c << 417 << 418 l.lwz r3,4(r5) /* Loa << 419 158 420 l.add r4,r4,r1 /* Loa !! 159 ENTRY(reschedule) 421 l.lwz r5,0(r4) !! 160 /* save top of frame */ >> 161 pea %sp@ >> 162 jbsr set_esp0 >> 163 addql #4,%sp >> 164 pea ret_from_exception >> 165 jmp schedule >> 166 >> 167 ENTRY(ret_from_user_signal) >> 168 moveq #__NR_sigreturn,%d0 >> 169 trap #0 >> 170 >> 171 ENTRY(ret_from_user_rt_signal) >> 172 movel #__NR_rt_sigreturn,%d0 >> 173 trap #0 422 174 423 l.srli r4,r3,26 /* Shi !! 175 #else 424 176 >> 177 do_trace_entry: >> 178 movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace >> 179 subql #4,%sp >> 180 SAVE_SWITCH_STACK >> 181 jbsr syscall_trace_enter >> 182 RESTORE_SWITCH_STACK >> 183 addql #4,%sp >> 184 addql #1,%d0 | optimization for cmpil #-1,%d0 >> 185 jeq ret_from_syscall >> 186 movel %sp@(PT_OFF_ORIG_D0),%d0 >> 187 cmpl #NR_syscalls,%d0 >> 188 jcs syscall >> 189 jra ret_from_syscall >> 190 badsys: >> 191 movel #-ENOSYS,%sp@(PT_OFF_D0) >> 192 jra ret_from_syscall >> 193 >> 194 do_trace_exit: >> 195 subql #4,%sp >> 196 SAVE_SWITCH_STACK >> 197 jbsr syscall_trace_leave >> 198 RESTORE_SWITCH_STACK >> 199 addql #4,%sp >> 200 jra .Lret_from_exception >> 201 >> 202 ENTRY(system_call) >> 203 SAVE_ALL_SYS >> 204 >> 205 GET_CURRENT(%d1) >> 206 movel %d1,%a1 >> 207 >> 208 | save top of frame >> 209 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0) >> 210 >> 211 | syscall trace? >> 212 tstb %a1@(TINFO_FLAGS+2) >> 213 jmi do_trace_entry >> 214 | seccomp filter active? 
>> 215 btst #5,%a1@(TINFO_FLAGS+2) >> 216 bnes do_trace_entry >> 217 cmpl #NR_syscalls,%d0 >> 218 jcc badsys >> 219 syscall: >> 220 jbsr @(sys_call_table,%d0:l:4)@(0) >> 221 movel %d0,%sp@(PT_OFF_D0) | save the return value >> 222 ret_from_syscall: >> 223 |oriw #0x0700,%sr >> 224 movel %curptr@(TASK_STACK),%a1 >> 225 movew %a1@(TINFO_FLAGS+2),%d0 >> 226 jne syscall_exit_work >> 227 1: RESTORE_ALL >> 228 >> 229 syscall_exit_work: >> 230 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel >> 231 bnes 1b | if so, skip resched, signals >> 232 lslw #1,%d0 >> 233 jcs do_trace_exit >> 234 jmi do_delayed_trace >> 235 lslw #8,%d0 >> 236 jne do_signal_return >> 237 pea resume_userspace >> 238 jra schedule >> 239 >> 240 >> 241 ENTRY(ret_from_exception) >> 242 .Lret_from_exception: >> 243 btst #5,%sp@(PT_OFF_SR) | check if returning to kernel >> 244 bnes 1f | if so, skip resched, signals >> 245 | only allow interrupts when we are really the last one on the >> 246 | kernel stack, otherwise stack overflow can occur during >> 247 | heavy interrupt load >> 248 andw #ALLOWINT,%sr >> 249 >> 250 resume_userspace: >> 251 movel %curptr@(TASK_STACK),%a1 >> 252 moveb %a1@(TINFO_FLAGS+3),%d0 >> 253 jne exit_work >> 254 1: RESTORE_ALL >> 255 >> 256 exit_work: >> 257 | save top of frame >> 258 movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0) >> 259 lslb #1,%d0 >> 260 jne do_signal_return >> 261 pea resume_userspace >> 262 jra schedule >> 263 >> 264 >> 265 do_signal_return: >> 266 |andw #ALLOWINT,%sr >> 267 subql #4,%sp | dummy return address >> 268 SAVE_SWITCH_STACK >> 269 pea %sp@(SWITCH_STACK_SIZE) >> 270 bsrl do_notify_resume >> 271 addql #4,%sp >> 272 RESTORE_SWITCH_STACK >> 273 addql #4,%sp >> 274 jbra resume_userspace >> 275 >> 276 do_delayed_trace: >> 277 bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR >> 278 pea 1 | send SIGTRAP >> 279 movel %curptr,%sp@- >> 280 pea LSIGTRAP >> 281 jbsr send_sig >> 282 addql #8,%sp >> 283 addql #4,%sp >> 284 jbra resume_userspace >> 285 >> 286 >> 287 /* This is the main interrupt handler for autovector interrupts */ >> 288 >> 289 ENTRY(auto_inthandler) >> 290 SAVE_ALL_INT >> 291 GET_CURRENT(%d0) >> 292 | put exception # in d0 >> 293 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 >> 294 subw #VEC_SPUR,%d0 >> 295 >> 296 movel %sp,%sp@- >> 297 movel %d0,%sp@- | put vector # on stack >> 298 auto_irqhandler_fixup = . + 2 >> 299 jsr do_IRQ | process the IRQ >> 300 addql #8,%sp | pop parameters off stack >> 301 jra ret_from_exception >> 302 >> 303 /* Handler for user defined interrupt vectors */ >> 304 >> 305 ENTRY(user_inthandler) >> 306 SAVE_ALL_INT >> 307 GET_CURRENT(%d0) >> 308 | put exception # in d0 >> 309 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0 >> 310 user_irqvec_fixup = . + 2 >> 311 subw #VEC_USER,%d0 >> 312 >> 313 movel %sp,%sp@- >> 314 movel %d0,%sp@- | put vector # on stack >> 315 jsr do_IRQ | process the IRQ >> 316 addql #8,%sp | pop parameters off stack >> 317 jra ret_from_exception >> 318 >> 319 /* Handler for uninitialized and spurious interrupts */ >> 320 >> 321 ENTRY(bad_inthandler) >> 322 SAVE_ALL_INT >> 323 GET_CURRENT(%d0) >> 324 >> 325 movel %sp,%sp@- >> 326 jsr handle_badint >> 327 addql #4,%sp >> 328 jra ret_from_exception 425 329 426 1: !! 330 resume: 427 // l.mtspr r0,r5,SPR_EPCR_BASE !! 331 /* 428 l.sw PT_PC(r1),r5 !! 332 * Beware - when entering resume, prev (the current task) is 429 !! 333 * in a0, next (the new task) is in a1,so don't change these 430 l.sfeqi r4,0x26 !! 334 * registers until their contents are no longer needed. 431 l.bf lhs !! 
335 */ 432 l.sfeqi r4,0x25 !! 336 433 l.bf lhz !! 337 /* save sr */ 434 l.sfeqi r4,0x22 !! 338 movew %sr,%a0@(TASK_THREAD+THREAD_SR) 435 l.bf lws !! 339 436 l.sfeqi r4,0x21 !! 340 /* save fs (sfc,%dfc) (may be pointing to kernel memory) */ 437 l.bf lwz !! 341 movec %sfc,%d0 438 l.sfeqi r4,0x37 !! 342 movew %d0,%a0@(TASK_THREAD+THREAD_FC) 439 l.bf sh !! 343 440 l.sfeqi r4,0x35 !! 344 /* save usp */ 441 l.bf sw !! 345 /* it is better to use a movel here instead of a movew 8*) */ 442 l.nop !! 346 movec %usp,%d0 443 !! 347 movel %d0,%a0@(TASK_THREAD+THREAD_USP) 444 1: l.j 1b /* I d !! 348 445 l.nop !! 349 /* save non-scratch registers on stack */ 446 !! 350 SAVE_SWITCH_STACK 447 lhs: l.lbs r5,0(r2) !! 351 448 l.slli r5,r5,8 !! 352 /* save current kernel stack pointer */ 449 l.lbz r6,1(r2) !! 353 movel %sp,%a0@(TASK_THREAD+THREAD_KSP) 450 l.or r5,r5,r6 !! 354 451 l.srli r4,r3,19 !! 355 /* save floating point context */ 452 l.andi r4,r4,0x7c !! 356 #ifndef CONFIG_M68KFPU_EMU_ONLY 453 l.add r4,r4,r1 !! 357 #ifdef CONFIG_M68KFPU_EMU 454 l.j align_end !! 358 tstl m68k_fputype 455 l.sw 0(r4),r5 !! 359 jeq 3f 456 << 457 lhz: l.lbz r5,0(r2) << 458 l.slli r5,r5,8 << 459 l.lbz r6,1(r2) << 460 l.or r5,r5,r6 << 461 l.srli r4,r3,19 << 462 l.andi r4,r4,0x7c << 463 l.add r4,r4,r1 << 464 l.j align_end << 465 l.sw 0(r4),r5 << 466 << 467 lws: l.lbs r5,0(r2) << 468 l.slli r5,r5,24 << 469 l.lbz r6,1(r2) << 470 l.slli r6,r6,16 << 471 l.or r5,r5,r6 << 472 l.lbz r6,2(r2) << 473 l.slli r6,r6,8 << 474 l.or r5,r5,r6 << 475 l.lbz r6,3(r2) << 476 l.or r5,r5,r6 << 477 l.srli r4,r3,19 << 478 l.andi r4,r4,0x7c << 479 l.add r4,r4,r1 << 480 l.j align_end << 481 l.sw 0(r4),r5 << 482 << 483 lwz: l.lbz r5,0(r2) << 484 l.slli r5,r5,24 << 485 l.lbz r6,1(r2) << 486 l.slli r6,r6,16 << 487 l.or r5,r5,r6 << 488 l.lbz r6,2(r2) << 489 l.slli r6,r6,8 << 490 l.or r5,r5,r6 << 491 l.lbz r6,3(r2) << 492 l.or r5,r5,r6 << 493 l.srli r4,r3,19 << 494 l.andi r4,r4,0x7c << 495 l.add r4,r4,r1 << 496 l.j align_end << 497 l.sw 0(r4),r5 << 498 << 499 sh: << 500 l.srli r4,r3,9 << 501 l.andi r4,r4,0x7c << 502 l.add r4,r4,r1 << 503 l.lwz r5,0(r4) << 504 l.sb 1(r2),r5 << 505 l.srli r5,r5,8 << 506 l.j align_end << 507 l.sb 0(r2),r5 << 508 << 509 sw: << 510 l.srli r4,r3,9 << 511 l.andi r4,r4,0x7c << 512 l.add r4,r4,r1 << 513 l.lwz r5,0(r4) << 514 l.sb 3(r2),r5 << 515 l.srli r5,r5,8 << 516 l.sb 2(r2),r5 << 517 l.srli r5,r5,8 << 518 l.sb 1(r2),r5 << 519 l.srli r5,r5,8 << 520 l.j align_end << 521 l.sb 0(r2),r5 << 522 << 523 align_end: << 524 l.j _ret_from_intr << 525 l.nop << 526 #endif 360 #endif >> 361 fsave %a0@(TASK_THREAD+THREAD_FPSTATE) 527 362 528 /* ---[ 0x700: Illegal insn exception ]------- !! 363 #if defined(CONFIG_M68060) 529 !! 364 #if !defined(CPU_M68060_ONLY) 530 EXCEPTION_ENTRY(_illegal_instruction_handler) !! 365 btst #3,m68k_cputype+3 531 /* r4: EA of fault (set by EXCEPTION_H !! 
366 beqs 1f 532 l.jal do_illegal_instruction << 533 l.addi r3,r1,0 /* pt_regs */ << 534 << 535 l.j _ret_from_exception << 536 l.nop << 537 << 538 /* ---[ 0x800: External interrupt exception ]- << 539 << 540 EXCEPTION_ENTRY(_external_irq_handler) << 541 #ifdef CONFIG_OPENRISC_ESR_EXCEPTION_BUG_CHECK << 542 l.lwz r4,PT_SR(r1) // wer << 543 l.andi r4,r4,SPR_SR_IEE << 544 l.sfeqi r4,0 << 545 l.bnf 1f // ext << 546 l.nop << 547 << 548 #ifdef CONFIG_PRINTK << 549 l.addi r1,r1,-0x8 << 550 l.movhi r3,hi(42f) << 551 l.ori r3,r3,lo(42f) << 552 l.sw 0x0(r1),r3 << 553 l.jal _printk << 554 l.sw 0x4(r1),r4 << 555 l.addi r1,r1,0x8 << 556 << 557 .section .rodata, "a" << 558 42: << 559 .string "\n\rESR interrupt bug << 560 .align 4 << 561 .previous << 562 #endif 367 #endif 563 !! 368 /* The 060 FPU keeps status in bits 15-8 of the first longword */ 564 l.ori r4,r4,SPR_SR_IEE // fix !! 369 tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2) 565 // l.sw PT_SR(r1),r4 !! 370 jeq 3f 566 1: !! 371 #if !defined(CPU_M68060_ONLY) >> 372 jra 2f 567 #endif 373 #endif 568 CLEAR_LWA_FLAG(r3) !! 374 #endif /* CONFIG_M68060 */ 569 l.addi r3,r1,0 !! 375 #if !defined(CPU_M68060_ONLY) 570 l.movhi r8,hi(generic_handle_arch_irq) !! 376 1: tstb %a0@(TASK_THREAD+THREAD_FPSTATE) 571 l.ori r8,r8,lo(generic_handle_arch_i !! 377 jeq 3f 572 l.jalr r8 << 573 l.nop << 574 l.j _ret_from_intr << 575 l.nop << 576 << 577 /* ---[ 0x900: DTLB miss exception ]---------- << 578 << 579 << 580 /* ---[ 0xa00: ITLB miss exception ]---------- << 581 << 582 << 583 /* ---[ 0xb00: Range exception ]-------------- << 584 << 585 UNHANDLED_EXCEPTION(_vector_0xb00,0xb00) << 586 << 587 /* ---[ 0xc00: Syscall exception ]------------ << 588 << 589 /* << 590 * Syscalls are a special type of exception in << 591 * _explicitly_ invoked by userspace and can t << 592 * held to conform to the same ABI as normal f << 593 * respect to whether registers are preserved << 594 * or not. << 595 */ << 596 << 597 /* Upon syscall entry we just save the callee- << 598 * and not the call-clobbered ones. << 599 */ << 600 << 601 _string_syscall_return: << 602 .string "syscall r9:0x%08x -> syscall( << 603 .align 4 << 604 << 605 ENTRY(_sys_call_handler) << 606 /* r1, EPCR, ESR a already saved */ << 607 l.sw PT_GPR2(r1),r2 << 608 /* r3-r8 must be saved because syscall << 609 * on us being able to restart the sys << 610 * they should be clobbered, otherwise << 611 */ << 612 l.sw PT_GPR3(r1),r3 << 613 /* << 614 * r4 already saved << 615 * r4 holds the EEAR address of the fa << 616 * then load the original r4 << 617 */ << 618 CLEAR_LWA_FLAG(r4) << 619 l.lwz r4,PT_GPR4(r1) << 620 l.sw PT_GPR5(r1),r5 << 621 l.sw PT_GPR6(r1),r6 << 622 l.sw PT_GPR7(r1),r7 << 623 l.sw PT_GPR8(r1),r8 << 624 l.sw PT_GPR9(r1),r9 << 625 /* r10 already saved */ << 626 l.sw PT_GPR11(r1),r11 << 627 /* orig_gpr11 must be set for syscalls << 628 l.sw PT_ORIG_GPR11(r1),r11 << 629 /* r12,r13 already saved */ << 630 << 631 /* r14-r28 (even) aren't touched by th << 632 * so we don't need to save them. How << 633 * to userspace via a call to switch() << 634 * switch() effectively clobbers them. << 635 * such functions is handled in their << 636 * and clone, below). 
<< 637 << 638 /* r30 is the only register we clobber << 639 /* r30 already saved */ << 640 /* l.sw PT_GPR30(r1),r30 */ << 641 << 642 _syscall_check_trace_enter: << 643 /* syscalls run with interrupts enable << 644 TRACE_IRQS_ON_SYSCALL << 645 ENABLE_INTERRUPTS(r29) // ena << 646 << 647 /* If TIF_SYSCALL_TRACE is set, then w << 648 l.lwz r30,TI_FLAGS(r10) << 649 l.andi r30,r30,_TIF_SYSCALL_TRACE << 650 l.sfne r30,r0 << 651 l.bf _syscall_trace_enter << 652 l.nop << 653 << 654 _syscall_check: << 655 /* Ensure that the syscall number is r << 656 l.sfgeui r11,__NR_syscalls << 657 l.bf _syscall_badsys << 658 l.nop << 659 << 660 _syscall_call: << 661 l.movhi r29,hi(sys_call_table) << 662 l.ori r29,r29,lo(sys_call_table) << 663 l.slli r11,r11,2 << 664 l.add r29,r29,r11 << 665 l.lwz r29,0(r29) << 666 << 667 l.jalr r29 << 668 l.nop << 669 << 670 _syscall_return: << 671 /* All syscalls return here... just pa << 672 * which does it in a round-about way. << 673 */ << 674 l.sw PT_GPR11(r1),r11 // << 675 << 676 #if 0 << 677 _syscall_debug: << 678 l.movhi r3,hi(_string_syscall_return) << 679 l.ori r3,r3,lo(_string_syscall_retur << 680 l.ori r27,r0,2 << 681 l.sw -4(r1),r27 << 682 l.sw -8(r1),r11 << 683 l.lwz r29,PT_ORIG_GPR11(r1) << 684 l.sw -12(r1),r29 << 685 l.lwz r29,PT_GPR9(r1) << 686 l.sw -16(r1),r29 << 687 l.movhi r27,hi(_printk) << 688 l.ori r27,r27,lo(_printk) << 689 l.jalr r27 << 690 l.addi r1,r1,-16 << 691 l.addi r1,r1,16 << 692 #endif 378 #endif 693 #if 0 !! 379 2: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG) 694 _syscall_show_regs: !! 380 fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL) 695 l.movhi r27,hi(show_registers) !! 381 3: 696 l.ori r27,r27,lo(show_registers) !! 382 #endif /* CONFIG_M68KFPU_EMU_ONLY */ 697 l.jalr r27 !! 383 /* Return previous task in %d1 */ 698 l.or r3,r1,r1 !! 384 movel %curptr,%d1 >> 385 >> 386 /* switch to new task (a1 contains new task) */ >> 387 movel %a1,%curptr >> 388 >> 389 /* restore floating point context */ >> 390 #ifndef CONFIG_M68KFPU_EMU_ONLY >> 391 #ifdef CONFIG_M68KFPU_EMU >> 392 tstl m68k_fputype >> 393 jeq 4f 699 #endif 394 #endif 700 !! 395 #if defined(CONFIG_M68060) 701 _syscall_check_trace_leave: !! 396 #if !defined(CPU_M68060_ONLY) 702 /* r30 is a callee-saved register so t !! 397 btst #3,m68k_cputype+3 703 * _TIF_SYSCALL_TRACE flag from _sysca !! 398 beqs 1f 704 * _syscall_trace_leave expects syscal << 705 */ << 706 l.sfne r30,r0 << 707 l.bf _syscall_trace_leave << 708 l.nop << 709 << 710 /* This is where the exception-return code beg << 711 * disabled the rest of the way here because w << 712 * interrupts that set NEED_RESCHED or SIGNALP << 713 << 714 _syscall_check_work: << 715 /* Here we need to disable interrupts << 716 DISABLE_INTERRUPTS(r27,r29) << 717 TRACE_IRQS_OFF << 718 l.lwz r30,TI_FLAGS(r10) << 719 l.andi r30,r30,_TIF_WORK_MASK << 720 l.sfne r30,r0 << 721 << 722 l.bnf _syscall_resume_userspace << 723 l.nop << 724 << 725 /* Work pending follows a different re << 726 * make sure that all the call-saved r << 727 * before branching... 
<< 728 */ << 729 l.sw PT_GPR14(r1),r14 << 730 l.sw PT_GPR16(r1),r16 << 731 l.sw PT_GPR18(r1),r18 << 732 l.sw PT_GPR20(r1),r20 << 733 l.sw PT_GPR22(r1),r22 << 734 l.sw PT_GPR24(r1),r24 << 735 l.sw PT_GPR26(r1),r26 << 736 l.sw PT_GPR28(r1),r28 << 737 << 738 /* _work_pending needs to be called wi << 739 l.j _work_pending << 740 l.nop << 741 << 742 _syscall_resume_userspace: << 743 // ENABLE_INTERRUPTS(r29) << 744 << 745 << 746 /* This is the hot path for returning to users << 747 * work to be done and the branch to _work_pen << 748 * return to userspace will be done via the no << 749 * that path restores _all_ registers and will << 750 * registers with whatever garbage is in pt_re << 751 * registers are clobbered anyway and because << 752 * in the context of the extra work that _work << 753 << 754 /* Once again, syscalls are special and only g << 755 * same registers as a normal function call */ << 756 << 757 /* The assumption here is that the registers r << 758 * don't need to be restored... be sure that t << 759 */ << 760 << 761 /* This is still too much... we should only be << 762 * clobbered... we should even be using 'scrat << 763 * we don't need to restore anything, hardly.. << 764 */ << 765 << 766 l.lwz r2,PT_GPR2(r1) << 767 << 768 /* Restore args */ << 769 /* r3-r8 are technically clobbered, bu << 770 * to be restored... << 771 */ << 772 l.lwz r3,PT_GPR3(r1) << 773 l.lwz r4,PT_GPR4(r1) << 774 l.lwz r5,PT_GPR5(r1) << 775 l.lwz r6,PT_GPR6(r1) << 776 l.lwz r7,PT_GPR7(r1) << 777 l.lwz r8,PT_GPR8(r1) << 778 << 779 l.lwz r9,PT_GPR9(r1) << 780 l.lwz r10,PT_GPR10(r1) << 781 l.lwz r11,PT_GPR11(r1) << 782 << 783 /* r30 is the only register we clobber << 784 l.lwz r30,PT_GPR30(r1) << 785 << 786 /* Here we use r13-r19 (odd) as scratc << 787 l.lwz r13,PT_PC(r1) << 788 l.lwz r15,PT_SR(r1) << 789 l.lwz r1,PT_SP(r1) << 790 /* Interrupts need to be disabled for << 791 * so that another interrupt doesn't c << 792 * them before we can use them for our << 793 DISABLE_INTERRUPTS(r17,r19) << 794 l.mtspr r0,r13,SPR_EPCR_BASE << 795 l.mtspr r0,r15,SPR_ESR_BASE << 796 l.rfe << 797 << 798 /* End of hot path! << 799 * Keep the below tracing and error handling o << 800 */ << 801 << 802 _syscall_trace_enter: << 803 /* Here we pass pt_regs to do_syscall_ << 804 * that function is really getting all << 805 * pt_regs isn't a complete set of use << 806 * ones relevant to the syscall... << 807 * << 808 * Note use of delay slot for setting << 809 */ << 810 l.jal do_syscall_trace_enter << 811 l.addi r3,r1,0 << 812 << 813 /* Restore arguments (not preserved ac << 814 * so that we can do the syscall for r << 815 * hot path. << 816 */ << 817 l.lwz r11,PT_GPR11(r1) << 818 l.lwz r3,PT_GPR3(r1) << 819 l.lwz r4,PT_GPR4(r1) << 820 l.lwz r5,PT_GPR5(r1) << 821 l.lwz r6,PT_GPR6(r1) << 822 l.lwz r7,PT_GPR7(r1) << 823 << 824 l.j _syscall_check << 825 l.lwz r8,PT_GPR8(r1) << 826 << 827 _syscall_trace_leave: << 828 l.jal do_syscall_trace_leave << 829 l.addi r3,r1,0 << 830 << 831 l.j _syscall_check_work << 832 l.nop << 833 << 834 _syscall_badsys: << 835 /* Here we effectively pretend to have << 836 * syscall that returns -ENOSYS and th << 837 * syscall hot path. 
<< 838 * Note that "return value" is set in << 839 */ << 840 l.j _syscall_return << 841 l.addi r11,r0,-ENOSYS << 842 << 843 /******* END SYSCALL HANDLING *******/ << 844 << 845 /* ---[ 0xd00: Floating Point exception ]----- << 846 << 847 EXCEPTION_ENTRY(_fpe_trap_handler) << 848 CLEAR_LWA_FLAG(r3) << 849 << 850 /* r4: EA of fault (set by EXCEPTION_H << 851 l.jal do_fpe_trap << 852 l.addi r3,r1,0 /* pt_regs */ << 853 << 854 l.j _ret_from_exception << 855 l.nop << 856 << 857 /* ---[ 0xe00: Trap exception ]--------------- << 858 << 859 EXCEPTION_ENTRY(_trap_handler) << 860 CLEAR_LWA_FLAG(r3) << 861 /* r4: EA of fault (set by EXCEPTION_H << 862 l.jal do_trap << 863 l.addi r3,r1,0 /* pt_regs */ << 864 << 865 l.j _ret_from_exception << 866 l.nop << 867 << 868 /* ---[ 0xf00: Reserved exception ]----------- << 869 << 870 UNHANDLED_EXCEPTION(_vector_0xf00,0xf00) << 871 << 872 /* ---[ 0x1000: Reserved exception ]---------- << 873 << 874 UNHANDLED_EXCEPTION(_vector_0x1000,0x1000) << 875 << 876 /* ---[ 0x1100: Reserved exception ]---------- << 877 << 878 UNHANDLED_EXCEPTION(_vector_0x1100,0x1100) << 879 << 880 /* ---[ 0x1200: Reserved exception ]---------- << 881 << 882 UNHANDLED_EXCEPTION(_vector_0x1200,0x1200) << 883 << 884 /* ---[ 0x1300: Reserved exception ]---------- << 885 << 886 UNHANDLED_EXCEPTION(_vector_0x1300,0x1300) << 887 << 888 /* ---[ 0x1400: Reserved exception ]---------- << 889 << 890 UNHANDLED_EXCEPTION(_vector_0x1400,0x1400) << 891 << 892 /* ---[ 0x1500: Reserved exception ]---------- << 893 << 894 UNHANDLED_EXCEPTION(_vector_0x1500,0x1500) << 895 << 896 /* ---[ 0x1600: Reserved exception ]---------- << 897 << 898 UNHANDLED_EXCEPTION(_vector_0x1600,0x1600) << 899 << 900 /* ---[ 0x1700: Reserved exception ]---------- << 901 << 902 UNHANDLED_EXCEPTION(_vector_0x1700,0x1700) << 903 << 904 /* ---[ 0x1800: Reserved exception ]---------- << 905 << 906 UNHANDLED_EXCEPTION(_vector_0x1800,0x1800) << 907 << 908 /* ---[ 0x1900: Reserved exception ]---------- << 909 << 910 UNHANDLED_EXCEPTION(_vector_0x1900,0x1900) << 911 << 912 /* ---[ 0x1a00: Reserved exception ]---------- << 913 << 914 UNHANDLED_EXCEPTION(_vector_0x1a00,0x1a00) << 915 << 916 /* ---[ 0x1b00: Reserved exception ]---------- << 917 << 918 UNHANDLED_EXCEPTION(_vector_0x1b00,0x1b00) << 919 << 920 /* ---[ 0x1c00: Reserved exception ]---------- << 921 << 922 UNHANDLED_EXCEPTION(_vector_0x1c00,0x1c00) << 923 << 924 /* ---[ 0x1d00: Reserved exception ]---------- << 925 << 926 UNHANDLED_EXCEPTION(_vector_0x1d00,0x1d00) << 927 << 928 /* ---[ 0x1e00: Reserved exception ]---------- << 929 << 930 UNHANDLED_EXCEPTION(_vector_0x1e00,0x1e00) << 931 << 932 /* ---[ 0x1f00: Reserved exception ]---------- << 933 << 934 UNHANDLED_EXCEPTION(_vector_0x1f00,0x1f00) << 935 << 936 /* =========================================== << 937 << 938 _resume_userspace: << 939 DISABLE_INTERRUPTS(r3,r4) << 940 TRACE_IRQS_OFF << 941 l.lwz r4,TI_FLAGS(r10) << 942 l.andi r13,r4,_TIF_WORK_MASK << 943 l.sfeqi r13,0 << 944 l.bf _restore_all << 945 l.nop << 946 << 947 _work_pending: << 948 l.lwz r5,PT_ORIG_GPR11(r1) << 949 l.sfltsi r5,0 << 950 l.bnf 1f << 951 l.nop << 952 l.andi r5,r5,0 << 953 1: << 954 l.jal do_work_pending << 955 l.ori r3,r1,0 /* pt_ << 956 << 957 l.sfeqi r11,0 << 958 l.bf _restore_all << 959 l.nop << 960 l.sfltsi r11,0 << 961 l.bnf 1f << 962 l.nop << 963 l.and r11,r11,r0 << 964 l.ori r11,r11,__NR_restart_syscall << 965 l.j _syscall_check_trace_enter << 966 l.nop << 967 1: << 968 l.lwz r11,PT_ORIG_GPR11(r1) << 969 /* Restore arg registers */ << 970 
l.lwz r3,PT_GPR3(r1) << 971 l.lwz r4,PT_GPR4(r1) << 972 l.lwz r5,PT_GPR5(r1) << 973 l.lwz r6,PT_GPR6(r1) << 974 l.lwz r7,PT_GPR7(r1) << 975 l.j _syscall_check_trace_enter << 976 l.lwz r8,PT_GPR8(r1) << 977 << 978 _restore_all: << 979 #ifdef CONFIG_TRACE_IRQFLAGS << 980 l.lwz r4,PT_SR(r1) << 981 l.andi r3,r4,(SPR_SR_IEE|SPR_SR_TEE) << 982 l.sfeq r3,r0 /* skip trace << 983 l.bf skip_hardirqs_on << 984 l.nop << 985 TRACE_IRQS_ON << 986 skip_hardirqs_on: << 987 #endif 399 #endif 988 RESTORE_ALL !! 400 /* The 060 FPU keeps status in bits 15-8 of the first longword */ 989 /* This returns to userspace code */ !! 401 tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2) 990 !! 402 jeq 3f 991 !! 403 #if !defined(CPU_M68060_ONLY) 992 ENTRY(_ret_from_intr) !! 404 jra 2f 993 ENTRY(_ret_from_exception) !! 405 #endif 994 l.lwz r4,PT_SR(r1) !! 406 #endif /* CONFIG_M68060 */ 995 l.andi r3,r4,SPR_SR_SM !! 407 #if !defined(CPU_M68060_ONLY) 996 l.sfeqi r3,0 !! 408 1: tstb %a1@(TASK_THREAD+THREAD_FPSTATE) 997 l.bnf _restore_all !! 409 jeq 3f 998 l.nop !! 410 #endif 999 l.j _resume_userspace !! 411 2: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7 1000 l.nop !! 412 fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar 1001 !! 413 3: frestore %a1@(TASK_THREAD+THREAD_FPSTATE) 1002 ENTRY(ret_from_fork) !! 414 4: 1003 l.jal schedule_tail !! 415 #endif /* CONFIG_M68KFPU_EMU_ONLY */ 1004 l.nop !! 416 1005 !! 417 /* restore the kernel stack pointer */ 1006 /* Check if we are a kernel thread */ !! 418 movel %a1@(TASK_THREAD+THREAD_KSP),%sp 1007 l.sfeqi r20,0 !! 419 1008 l.bf 1f !! 420 /* restore non-scratch registers */ 1009 l.nop !! 421 RESTORE_SWITCH_STACK 1010 !! 422 1011 /* ...we are a kernel thread so invok !! 423 /* restore user stack pointer */ 1012 l.jalr r20 !! 424 movel %a1@(TASK_THREAD+THREAD_USP),%a0 1013 l.or r3,r22,r0 !! 425 movel %a0,%usp 1014 !! 426 1015 1: !! 427 /* restore fs (sfc,%dfc) */ 1016 /* _syscall_returns expect r11 to con !! 428 movew %a1@(TASK_THREAD+THREAD_FC),%a0 1017 l.lwz r11,PT_GPR11(r1) !! 429 movec %a0,%sfc 1018 !! 430 movec %a0,%dfc 1019 /* The syscall fast path return expec !! 431 1020 * r14-r28 to be untouched, so we res !! 432 /* restore status register */ 1021 * will have been effectively clobber !! 433 movew %a1@(TASK_THREAD+THREAD_SR),%d0 1022 * via the call to switch() !! 434 oriw #0x0700,%d0 1023 */ !! 435 movew %d0,%sr 1024 l.lwz r14,PT_GPR14(r1) << 1025 l.lwz r16,PT_GPR16(r1) << 1026 l.lwz r18,PT_GPR18(r1) << 1027 l.lwz r20,PT_GPR20(r1) << 1028 l.lwz r22,PT_GPR22(r1) << 1029 l.lwz r24,PT_GPR24(r1) << 1030 l.lwz r26,PT_GPR26(r1) << 1031 l.lwz r28,PT_GPR28(r1) << 1032 << 1033 l.j _syscall_return << 1034 l.nop << 1035 << 1036 /* ========================================== << 1037 << 1038 /* << 1039 * This routine switches between two differen << 1040 * state of one is saved on its kernel stack. << 1041 * of the other is restored from its kernel s << 1042 * management hardware is updated to the seco << 1043 * Finally, we can return to the second proce << 1044 * << 1045 * Note: there are two ways to get to the "go << 1046 * of this code; either by coming in via the << 1047 * or via "fork" which must set up an environ << 1048 * to the "_switch" path. 
If you change this << 1049 * SAVE_REGS macro), you'll have to change th << 1050 */ << 1051 << 1052 << 1053 /* _switch MUST never lay on page boundry, ca << 1054 * effective addresses and beeing interrupted << 1055 * dTLB miss seems to never accour in the bad << 1056 * are from task structures which are always << 1057 * << 1058 * The problem happens in RESTORE_ALL where w << 1059 * register, then load the previous register << 1060 * the l.rfe instruction. If get TLB miss in << 1061 * garbled and we end up calling l.rfe with t << 1062 * holds for ESR) << 1063 * << 1064 * To avoid this problems it is sufficient to << 1065 * some nice round number smaller than it's s << 1066 */ << 1067 << 1068 /* ABI rules apply here... we either enter _s << 1069 * an imaginary call to which we shall return << 1070 * way, we are a function call and only need << 1071 * registers when we return. As such, we don << 1072 * on the stack that we won't be returning as << 1073 */ << 1074 << 1075 .align 0x400 << 1076 ENTRY(_switch) << 1077 /* We don't store SR as _switch only << 1078 * the SR will be the same going in a << 1079 << 1080 /* Set up new pt_regs struct for savi << 1081 l.addi r1,r1,-(INT_FRAME_SIZE) << 1082 << 1083 /* No need to store r1/PT_SP as it go << 1084 l.sw PT_GPR2(r1),r2 << 1085 l.sw PT_GPR9(r1),r9 << 1086 << 1087 /* Save callee-saved registers to the << 1088 l.sw PT_GPR14(r1),r14 << 1089 l.sw PT_GPR16(r1),r16 << 1090 l.sw PT_GPR18(r1),r18 << 1091 l.sw PT_GPR20(r1),r20 << 1092 l.sw PT_GPR22(r1),r22 << 1093 l.sw PT_GPR24(r1),r24 << 1094 l.sw PT_GPR26(r1),r26 << 1095 l.sw PT_GPR28(r1),r28 << 1096 l.sw PT_GPR30(r1),r30 << 1097 << 1098 l.addi r11,r10,0 << 1099 << 1100 /* We use thread_info->ksp for storin << 1101 * structure so that we can get back << 1102 * to lose the value of thread_info-> << 1103 * pt_regs->sp so that we can easily << 1104 * live again... << 1105 */ << 1106 << 1107 /* Save the old value of thread_info- << 1108 l.lwz r29,TI_KSP(r10) << 1109 l.sw PT_SP(r1),r29 << 1110 << 1111 /* Swap kernel stack pointers */ << 1112 l.sw TI_KSP(r10),r1 << 1113 l.or r10,r4,r0 << 1114 l.lwz r1,TI_KSP(r10) << 1115 << 1116 /* Restore the old value of thread_in << 1117 l.lwz r29,PT_SP(r1) << 1118 l.sw TI_KSP(r10),r29 << 1119 << 1120 /* ...and restore the registers, exce << 1121 * has already been set above. << 1122 */ << 1123 l.lwz r2,PT_GPR2(r1) << 1124 l.lwz r9,PT_GPR9(r1) << 1125 /* No need to restore r10 */ << 1126 /* ...and do not restore r11 */ << 1127 << 1128 /* Restore callee-saved registers */ << 1129 l.lwz r14,PT_GPR14(r1) << 1130 l.lwz r16,PT_GPR16(r1) << 1131 l.lwz r18,PT_GPR18(r1) << 1132 l.lwz r20,PT_GPR20(r1) << 1133 l.lwz r22,PT_GPR22(r1) << 1134 l.lwz r24,PT_GPR24(r1) << 1135 l.lwz r26,PT_GPR26(r1) << 1136 l.lwz r28,PT_GPR28(r1) << 1137 l.lwz r30,PT_GPR30(r1) << 1138 << 1139 /* Unwind stack to pre-switch state * << 1140 l.addi r1,r1,(INT_FRAME_SIZE) << 1141 << 1142 /* Return via the link-register back << 1143 * that may be either schedule(), ret << 1144 * ret_from_kernel_thread(). 
If we a << 1145 * we are expected to have set up the << 1146 * hence we do so here unconditionall << 1147 */ << 1148 l.lwz r3,TI_TASK(r3) /* Lo << 1149 l.jr r9 << 1150 l.nop << 1151 << 1152 /* ========================================== << 1153 << 1154 /* These all use the delay slot for setting t << 1155 * jump is always happening after the l.addi << 1156 * << 1157 * These are all just wrappers that don't tou << 1158 * return from the "real" syscall function wi << 1159 * code that did the l.jal that brought us he << 1160 */ << 1161 << 1162 /* fork requires that we save all the callee- << 1163 * are all effectively clobbered by the call << 1164 * all the registers that aren't touched by t << 1165 * weren't saved there. << 1166 */ << 1167 << 1168 _fork_save_extra_regs_and_call: << 1169 l.sw PT_GPR14(r1),r14 << 1170 l.sw PT_GPR16(r1),r16 << 1171 l.sw PT_GPR18(r1),r18 << 1172 l.sw PT_GPR20(r1),r20 << 1173 l.sw PT_GPR22(r1),r22 << 1174 l.sw PT_GPR24(r1),r24 << 1175 l.sw PT_GPR26(r1),r26 << 1176 l.jr r29 << 1177 l.sw PT_GPR28(r1),r28 << 1178 << 1179 ENTRY(__sys_clone) << 1180 l.movhi r29,hi(sys_clone) << 1181 l.j _fork_save_extra_regs_and_cal << 1182 l.ori r29,r29,lo(sys_clone) << 1183 << 1184 ENTRY(__sys_clone3) << 1185 l.movhi r29,hi(sys_clone3) << 1186 l.j _fork_save_extra_regs_and_cal << 1187 l.ori r29,r29,lo(sys_clone3) << 1188 << 1189 ENTRY(__sys_fork) << 1190 l.movhi r29,hi(sys_fork) << 1191 l.j _fork_save_extra_regs_and_cal << 1192 l.ori r29,r29,lo(sys_fork) << 1193 << 1194 ENTRY(sys_rt_sigreturn) << 1195 l.jal _sys_rt_sigreturn << 1196 l.addi r3,r1,0 << 1197 l.sfne r30,r0 << 1198 l.bnf _no_syscall_trace << 1199 l.nop << 1200 l.jal do_syscall_trace_leave << 1201 l.addi r3,r1,0 << 1202 _no_syscall_trace: << 1203 l.j _resume_userspace << 1204 l.nop << 1205 << 1206 /* This is a catch-all syscall for atomic ins << 1207 * The functions takes a variable number of p << 1208 * particular flavour of atomic you want... p << 1209 * the atomic in question. Currently, this f << 1210 * following variants: << 1211 * << 1212 * XCHG: << 1213 * @flag: 1 << 1214 * @ptr1: << 1215 * @ptr2: << 1216 * Atomically exchange the values in pointers << 1217 * << 1218 */ << 1219 436 1220 ENTRY(sys_or1k_atomic) !! 437 rts 1221 /* FIXME: This ignores r3 and always << 1222 DISABLE_INTERRUPTS(r17,r19) << 1223 l.lwz r29,0(r4) << 1224 l.lwz r27,0(r5) << 1225 l.sw 0(r4),r27 << 1226 l.sw 0(r5),r29 << 1227 ENABLE_INTERRUPTS(r17) << 1228 l.jr r9 << 1229 l.or r11,r0,r0 << 1230 438 1231 /* ========================================== !! 439 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
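
/* ---[ Usage sketch: sys_or1k_atomic from userspace ]------------------ */

A minimal, hypothetical userspace sketch of how the XCHG variant described in the
sys_or1k_atomic comment above might be invoked. The __NR_or1k_atomic syscall number
and the flag value 1 are assumptions drawn from that comment block and the OpenRISC
uapi headers, not something defined in this file; verify them against the kernel tree
you are actually building against.

#include <unistd.h>
#include <sys/syscall.h>

/* Atomically exchange the values behind two pointers via the catch-all
 * atomic syscall.  Flag 1 is assumed to select the XCHG variant (and, per
 * the FIXME above, the current handler performs XCHG regardless of flag). */
static long or1k_atomic_xchg(unsigned long *a, unsigned long *b)
{
	return syscall(__NR_or1k_atomic, 1, a, b);
}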