/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/errata_list.h>
#include <linux/sizes.h>

        .section .irqentry.text, "ax"

.macro new_vmalloc_check
        REG_S   a0, TASK_TI_A0(tp)
        csrr    a0, CSR_CAUSE
        /* Exclude IRQs */
        blt     a0, zero, _new_vmalloc_restore_context_a0

        REG_S   a1, TASK_TI_A1(tp)
        /* Only check new_vmalloc if we are in page/protection fault */
        li      a1, EXC_LOAD_PAGE_FAULT
        beq     a0, a1, _new_vmalloc_kernel_address
        li      a1, EXC_STORE_PAGE_FAULT
        beq     a0, a1, _new_vmalloc_kernel_address
        li      a1, EXC_INST_PAGE_FAULT
        bne     a0, a1, _new_vmalloc_restore_context_a1

_new_vmalloc_kernel_address:
        /* Is it a kernel address? */
        csrr    a0, CSR_TVAL
        bge     a0, zero, _new_vmalloc_restore_context_a1

        /* Check if a new vmalloc mapping appeared that could explain the trap */
        REG_S   a2, TASK_TI_A2(tp)
        /*
         * Computes:
         * a0 = &new_vmalloc[BIT_WORD(cpu)]
         * a1 = BIT_MASK(cpu)
         */
        REG_L   a2, TASK_TI_CPU(tp)
        /*
         * Compute the new_vmalloc element position:
         * (cpu / 64) * 8 = (cpu >> 6) << 3
         */
        srli    a1, a2, 6
        slli    a1, a1, 3
        la      a0, new_vmalloc
        add     a0, a0, a1
        /*
         * Compute the bit position in the new_vmalloc element:
         * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - (cpu >> 6) << 6
         *         = cpu - ((cpu >> 6) << 3) << 3
         */
        slli    a1, a1, 3
        sub     a1, a2, a1
        /* Compute the "get mask": 1 << bit_pos */
        li      a2, 1
        sll     a1, a2, a1
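        /*
         * Worked example for the index/mask computation above (illustrative
         * numbers only): for cpu = 70, the word index is 70 >> 6 = 1, so the
         * byte offset into new_vmalloc is 1 << 3 = 8, and the bit position is
         * 70 - 64 = 6, giving a mask of 1 << 6.
         */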

        /* Check the value of new_vmalloc for this cpu */
        REG_L   a2, 0(a0)
        and     a2, a2, a1
        beq     a2, zero, _new_vmalloc_restore_context

        /* Atomically reset the current cpu bit in new_vmalloc */
        amoxor.d a0, a1, (a0)

        /* Only emit a sfence.vma if the uarch caches invalid entries */
        ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1)

        REG_L   a0, TASK_TI_A0(tp)
        REG_L   a1, TASK_TI_A1(tp)
        REG_L   a2, TASK_TI_A2(tp)
        csrw    CSR_SCRATCH, x0
        sret

_new_vmalloc_restore_context:
        REG_L   a2, TASK_TI_A2(tp)
_new_vmalloc_restore_context_a1:
        REG_L   a1, TASK_TI_A1(tp)
_new_vmalloc_restore_context_a0:
        REG_L   a0, TASK_TI_A0(tp)
.endm


SYM_CODE_START(handle_exception)
        /*
         * If coming from userspace, preserve the user thread pointer and load
         * the kernel thread pointer.  If we came from the kernel, the scratch
         * register will contain 0, and we should continue on the current TP.
         */
        csrrw   tp, CSR_SCRATCH, tp
        bnez    tp, .Lsave_context

.Lrestore_kernel_tpsp:
        csrr    tp, CSR_SCRATCH

#ifdef CONFIG_64BIT
        /*
         * The RISC-V kernel does not eagerly emit a sfence.vma after each
         * new vmalloc mapping, which may result in exceptions:
         * - if the uarch caches invalid entries, the new mapping would not be
         *   observed by the page table walker and an invalidation is needed.
         * - if the uarch does not cache invalid entries, a reordered access
         *   could "miss" the new mapping and traps: in that case, we only need
         *   to retry the access, no sfence.vma is required.
         */
        new_vmalloc_check
#endif

        REG_S   sp, TASK_TI_KERNEL_SP(tp)

#ifdef CONFIG_VMAP_STACK
        addi    sp, sp, -(PT_SIZE_ON_STACK)
        srli    sp, sp, THREAD_SHIFT
        andi    sp, sp, 0x1
        bnez    sp, handle_kernel_stack_overflow
        REG_L   sp, TASK_TI_KERNEL_SP(tp)
#endif

.Lsave_context:
        REG_S   sp, TASK_TI_USER_SP(tp)
        REG_L   sp, TASK_TI_KERNEL_SP(tp)
        addi    sp, sp, -(PT_SIZE_ON_STACK)
        REG_S   x1, PT_RA(sp)
        REG_S   x3, PT_GP(sp)
        REG_S   x5, PT_T0(sp)
        save_from_x6_to_x31

        /*
         * Disable user-mode memory access as it should only be set in the
         * actual user copy routines.
         *
         * Disable the FPU/Vector to detect illegal usage of floating point
         * or vector in kernel space.
         */
        li      t0, SR_SUM | SR_FS_VS

        REG_L   s0, TASK_TI_USER_SP(tp)
        csrrc   s1, CSR_STATUS, t0
        csrr    s2, CSR_EPC
        csrr    s3, CSR_TVAL
        csrr    s4, CSR_CAUSE
        csrr    s5, CSR_SCRATCH
        REG_S   s0, PT_SP(sp)
        REG_S   s1, PT_STATUS(sp)
        REG_S   s2, PT_EPC(sp)
        REG_S   s3, PT_BADADDR(sp)
        REG_S   s4, PT_CAUSE(sp)
        REG_S   s5, PT_TP(sp)
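
        /*
         * At this point the full trapping context (GPRs, the user stack
         * pointer, status, epc, badaddr, cause and the user tp) has been
         * captured in the pt_regs frame on the kernel stack.
         */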

        /*
         * Set the scratch register to 0, so that if a recursive exception
         * occurs, the exception vector knows it came from the kernel
         */
        csrw    CSR_SCRATCH, x0

        /* Load the global pointer */
        load_global_pointer

        /* Load the kernel shadow call stack pointer if coming from userspace */
        scs_load_current_if_task_changed s5

#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
        move    a0, sp
        call    riscv_v_context_nesting_start
#endif
        move    a0, sp /* pt_regs */

        /*
         * MSB of cause differentiates between
         * interrupts and exceptions
         */
        bge     s4, zero, 1f

        /* Handle interrupts */
        call    do_irq
        j       ret_from_exception
1:
        /* Handle other exceptions */
        slli    t0, s4, RISCV_LGPTR
        la      t1, excp_vect_table
        la      t2, excp_vect_table_end
        add     t0, t1, t0
        /* Check if exception code lies within bounds */
        bgeu    t0, t2, 3f
        REG_L   t1, 0(t0)
2:      jalr    t1
        j       ret_from_exception
3:

        la      t1, do_trap_unknown
        j       2b
SYM_CODE_END(handle_exception)
ASM_NOKPROBE(handle_exception)
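
/*
 * Dispatch example (illustrative): an ecall from U-mode traps with
 * scause = 8, so the table lookup above selects excp_vect_table[8],
 * i.e. do_trap_ecall_u, the system-call entry.
 */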

/*
 * The ret_from_exception must be called with interrupts disabled. Here is the
 * caller list:
 *  - handle_exception
 *  - ret_from_fork
 */
SYM_CODE_START_NOALIGN(ret_from_exception)
        REG_L   s0, PT_STATUS(sp)
#ifdef CONFIG_RISCV_M_MODE
        /* the MPP value is too large to be used as an immediate arg for addi */
        li      t0, SR_MPP
        and     s0, s0, t0
#else
        andi    s0, s0, SR_SPP
#endif
        bnez    s0, 1f

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
        call    stackleak_erase_on_task_stack
#endif

        /* Save unwound kernel stack pointer in thread_info */
        addi    s0, sp, PT_SIZE_ON_STACK
        REG_S   s0, TASK_TI_KERNEL_SP(tp)

        /* Save the kernel shadow call stack pointer */
        scs_save_current

        /*
         * Save TP into the scratch register, so we can find the kernel data
         * structures again.
         */
        csrw    CSR_SCRATCH, tp
1:
#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE
        move    a0, sp
        call    riscv_v_context_nesting_end
#endif
        REG_L   a0, PT_STATUS(sp)
        /*
         * The current load reservation is effectively part of the processor's
         * state, in the sense that load reservations cannot be shared between
         * different hart contexts.  We can't actually save and restore a load
         * reservation, so instead here we clear any existing reservation --
         * it's always legal for implementations to clear load reservations at
         * any point (as long as the forward progress guarantee is kept, but
         * we'll ignore that here).
         *
         * Dangling load reservations can be the result of taking a trap in the
         * middle of an LR/SC sequence, but can also be the result of a taken
         * forward branch around an SC -- which is how we implement CAS.  As a
         * result we need to clear reservations between the last CAS and the
         * jump back to the new context.  While it is unlikely the store
         * completes, implementations are allowed to expand reservations to be
         * arbitrarily large.
         */
        REG_L   a2, PT_EPC(sp)
        REG_SC  x0, a2, PT_EPC(sp)

        csrw    CSR_STATUS, a0
        csrw    CSR_EPC, a2

        REG_L   x1, PT_RA(sp)
        REG_L   x3, PT_GP(sp)
        REG_L   x4, PT_TP(sp)
        REG_L   x5, PT_T0(sp)
        restore_from_x6_to_x31

        REG_L   x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
        mret
#else
        sret
#endif
SYM_CODE_END(ret_from_exception)
ASM_NOKPROBE(ret_from_exception)

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
        /* we reach here from kernel context, sscratch must be 0 */
        csrrw   x31, CSR_SCRATCH, x31
        asm_per_cpu sp, overflow_stack, x31
        li      x31, OVERFLOW_STACK_SIZE
        add     sp, sp, x31
        /* zero out x31 again and restore x31 */
        xor     x31, x31, x31
        csrrw   x31, CSR_SCRATCH, x31

        addi    sp, sp, -(PT_SIZE_ON_STACK)

        /* save context to the overflow stack */
        REG_S   x1, PT_RA(sp)
        REG_S   x3, PT_GP(sp)
        REG_S   x5, PT_T0(sp)
        save_from_x6_to_x31

        REG_L   s0, TASK_TI_KERNEL_SP(tp)
        csrr    s1, CSR_STATUS
        csrr    s2, CSR_EPC
        csrr    s3, CSR_TVAL
        csrr    s4, CSR_CAUSE
        csrr    s5, CSR_SCRATCH
        REG_S   s0, PT_SP(sp)
        REG_S   s1, PT_STATUS(sp)
        REG_S   s2, PT_EPC(sp)
        REG_S   s3, PT_BADADDR(sp)
        REG_S   s4, PT_CAUSE(sp)
        REG_S   s5, PT_TP(sp)
        move    a0, sp
        tail    handle_bad_stack
SYM_CODE_END(handle_kernel_stack_overflow)
ASM_NOKPROBE(handle_kernel_stack_overflow)
#endif

SYM_CODE_START(ret_from_fork)
        call    schedule_tail
        beqz    s0, 1f  /* not from kernel thread */
        /* Call fn(arg) */
        move    a0, s1
        jalr    s0
1:
        move    a0, sp /* pt_regs */
        call    syscall_exit_to_user_mode
        j       ret_from_exception
SYM_CODE_END(ret_from_fork)

#ifdef CONFIG_IRQ_STACKS
/*
 * void call_on_irq_stack(struct pt_regs *regs,
 *                        void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using the per-CPU IRQ stack.
 */
SYM_FUNC_START(call_on_irq_stack)
        /* Create a frame record to save ra and s0 (fp) */
        addi    sp, sp, -STACKFRAME_SIZE_ON_STACK
        REG_S   ra, STACKFRAME_RA(sp)
        REG_S   s0, STACKFRAME_FP(sp)
        addi    s0, sp, STACKFRAME_SIZE_ON_STACK

        /* Switch to the per-CPU shadow call stack */
        scs_save_current
        scs_load_irq_stack t0

        /* Switch to the per-CPU IRQ stack and call the handler */
        load_per_cpu t0, irq_stack_ptr, t1
        li      t1, IRQ_STACK_SIZE
        add     sp, t0, t1
        jalr    a1

        /* Switch back to the thread shadow call stack */
        scs_load_current

        /* Switch back to the thread stack and restore ra and s0 */
        addi    sp, s0, -STACKFRAME_SIZE_ON_STACK
        REG_L   ra, STACKFRAME_RA(sp)
        REG_L   s0, STACKFRAME_FP(sp)
        addi    sp, sp, STACKFRAME_SIZE_ON_STACK

        ret
SYM_FUNC_END(call_on_irq_stack)
#endif /* CONFIG_IRQ_STACKS */
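
/*
 * Usage note: with CONFIG_IRQ_STACKS enabled, do_irq() is expected to run
 * its handler through call_on_irq_stack() above, so interrupt processing
 * happens on the per-CPU IRQ stack rather than on the current task stack.
 */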

/*
 * Integer register context switch
 * The callee-saved registers must be saved and restored.
 *
 *  a0: previous task_struct (must be preserved across the switch)
 *  a1: next task_struct
 *
 * The value of a0 and a1 must be preserved by this function, as that's how
 * arguments are passed to schedule_tail.
 */
SYM_FUNC_START(__switch_to)
        /* Save context into prev->thread */
        li      a4, TASK_THREAD_RA
        add     a3, a0, a4
        add     a4, a1, a4
        REG_S   ra, TASK_THREAD_RA_RA(a3)
        REG_S   sp, TASK_THREAD_SP_RA(a3)
        REG_S   s0, TASK_THREAD_S0_RA(a3)
        REG_S   s1, TASK_THREAD_S1_RA(a3)
        REG_S   s2, TASK_THREAD_S2_RA(a3)
        REG_S   s3, TASK_THREAD_S3_RA(a3)
        REG_S   s4, TASK_THREAD_S4_RA(a3)
        REG_S   s5, TASK_THREAD_S5_RA(a3)
        REG_S   s6, TASK_THREAD_S6_RA(a3)
        REG_S   s7, TASK_THREAD_S7_RA(a3)
        REG_S   s8, TASK_THREAD_S8_RA(a3)
        REG_S   s9, TASK_THREAD_S9_RA(a3)
        REG_S   s10, TASK_THREAD_S10_RA(a3)
        REG_S   s11, TASK_THREAD_S11_RA(a3)
        /* Save the kernel shadow call stack pointer */
        scs_save_current
        /* Restore context from next->thread */
        REG_L   ra, TASK_THREAD_RA_RA(a4)
        REG_L   sp, TASK_THREAD_SP_RA(a4)
        REG_L   s0, TASK_THREAD_S0_RA(a4)
        REG_L   s1, TASK_THREAD_S1_RA(a4)
        REG_L   s2, TASK_THREAD_S2_RA(a4)
        REG_L   s3, TASK_THREAD_S3_RA(a4)
        REG_L   s4, TASK_THREAD_S4_RA(a4)
        REG_L   s5, TASK_THREAD_S5_RA(a4)
        REG_L   s6, TASK_THREAD_S6_RA(a4)
        REG_L   s7, TASK_THREAD_S7_RA(a4)
        REG_L   s8, TASK_THREAD_S8_RA(a4)
        REG_L   s9, TASK_THREAD_S9_RA(a4)
        REG_L   s10, TASK_THREAD_S10_RA(a4)
        REG_L   s11, TASK_THREAD_S11_RA(a4)
        /* The offset of thread_info in task_struct is zero. */
        move    tp, a1
        /* Switch to the next shadow call stack */
        scs_load_current
        ret
SYM_FUNC_END(__switch_to)

#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif

        .section ".rodata"
        .align LGREG
        /* Exception vector table */
SYM_DATA_START_LOCAL(excp_vect_table)
        RISCV_PTR do_trap_insn_misaligned
        ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
        RISCV_PTR do_trap_insn_illegal
        RISCV_PTR do_trap_break
        RISCV_PTR do_trap_load_misaligned
        RISCV_PTR do_trap_load_fault
        RISCV_PTR do_trap_store_misaligned
        RISCV_PTR do_trap_store_fault
        RISCV_PTR do_trap_ecall_u /* system call */
        RISCV_PTR do_trap_ecall_s
        RISCV_PTR do_trap_unknown
        RISCV_PTR do_trap_ecall_m
        /* instruction page fault */
        ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
        RISCV_PTR do_page_fault   /* load page fault */
        RISCV_PTR do_trap_unknown
        RISCV_PTR do_page_fault   /* store page fault */
SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)

#ifndef CONFIG_MMU
SYM_DATA_START(__user_rt_sigreturn)
        li      a7, __NR_rt_sigreturn
        ecall
SYM_DATA_END(__user_rt_sigreturn)
#endif
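
/*
 * Note: on NOMMU configurations the __user_rt_sigreturn stub above is used
 * as the signal-return trampoline placed in the user signal frame; it loads
 * __NR_rt_sigreturn into a7 and traps back into the kernel with ecall.
 */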