/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/loongarch/kernel/entry.S
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
#include <asm/unwind_hints.h>

	.text
	.cfi_sections	.debug_frame
	.align	5
SYM_CODE_START(handle_syscall)
	UNWIND_HINT_UNDEFINED
	/* Switch from the user stack to this CPU's kernel stack */
	csrrd		t0, PERCPU_BASE_KS
	la.pcrel	t1, kernelsp
	add.d		t1, t1, t0
	move		t2, sp
	ld.d		sp, t1, 0

	/* Build a struct pt_regs frame and save the user context into it */
	addi.d		sp, sp, -PT_SIZE
	cfi_st		t2, PT_R3
	cfi_rel_offset	sp, PT_R3
	st.d		zero, sp, PT_R0
	csrrd		t2, LOONGARCH_CSR_PRMD
	st.d		t2, sp, PT_PRMD
	csrrd		t2, LOONGARCH_CSR_CRMD
	st.d		t2, sp, PT_CRMD
	csrrd		t2, LOONGARCH_CSR_EUEN
	st.d		t2, sp, PT_EUEN
	csrrd		t2, LOONGARCH_CSR_ECFG
	st.d		t2, sp, PT_ECFG
	csrrd		t2, LOONGARCH_CSR_ESTAT
	st.d		t2, sp, PT_ESTAT
	cfi_st		ra, PT_R1
	cfi_st		a0, PT_R4
	cfi_st		a1, PT_R5
	cfi_st		a2, PT_R6
	cfi_st		a3, PT_R7
	cfi_st		a4, PT_R8
	cfi_st		a5, PT_R9
	cfi_st		a6, PT_R10
	cfi_st		a7, PT_R11
	csrrd		ra, LOONGARCH_CSR_ERA
	st.d		ra, sp, PT_ERA
	cfi_rel_offset	ra, PT_ERA

	cfi_st		tp, PT_R2
	cfi_st		u0, PT_R21
	cfi_st		fp, PT_R22

	SAVE_STATIC
	UNWIND_HINT_REGS

#ifdef CONFIG_KGDB
	li.w		t1, CSR_CRMD_WE
	csrxchg		t1, t1, LOONGARCH_CSR_CRMD
#endif

	/* Set up the per-CPU base and derive the current task pointer from sp */
	move		u0, t0
	li.d		tp, ~_THREAD_MASK
	and		tp, tp, sp

	/* Call the C syscall dispatcher with a pointer to pt_regs */
	move		a0, sp
	bl		do_syscall

	RESTORE_ALL_AND_RET
SYM_CODE_END(handle_syscall)
_ASM_NOKPROBE(handle_syscall)

SYM_CODE_START(ret_from_fork)
	UNWIND_HINT_REGS
	bl		schedule_tail
	move		a0, sp
	bl		syscall_exit_to_user_mode
	RESTORE_STATIC
	RESTORE_SOME
	RESTORE_SP_AND_RET
SYM_CODE_END(ret_from_fork)

SYM_CODE_START(ret_from_kernel_thread)
	UNWIND_HINT_REGS
	bl		schedule_tail
	/* Call the thread function (s0) with its argument (s1) */
	move		a0, s1
	jirl		ra, s0, 0
	move		a0, sp
	bl		syscall_exit_to_user_mode
	RESTORE_STATIC
	RESTORE_SOME
	RESTORE_SP_AND_RET
SYM_CODE_END(ret_from_kernel_thread)

/* SPDX-License-Identifier: GPL-2.0-or-later
 * -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea	%sp@(24),%sp
	rts

ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone
	lea	%sp@(28),%sp
	rts

ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea	%sp@(24),%sp
	rts

ENTRY(__sys_clone3)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone3
	lea	%sp@(28),%sp
	rts

ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%a1				| switch_stack pointer
	lea	%sp@(SWITCH_STACK_SIZE),%a0	| pt_regs pointer
	lea	%sp@(-84),%sp			| leave a gap
	movel	%a1,%sp@-
	movel	%a0,%sp@-
	jbsr	do_sigreturn
	jra	1f				| shared with rt_sigreturn()

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%a1				| switch_stack pointer
	lea	%sp@(SWITCH_STACK_SIZE),%a0	| pt_regs pointer
	lea	%sp@(-84),%sp			| leave a gap
	movel	%a1,%sp@-
	movel	%a0,%sp@-
	| stack contents:
	|   [original pt_regs address] [original switch_stack address]
	|   [gap] [switch_stack] [pt_regs] [exception frame]
	jbsr	do_rt_sigreturn

1:
	| stack contents now:
	|   [original pt_regs address] [original switch_stack address]
	|   [unused part of the gap] [moved switch_stack] [moved pt_regs]
	|   [replacement exception frame]
	| return value of do_{rt_,}sigreturn() points to moved switch_stack.

	movel	%d0,%sp				| discard the leftover junk
	RESTORE_SWITCH_STACK
	| stack contents now is just [syscall return address] [pt_regs] [frame]
	| return pt_regs.d0
	movel	%sp@(PT_OFF_D0+4),%d0
	rts

ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

	| After a fork we jump here directly from resume,
	| so that %d1 contains the previous task
	| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-
	jsr	schedule_tail
	movel	%d7,(%sp)
	jsr	%a3@
	addql	#4,%sp
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif

ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception
	jmp	schedule

ENTRY(ret_from_user_signal)
	moveq	#__NR_sigreturn,%d0
	trap	#0

ENTRY(ret_from_user_rt_signal)
	movel	#__NR_rt_sigreturn,%d0
	trap	#0

#else

do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_enter
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	addql	#1,%d0			| optimization for cmpil #-1,%d0
	jeq	ret_from_syscall
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall
	jra	ret_from_syscall
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

do_trace_exit:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_leave
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)
	jmi	do_trace_entry
	| seccomp filter active?
	btst	#5,%a1@(TINFO_FLAGS+2)
	bnes	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0
	jne	syscall_exit_work
1:	RESTORE_ALL

syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0
	jne	exit_work
1:	RESTORE_ALL

exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jbra	resume_userspace

do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		| put vector # on stack
auto_irqhandler_fixup = . + 2
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack
	jra	ret_from_exception

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		| put vector # on stack
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack
	jra	ret_from_exception

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)

	movel	%sp,%sp@-
	jsr	handle_badint
	addql	#4,%sp
	jra	ret_from_exception

resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FC)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FC),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%d0
	oriw	#0x0700,%d0
	movew	%d0,%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */