/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
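/*
 * The fork-family wrappers below exist because copy_thread() needs the
 * caller's complete register set: SAVE_SWITCH_STACK pushes the
 * callee-saved registers (a switch_stack frame, six longs = 24 bytes)
 * on top of the pt_regs frame before calling into C, and the trailing
 * lea drops that frame again (28 bytes where an extra pointer argument
 * was pushed as well).
 */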
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea	%sp@(24),%sp
	rts

ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone
	lea	%sp@(28),%sp
	rts

ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea	%sp@(24),%sp
	rts

ENTRY(__sys_clone3)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone3
	lea	%sp@(28),%sp
	rts

ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%a1				| switch_stack pointer
	lea	%sp@(SWITCH_STACK_SIZE),%a0	| pt_regs pointer
	lea	%sp@(-84),%sp			| leave a gap
	movel	%a1,%sp@-
	movel	%a0,%sp@-
	jbsr	do_sigreturn
	jra	1f				| shared with rt_sigreturn()

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	movel	%sp,%a1				| switch_stack pointer
	lea	%sp@(SWITCH_STACK_SIZE),%a0	| pt_regs pointer
	lea	%sp@(-84),%sp			| leave a gap
	movel	%a1,%sp@-
	movel	%a0,%sp@-
	| stack contents:
	|   [original pt_regs address] [original switch_stack address]
	|   [gap] [switch_stack] [pt_regs] [exception frame]
	jbsr	do_rt_sigreturn

1:
	| stack contents now:
	|   [original pt_regs address] [original switch_stack address]
	|   [unused part of the gap] [moved switch_stack] [moved pt_regs]
	|   [replacement exception frame]
	| return value of do_{rt_,}sigreturn() points to moved switch_stack.

	movel	%d0,%sp		| discard the leftover junk
	RESTORE_SWITCH_STACK
	| stack contents is now just [syscall return address] [pt_regs] [frame]
	| return pt_regs.d0
	movel	%sp@(PT_OFF_D0+4),%d0
	rts

ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-
	jsr	schedule_tail
	movel	%d7,(%sp)
	jsr	%a3@
	addql	#4,%sp
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif
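/*
 * reschedule is entered with a kernel exception frame on the stack.
 * It records the frame address in thread.esp0 via set_esp0() and then
 * pushes ret_from_exception as a return address, so that when
 * schedule() returns in this task's context, control drops straight
 * into the exception-return path.
 */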
ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception
	jmp	schedule

ENTRY(ret_from_user_signal)
	moveq	#__NR_sigreturn,%d0
	trap	#0

ENTRY(ret_from_user_rt_signal)
	movel	#__NR_rt_sigreturn,%d0
	trap	#0

#else

do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_enter
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	addql	#1,%d0			| optimization for cmpil #-1,%d0
	jeq	ret_from_syscall
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall
	jra	ret_from_syscall
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

do_trace_exit:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_leave
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception
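/*
 * System call entry: the syscall number arrives in %d0 and the user
 * registers have already been spilled to a pt_regs frame by
 * SAVE_ALL_SYS. After the trace/seccomp checks, the memory-indirect
 * jbsr below fetches sys_call_table[%d0] (pointers are 4 bytes, hence
 * the :l:4 scaling) and calls through it - loosely, in C terms:
 *
 *	if (nr < NR_syscalls)
 *		regs->d0 = sys_call_table[nr]();
 *	else
 *		regs->d0 = -ENOSYS;
 *
 * (a sketch only - the handlers take their arguments from the saved
 * user registers on the stack.)
 */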
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)
	jmi	do_trace_entry
	| seccomp filter active?
	btst	#5,%a1@(TINFO_FLAGS+2)
	bnes	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0
	jne	syscall_exit_work
1:	RESTORE_ALL

syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0
	jne	exit_work
1:	RESTORE_ALL

exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule


do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jbra	resume_userspace

do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace


/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
auto_irqhandler_fixup = . + 2
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
					|  put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		|  put vector # on stack
	jsr	do_IRQ			|  process the IRQ
	addql	#8,%sp			|  pop parameters off stack
	jra	ret_from_exception

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)

	movel	%sp,%sp@-
	jsr	handle_badint
	addql	#4,%sp
	jra	ret_from_exception
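/*
 * resume() below is the low-level context switch, reached from the
 * switch_to() macro. The register contract matters: the outgoing task
 * comes in via %a0, the incoming one via %a1, and the outgoing task is
 * handed back in %d1 - which is what ret_from_fork above feeds to
 * schedule_tail() after a fork.
 */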
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FC)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)
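/*
 * Floating point state is saved with fsave, which dumps the FPU's
 * internal state frame. A null frame - the version byte at offset 0
 * (or, on the 060, the status byte at offset 2) reading zero - means
 * the FPU holds no live state, in which case the tstb/jeq tests below
 * skip storing the programmer-visible registers as well.
 */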
	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr
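/*
 * From this point %curptr names the incoming task while %sp still
 * points into the outgoing task's kernel stack; the switch completes
 * once KSP is reloaded below.
 */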
	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */
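/*
 * Reloading KSP below is the actual stack switch: everything after it
 * runs on the incoming task's kernel stack, so RESTORE_SWITCH_STACK
 * pops the registers that were saved when that task was last switched
 * away.
 */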
	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FC),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc
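/*
 * The saved status register is restored with the interrupt mask
 * forced to level 7 (the oriw below), so the tail of the switch runs
 * with interrupts disabled; the incoming task reinstates its real
 * interrupt level on its own return path.
 */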
	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%d0
	oriw	#0x0700,%d0
	movew	%d0,%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */