
TOMOYO Linux Cross Reference
Linux/arch/s390/kernel/entry.S

Diff markup

Differences between /arch/s390/kernel/entry.S (Version linux-6.12-rc7) and /arch/m68k/kernel/entry.S (Version linux-4.13.16)


  1 /* SPDX-License-Identifier: GPL-2.0 */         !!   1 /* -*- mode: asm -*-
                                                   >>   2  *
                                                   >>   3  *  linux/arch/m68k/kernel/entry.S
                                                   >>   4  *
                                                   >>   5  *  Copyright (C) 1991, 1992  Linus Torvalds
                                                   >>   6  *
                                                   >>   7  * This file is subject to the terms and conditions of the GNU General Public
                                                   >>   8  * License.  See the file README.legal in the main directory of this archive
                                                   >>   9  * for more details.
                                                   >>  10  *
                                                   >>  11  * Linux/m68k support by Hamish Macdonald
                                                   >>  12  *
                                                   >>  13  * 68060 fixes by Jesper Skov
                                                   >>  14  *
                                                   >>  15  */
                                                   >>  16 
  2 /*                                                 17 /*
  3  *    S390 low-level entry points.             !!  18  * entry.S  contains the system-call and fault low-level handling routines.
                                                   >>  19  * This also contains the timer-interrupt handler, as well as all interrupts
                                                   >>  20  * and faults that can result in a task-switch.
                                                   >>  21  *
                                                   >>  22  * NOTE: This code handles signal-recognition, which happens every time
                                                   >>  23  * after a timer-interrupt and after each system call.
  4  *                                                 24  *
  5  *    Copyright IBM Corp. 1999, 2012           << 
  6  *    Author(s): Martin Schwidefsky (schwidefs << 
  7  *               Hartmut Penner (hp@de.ibm.com << 
  8  *               Denis Joseph Barrow (djbarrow << 
  9  */                                                25  */
 10                                                    26 
 11 #include <linux/export.h>                      !!  27 /*
 12 #include <linux/init.h>                        !!  28  * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
                                                   >>  29  *               all pointers that used to be 'current' are now entry
                                                   >>  30  *               number 0 in the 'current_set' list.
                                                   >>  31  *
                                                   >>  32  *  6/05/00 RZ:  added writeback completion after return from sighandler
                                                   >>  33  *               for 68040
                                                   >>  34  */
                                                   >>  35 
 13 #include <linux/linkage.h>                         36 #include <linux/linkage.h>
 14 #include <asm/asm-extable.h>                   << 
 15 #include <asm/alternative.h>                   << 
 16 #include <asm/processor.h>                     << 
 17 #include <asm/cache.h>                         << 
 18 #include <asm/dwarf.h>                         << 
 19 #include <asm/errno.h>                             37 #include <asm/errno.h>
 20 #include <asm/ptrace.h>                        << 
 21 #include <asm/thread_info.h>                   << 
 22 #include <asm/asm-offsets.h>                   << 
 23 #include <asm/unistd.h>                        << 
 24 #include <asm/page.h>                          << 
 25 #include <asm/sigp.h>                          << 
 26 #include <asm/irq.h>                           << 
 27 #include <asm/fpu-insn.h>                      << 
 28 #include <asm/setup.h>                             38 #include <asm/setup.h>
 29 #include <asm/nmi.h>                           !!  39 #include <asm/segment.h>
 30 #include <asm/nospec-insn.h>                   !!  40 #include <asm/traps.h>
 31 #include <asm/lowcore.h>                       !!  41 #include <asm/unistd.h>
 32                                                !!  42 #include <asm/asm-offsets.h>
 33 _LPP_OFFSET     = __LC_LPP                     !!  43 #include <asm/entry.h>
 34                                                !!  44 
 35         .macro STBEAR address                  !!  45 .globl system_call, buserr, trap, resume
 36         ALTERNATIVE "nop", ".insn s,0xb2010000 !!  46 .globl sys_call_table
 37         .endm                                  !!  47 .globl __sys_fork, __sys_clone, __sys_vfork
 38                                                !!  48 .globl bad_interrupt
 39         .macro LBEAR address                   !!  49 .globl auto_irqhandler_fixup
 40         ALTERNATIVE "nop", ".insn s,0xb2000000 !!  50 .globl user_irqvec_fixup
 41         .endm                                  !!  51 
 42                                                !!  52 .text
 43         .macro LPSWEY address, lpswe           !!  53 ENTRY(__sys_fork)
 44         ALTERNATIVE_2 "b \lpswe;nopr", \       !!  54         SAVE_SWITCH_STACK
 45                 ".insn siy,0xeb0000000071,\add !!  55         jbsr    sys_fork
 46                 __stringify(.insn siy,0xeb0000 !!  56         lea     %sp@(24),%sp
 47                 ALT_LOWCORE                    !!  57         rts
 48         .endm                                  !!  58 
 49                                                !!  59 ENTRY(__sys_clone)
 50         .macro MBEAR reg, lowcore              !!  60         SAVE_SWITCH_STACK
 51         ALTERNATIVE "brcl 0,0", __stringify(mv !!  61         pea     %sp@(SWITCH_STACK_SIZE)
 52                 ALT_FACILITY(193)              !!  62         jbsr    m68k_clone
 53         .endm                                  !!  63         lea     %sp@(28),%sp
 54                                                !!  64         rts
 55         .macro  CHECK_STACK savearea, lowcore  !!  65 
 56 #ifdef CONFIG_CHECK_STACK                      !!  66 ENTRY(__sys_vfork)
 57         tml     %r15,THREAD_SIZE - CONFIG_STAC !!  67         SAVE_SWITCH_STACK
 58         la      %r14,\savearea(\lowcore)       !!  68         jbsr    sys_vfork
 59         jz      stack_overflow                 !!  69         lea     %sp@(24),%sp
                                                   >>  70         rts
                                                   >>  71 
                                                   >>  72 ENTRY(sys_sigreturn)
                                                   >>  73         SAVE_SWITCH_STACK
                                                   >>  74         movel   %sp,%sp@-                 | switch_stack pointer
                                                   >>  75         pea     %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
                                                   >>  76         jbsr    do_sigreturn
                                                   >>  77         addql   #8,%sp
                                                   >>  78         RESTORE_SWITCH_STACK
                                                   >>  79         rts
                                                   >>  80 
                                                   >>  81 ENTRY(sys_rt_sigreturn)
                                                   >>  82         SAVE_SWITCH_STACK
                                                   >>  83         movel   %sp,%sp@-                 | switch_stack pointer
                                                   >>  84         pea     %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
                                                   >>  85         jbsr    do_rt_sigreturn
                                                   >>  86         addql   #8,%sp
                                                   >>  87         RESTORE_SWITCH_STACK
                                                   >>  88         rts
                                                   >>  89 
                                                   >>  90 ENTRY(buserr)
                                                   >>  91         SAVE_ALL_INT
                                                   >>  92         GET_CURRENT(%d0)
                                                   >>  93         movel   %sp,%sp@-               | stack frame pointer argument
                                                   >>  94         jbsr    buserr_c
                                                   >>  95         addql   #4,%sp
                                                   >>  96         jra     ret_from_exception
                                                   >>  97 
                                                   >>  98 ENTRY(trap)
                                                   >>  99         SAVE_ALL_INT
                                                   >> 100         GET_CURRENT(%d0)
                                                   >> 101         movel   %sp,%sp@-               | stack frame pointer argument
                                                   >> 102         jbsr    trap_c
                                                   >> 103         addql   #4,%sp
                                                   >> 104         jra     ret_from_exception
                                                   >> 105 
                                                   >> 106         | After a fork we jump here directly from resume,
                                                   >> 107         | so that %d1 contains the previous task
                                                   >> 108         | schedule_tail now used regardless of CONFIG_SMP
                                                   >> 109 ENTRY(ret_from_fork)
                                                   >> 110         movel   %d1,%sp@-
                                                   >> 111         jsr     schedule_tail
                                                   >> 112         addql   #4,%sp
                                                   >> 113         jra     ret_from_exception
                                                   >> 114 
                                                   >> 115 ENTRY(ret_from_kernel_thread)
                                                   >> 116         | a3 contains the kernel thread payload, d7 - its argument
                                                   >> 117         movel   %d1,%sp@-
                                                   >> 118         jsr     schedule_tail
                                                   >> 119         movel   %d7,(%sp)
                                                   >> 120         jsr     %a3@
                                                   >> 121         addql   #4,%sp
                                                   >> 122         jra     ret_from_exception
                                                   >> 123 
                                                   >> 124 #if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
                                                   >> 125 
                                                   >> 126 #ifdef TRAP_DBG_INTERRUPT
                                                   >> 127 
                                                   >> 128 .globl dbginterrupt
                                                   >> 129 ENTRY(dbginterrupt)
                                                   >> 130         SAVE_ALL_INT
                                                   >> 131         GET_CURRENT(%d0)
                                                   >> 132         movel   %sp,%sp@-               /* stack frame pointer argument */
                                                   >> 133         jsr     dbginterrupt_c
                                                   >> 134         addql   #4,%sp
                                                   >> 135         jra     ret_from_exception
 60 #endif                                            136 #endif
 61         .endm                                  << 
 62                                                   137 
 63         .macro  CHECK_VMAP_STACK savearea, low !! 138 ENTRY(reschedule)
 64 #ifdef CONFIG_VMAP_STACK                       !! 139         /* save top of frame */
 65         lgr     %r14,%r15                      !! 140         pea     %sp@
 66         nill    %r14,0x10000 - THREAD_SIZE     !! 141         jbsr    set_esp0
 67         oill    %r14,STACK_INIT_OFFSET         !! 142         addql   #4,%sp
 68         clg     %r14,__LC_KERNEL_STACK(\lowcor !! 143         pea     ret_from_exception
 69         je      \oklabel                       !! 144         jmp     schedule
 70         clg     %r14,__LC_ASYNC_STACK(\lowcore !! 145 
 71         je      \oklabel                       !! 146 ENTRY(ret_from_user_signal)
 72         clg     %r14,__LC_MCCK_STACK(\lowcore) !! 147         moveq #__NR_sigreturn,%d0
 73         je      \oklabel                       !! 148         trap #0
 74         clg     %r14,__LC_NODAT_STACK(\lowcore !! 149 
 75         je      \oklabel                       !! 150 ENTRY(ret_from_user_rt_signal)
 76         clg     %r14,__LC_RESTART_STACK(\lowco !! 151         movel #__NR_rt_sigreturn,%d0
 77         je      \oklabel                       !! 152         trap #0
 78         la      %r14,\savearea(\lowcore)       !! 153 
 79         j       stack_overflow                 << 
 80 #else                                             154 #else
 81         j       \oklabel                       << 
 82 #endif                                         << 
 83         .endm                                  << 
 84                                                   155 
 85         /*                                     !! 156 do_trace_entry:
 86          * The TSTMSK macro generates a test-u !! 157         movel   #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
 87          * calculating the memory offset for t !! 158         subql   #4,%sp
 88          * Mask value can be any constant.  Th !! 159         SAVE_SWITCH_STACK
 89          * value to calculate the memory offse !! 160         jbsr    syscall_trace
 90          * instruction.                        !! 161         RESTORE_SWITCH_STACK
 91          */                                    !! 162         addql   #4,%sp
 92         .macro TSTMSK addr, mask, size=8, byte !! 163         movel   %sp@(PT_OFF_ORIG_D0),%d0
 93                 .if (\bytepos < \size) && (\ma !! 164         cmpl    #NR_syscalls,%d0
 94                         .if (\mask & 0xff)     !! 165         jcs     syscall
 95                                 .error "Mask e !! 166 badsys:
 96                         .endif                 !! 167         movel   #-ENOSYS,%sp@(PT_OFF_D0)
 97                         TSTMSK \addr, "(\mask  !! 168         jra     ret_from_syscall
 98                         .exitm                 !! 169 
 99                 .endif                         !! 170 do_trace_exit:
100                 .ifeq \mask                    !! 171         subql   #4,%sp
101                         .error "Mask must not  !! 172         SAVE_SWITCH_STACK
102                 .endif                         !! 173         jbsr    syscall_trace
103                 off = \size - \bytepos - 1     !! 174         RESTORE_SWITCH_STACK
104                 tm      off+\addr, \mask       !! 175         addql   #4,%sp
105         .endm                                  !! 176         jra     .Lret_from_exception
106                                                !! 177 
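
The TSTMSK comment in the left-hand column above describes how an arbitrary constant mask is turned into a single TEST UNDER MASK ("tm") instruction by locating the one byte of a big-endian field that contains the mask bits. A minimal user-space C sketch of that offset calculation, assuming the same "mask must not span a byte boundary" rule (the function name tstmsk_offset and the demo values are invented for illustration; the real macro does all of this at assembly time):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Compute the byte offset (from the start of a big-endian field of
 * 'size' bytes) and the one-byte mask that a tm instruction would use
 * for 'mask'.  Returns -1 if the mask is zero or spans more than one byte. */
static int tstmsk_offset(uint64_t mask, int size, uint8_t *byte_mask)
{
	int bytepos = 0;

	if (mask == 0)
		return -1;
	while (bytepos < size - 1 && (mask >> 8) != 0) {
		if (mask & 0xff)		/* bits in more than one byte */
			return -1;
		mask >>= 8;
		bytepos++;
	}
	if (mask > 0xff)
		return -1;
	*byte_mask = (uint8_t)mask;
	return size - bytepos - 1;		/* big-endian: byte 0 is the MSB */
}

int main(void)
{
	uint8_t m;
	/* e.g. a mask of 0x0100 in an 8-byte field hits byte 6 */
	int off = tstmsk_offset(0x0100, 8, &m);

	assert(off == 6 && m == 0x01);
	printf("tm %d(field),0x%02x\n", off, m);
	return 0;
}
```
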
107         .macro BPOFF                           !! 178 ENTRY(ret_from_signal)
108         ALTERNATIVE "nop", ".insn rrf,0xb2e800 !! 179         movel   %curptr@(TASK_STACK),%a1
109         .endm                                  !! 180         tstb    %a1@(TINFO_FLAGS+2)
110                                                !! 181         jge     1f
111         .macro BPON                            !! 182         jbsr    syscall_trace
112         ALTERNATIVE "nop", ".insn rrf,0xb2e800 !! 183 1:      RESTORE_SWITCH_STACK
113         .endm                                  !! 184         addql   #4,%sp
114                                                !! 185 /* on 68040 complete pending writebacks if any */
115         .macro BPENTER tif_ptr,tif_mask        !! 186 #ifdef CONFIG_M68040
116         ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask !! 187         bfextu  %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
117                     "j .+12; nop; nop", ALT_SP !! 188         subql   #7,%d0                          | bus error frame ?
118         .endm                                  !! 189         jbne    1f
119                                                !! 190         movel   %sp,%sp@-
120         .macro BPEXIT tif_ptr,tif_mask         !! 191         jbsr    berr_040cleanup
121         TSTMSK  \tif_ptr,\tif_mask             !! 192         addql   #4,%sp
122         ALTERNATIVE "jz .+8;  .insn rrf,0xb2e8 !! 193 1:
123                     "jnz .+8; .insn rrf,0xb2e8 << 
124         .endm                                  << 
125                                                << 
126 #if IS_ENABLED(CONFIG_KVM)                     << 
127         .macro SIEEXIT sie_control,lowcore     << 
128         lg      %r9,\sie_control               << 
129         ni      __SIE_PROG0C+3(%r9),0xfe       << 
130         lctlg   %c1,%c1,__LC_KERNEL_ASCE(\lowc << 
131         lg      %r9,__LC_CURRENT(\lowcore)     << 
132         mvi     __TI_sie(%r9),0                << 
133         larl    %r9,sie_exit                   << 
134         .endm                                  << 
135 #endif                                            194 #endif
                                                   >> 195         jra     .Lret_from_exception
136                                                   196 
137         .macro STACKLEAK_ERASE                 !! 197 ENTRY(system_call)
138 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK             !! 198         SAVE_ALL_SYS
139         brasl   %r14,stackleak_erase_on_task_s !! 199 
140 #endif                                         !! 200         GET_CURRENT(%d1)
141         .endm                                  !! 201         movel   %d1,%a1
142                                                   202 
143         GEN_BR_THUNK %r14                      !! 203         | save top of frame
                                                   >> 204         movel   %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
                                                   >> 205 
                                                   >> 206         | syscall trace?
                                                   >> 207         tstb    %a1@(TINFO_FLAGS+2)
                                                   >> 208         jmi     do_trace_entry
                                                   >> 209         cmpl    #NR_syscalls,%d0
                                                   >> 210         jcc     badsys
                                                   >> 211 syscall:
                                                   >> 212         jbsr    @(sys_call_table,%d0:l:4)@(0)
                                                   >> 213         movel   %d0,%sp@(PT_OFF_D0)     | save the return value
                                                   >> 214 ret_from_syscall:
                                                   >> 215         |oriw   #0x0700,%sr
                                                   >> 216         movel   %curptr@(TASK_STACK),%a1
                                                   >> 217         movew   %a1@(TINFO_FLAGS+2),%d0
                                                   >> 218         jne     syscall_exit_work
                                                   >> 219 1:      RESTORE_ALL
                                                   >> 220 
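
The m68k system_call path in the right-hand column above bounds-checks the syscall number in %d0 against NR_syscalls, stores -ENOSYS at badsys when it is out of range, and otherwise makes an indirect call through sys_call_table (4 bytes per entry). A hedged C sketch of that bounds-checked table dispatch; the demo_* handlers and table are invented stand-ins, only the -ENOSYS convention is taken from the code above:

```c
#include <errno.h>
#include <stdio.h>

typedef long (*syscall_fn)(long, long, long);

static long demo_getpid(long a, long b, long c) { (void)a; (void)b; (void)c; return 42; }
static long demo_write(long fd, long buf, long len) { (void)fd; (void)buf; return len; }

/* stand-in for sys_call_table: one function pointer per syscall number */
static const syscall_fn demo_call_table[] = {
	demo_getpid,	/* 0 */
	demo_write,	/* 1 */
};
#define DEMO_NR_SYSCALLS (sizeof(demo_call_table) / sizeof(demo_call_table[0]))

/* mirrors: cmpl #NR_syscalls,%d0 ; jcc badsys ; jbsr @(sys_call_table,%d0:l:4)@(0) */
static long demo_dispatch(unsigned long nr, long a, long b, long c)
{
	if (nr >= DEMO_NR_SYSCALLS)
		return -ENOSYS;		/* what badsys stores into the saved d0 */
	return demo_call_table[nr](a, b, c);
}

int main(void)
{
	printf("%ld %ld %ld\n", demo_dispatch(0, 0, 0, 0),
	       demo_dispatch(1, 1, 0, 5), demo_dispatch(99, 0, 0, 0));
	return 0;
}
```
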
                                                   >> 221 syscall_exit_work:
                                                   >> 222         btst    #5,%sp@(PT_OFF_SR)      | check if returning to kernel
                                                   >> 223         bnes    1b                      | if so, skip resched, signals
                                                   >> 224         lslw    #1,%d0
                                                   >> 225         jcs     do_trace_exit
                                                   >> 226         jmi     do_delayed_trace
                                                   >> 227         lslw    #8,%d0
                                                   >> 228         jne     do_signal_return
                                                   >> 229         pea     resume_userspace
                                                   >> 230         jra     schedule
                                                   >> 231 
                                                   >> 232 
                                                   >> 233 ENTRY(ret_from_exception)
                                                   >> 234 .Lret_from_exception:
                                                   >> 235         btst    #5,%sp@(PT_OFF_SR)      | check if returning to kernel
                                                   >> 236         bnes    1f                      | if so, skip resched, signals
                                                   >> 237         | only allow interrupts when we are really the last one on the
                                                   >> 238         | kernel stack, otherwise stack overflow can occur during
                                                   >> 239         | heavy interrupt load
                                                   >> 240         andw    #ALLOWINT,%sr
                                                   >> 241 
                                                   >> 242 resume_userspace:
                                                   >> 243         movel   %curptr@(TASK_STACK),%a1
                                                   >> 244         moveb   %a1@(TINFO_FLAGS+3),%d0
                                                   >> 245         jne     exit_work
                                                   >> 246 1:      RESTORE_ALL
                                                   >> 247 
                                                   >> 248 exit_work:
                                                   >> 249         | save top of frame
                                                   >> 250         movel   %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
                                                   >> 251         lslb    #1,%d0
                                                   >> 252         jne     do_signal_return
                                                   >> 253         pea     resume_userspace
                                                   >> 254         jra     schedule
                                                   >> 255 
                                                   >> 256 
                                                   >> 257 do_signal_return:
                                                   >> 258         |andw   #ALLOWINT,%sr
                                                   >> 259         subql   #4,%sp                  | dummy return address
                                                   >> 260         SAVE_SWITCH_STACK
                                                   >> 261         pea     %sp@(SWITCH_STACK_SIZE)
                                                   >> 262         bsrl    do_notify_resume
                                                   >> 263         addql   #4,%sp
                                                   >> 264         RESTORE_SWITCH_STACK
                                                   >> 265         addql   #4,%sp
                                                   >> 266         jbra    resume_userspace
                                                   >> 267 
                                                   >> 268 do_delayed_trace:
                                                   >> 269         bclr    #7,%sp@(PT_OFF_SR)      | clear trace bit in SR
                                                   >> 270         pea     1                       | send SIGTRAP
                                                   >> 271         movel   %curptr,%sp@-
                                                   >> 272         pea     LSIGTRAP
                                                   >> 273         jbsr    send_sig
                                                   >> 274         addql   #8,%sp
                                                   >> 275         addql   #4,%sp
                                                   >> 276         jbra    resume_userspace
                                                   >> 277 
                                                   >> 278 
                                                   >> 279 /* This is the main interrupt handler for autovector interrupts */
                                                   >> 280 
                                                   >> 281 ENTRY(auto_inthandler)
                                                   >> 282         SAVE_ALL_INT
                                                   >> 283         GET_CURRENT(%d0)
                                                   >> 284                                         |  put exception # in d0
                                                   >> 285         bfextu  %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
                                                   >> 286         subw    #VEC_SPUR,%d0
                                                   >> 287 
                                                   >> 288         movel   %sp,%sp@-
                                                   >> 289         movel   %d0,%sp@-               |  put vector # on stack
                                                   >> 290 auto_irqhandler_fixup = . + 2
                                                   >> 291         jsr     do_IRQ                  |  process the IRQ
                                                   >> 292         addql   #8,%sp                  |  pop parameters off stack
                                                   >> 293         jra     ret_from_exception
                                                   >> 294 
                                                   >> 295 /* Handler for user defined interrupt vectors */
                                                   >> 296 
                                                   >> 297 ENTRY(user_inthandler)
                                                   >> 298         SAVE_ALL_INT
                                                   >> 299         GET_CURRENT(%d0)
                                                   >> 300                                         |  put exception # in d0
                                                   >> 301         bfextu  %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
                                                   >> 302 user_irqvec_fixup = . + 2
                                                   >> 303         subw    #VEC_USER,%d0
                                                   >> 304 
                                                   >> 305         movel   %sp,%sp@-
                                                   >> 306         movel   %d0,%sp@-               |  put vector # on stack
                                                   >> 307         jsr     do_IRQ                  |  process the IRQ
                                                   >> 308         addql   #8,%sp                  |  pop parameters off stack
                                                   >> 309         jra     ret_from_exception
                                                   >> 310 
                                                   >> 311 /* Handler for uninitialized and spurious interrupts */
                                                   >> 312 
                                                   >> 313 ENTRY(bad_inthandler)
                                                   >> 314         SAVE_ALL_INT
                                                   >> 315         GET_CURRENT(%d0)
                                                   >> 316 
                                                   >> 317         movel   %sp,%sp@-
                                                   >> 318         jsr     handle_badint
                                                   >> 319         addql   #4,%sp
                                                   >> 320         jra     ret_from_exception
144                                                   321 
145         .section .kprobes.text, "ax"           !! 322 resume:
146 .Ldummy:                                       << 
147         /*                                        323         /*
148          * The following nop exists only in or !! 324          * Beware - when entering resume, prev (the current task) is
149          * symbol starts at the beginning of t !! 325          * in a0, next (the new task) is in a1,so don't change these
150          * In that case there would be several !! 326          * registers until their contents are no longer needed.
151          * E.g. objdump would take an arbitrar << 
152          * the code.                           << 
153          * With the added nop in between this  << 
154          */                                       327          */
155         nop     0                              << 
156                                                   328 
157 /*                                             !! 329         /* save sr */
158  * Scheduler resume function, called by __swit !! 330         movew   %sr,%a0@(TASK_THREAD+THREAD_SR)
159  *  gpr2 = (task_struct *)prev                 << 
160  *  gpr3 = (task_struct *)next                 << 
161  * Returns:                                    << 
162  *  gpr2 = prev                                << 
163  */                                            << 
164 SYM_FUNC_START(__switch_to_asm)                << 
165         stmg    %r6,%r15,__SF_GPRS(%r15)       << 
166         lghi    %r4,__TASK_stack               << 
167         lghi    %r1,__TASK_thread              << 
168         llill   %r5,STACK_INIT_OFFSET          << 
169         stg     %r15,__THREAD_ksp(%r1,%r2)     << 
170         lg      %r15,0(%r4,%r3)                << 
171         agr     %r15,%r5                       << 
172         GET_LC  %r13                           << 
173         stg     %r3,__LC_CURRENT(%r13)         << 
174         stg     %r15,__LC_KERNEL_STACK(%r13)   << 
175         lg      %r15,__THREAD_ksp(%r1,%r3)     << 
176         aghi    %r3,__TASK_pid                 << 
177         mvc     __LC_CURRENT_PID(4,%r13),0(%r3 << 
178         ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r << 
179         lmg     %r6,%r15,__SF_GPRS(%r15)       << 
180         BR_EX   %r14                           << 
181 SYM_FUNC_END(__switch_to_asm)                  << 
182                                                   331 
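
The __switch_to_asm comment above and the m68k resume comment in the right-hand column describe the same convention: the outgoing task is passed in, the incoming task's saved stack is resumed, and the identity of the previous task has to survive into the code that runs after the switch. A user-space sketch of that idea using POSIX swapcontext(); the struct task and switch_to names are invented for the demo, and a global stands in for the register (%r2 on s390, %d1 on m68k) that carries "prev" across the switch:

```c
#include <stdio.h>
#include <ucontext.h>

struct task { const char *name; ucontext_t ctx; };

static struct task t_main = { "main" };
static struct task t_other = { "other" };
static char other_stack[64 * 1024];

/* stands in for the register that carries "prev" across the switch */
static struct task *last_prev;

/* models the switch primitive: remember prev, save our context, resume next */
static void switch_to(struct task *prev, struct task *next)
{
	last_prev = prev;
	swapcontext(&prev->ctx, &next->ctx);
}

static void other_task(void)
{
	/* the freshly resumed task immediately sees which task ran before it */
	printf("other: resumed, prev was %s\n", last_prev->name);
	switch_to(&t_other, &t_main);
}

int main(void)
{
	getcontext(&t_other.ctx);
	t_other.ctx.uc_stack.ss_sp = other_stack;
	t_other.ctx.uc_stack.ss_size = sizeof(other_stack);
	t_other.ctx.uc_link = &t_main.ctx;
	makecontext(&t_other.ctx, other_task, 0);

	switch_to(&t_main, &t_other);
	printf("main: resumed, prev was %s\n", last_prev->name);
	return 0;
}
```
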
183 #if IS_ENABLED(CONFIG_KVM)                     !! 332         /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
184 /*                                             !! 333         movec   %sfc,%d0
185  * __sie64a calling convention:                !! 334         movew   %d0,%a0@(TASK_THREAD+THREAD_FS)
186  * %r2 pointer to sie control block phys       !! 335 
187  * %r3 pointer to sie control block virt       !! 336         /* save usp */
188  * %r4 guest register save area                !! 337         /* it is better to use a movel here instead of a movew 8*) */
189  * %r5 guest asce                              !! 338         movec   %usp,%d0
190  */                                            !! 339         movel   %d0,%a0@(TASK_THREAD+THREAD_USP)
191 SYM_FUNC_START(__sie64a)                       !! 340 
192         stmg    %r6,%r14,__SF_GPRS(%r15)       !! 341         /* save non-scratch registers on stack */
193         GET_LC  %r13                           !! 342         SAVE_SWITCH_STACK
194         lg      %r14,__LC_CURRENT(%r13)        !! 343 
195         stg     %r2,__SF_SIE_CONTROL_PHYS(%r15 !! 344         /* save current kernel stack pointer */
196         stg     %r3,__SF_SIE_CONTROL(%r15)     !! 345         movel   %sp,%a0@(TASK_THREAD+THREAD_KSP)
197         stg     %r4,__SF_SIE_SAVEAREA(%r15)    !! 346 
198         stg     %r5,__SF_SIE_GUEST_ASCE(%r15)  !! 347         /* save floating point context */
199         xc      __SF_SIE_REASON(8,%r15),__SF_S !! 348 #ifndef CONFIG_M68KFPU_EMU_ONLY
200         mvc     __SF_SIE_FLAGS(8,%r15),__TI_fl !! 349 #ifdef CONFIG_M68KFPU_EMU
201         lmg     %r0,%r13,0(%r4)                !! 350         tstl    m68k_fputype
202         mvi     __TI_sie(%r14),1               !! 351         jeq     3f
203         lctlg   %c1,%c1,__SF_SIE_GUEST_ASCE(%r << 
204         lg      %r14,__SF_SIE_CONTROL(%r15)    << 
205         oi      __SIE_PROG0C+3(%r14),1         << 
206         tm      __SIE_PROG20+3(%r14),3         << 
207         jnz     .Lsie_skip                     << 
208         lg      %r14,__SF_SIE_CONTROL_PHYS(%r1 << 
209         BPEXIT  __SF_SIE_FLAGS(%r15),_TIF_ISOL << 
210 .Lsie_entry:                                   << 
211         sie     0(%r14)                        << 
212 # Let the next instruction be NOP to avoid tri << 
213 # and handling it in a guest as result of the  << 
214         nopr    7                              << 
215 .Lsie_leave:                                   << 
216         BPOFF                                  << 
217         BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOL << 
218 .Lsie_skip:                                    << 
219         lg      %r14,__SF_SIE_CONTROL(%r15)    << 
220         ni      __SIE_PROG0C+3(%r14),0xfe      << 
221         GET_LC  %r14                           << 
222         lctlg   %c1,%c1,__LC_KERNEL_ASCE(%r14) << 
223         lg      %r14,__LC_CURRENT(%r14)        << 
224         mvi     __TI_sie(%r14),0               << 
225 # some program checks are suppressing. C code  << 
226 # will rewind the PSW by the ILC, which is oft << 
227 # are some corner cases (e.g. runtime instrume << 
228 # Other instructions between __sie64a and .Lsi << 
229 # interrupts. So lets use 3 nops as a landing  << 
230 .Lrewind_pad6:                                 << 
231         nopr    7                              << 
232 .Lrewind_pad4:                                 << 
233         nopr    7                              << 
234 .Lrewind_pad2:                                 << 
235         nopr    7                              << 
236 SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)        << 
237         lg      %r14,__SF_SIE_SAVEAREA(%r15)   << 
238         stmg    %r0,%r13,0(%r14)               << 
239         xgr     %r0,%r0                        << 
240         xgr     %r1,%r1                        << 
241         xgr     %r3,%r3                        << 
242         xgr     %r4,%r4                        << 
243         xgr     %r5,%r5                        << 
244         lmg     %r6,%r14,__SF_GPRS(%r15)       << 
245         lg      %r2,__SF_SIE_REASON(%r15)      << 
246         BR_EX   %r14                           << 
247 .Lsie_fault:                                   << 
248         lghi    %r14,-EFAULT                   << 
249         stg     %r14,__SF_SIE_REASON(%r15)     << 
250         j       sie_exit                       << 
251                                                << 
252         EX_TABLE(.Lrewind_pad6,.Lsie_fault)    << 
253         EX_TABLE(.Lrewind_pad4,.Lsie_fault)    << 
254         EX_TABLE(.Lrewind_pad2,.Lsie_fault)    << 
255         EX_TABLE(sie_exit,.Lsie_fault)         << 
256 SYM_FUNC_END(__sie64a)                         << 
257 EXPORT_SYMBOL(__sie64a)                        << 
258 EXPORT_SYMBOL(sie_exit)                        << 
259 #endif                                            352 #endif
                                                   >> 353         fsave   %a0@(TASK_THREAD+THREAD_FPSTATE)
260                                                   354 
261 /*                                             !! 355 #if defined(CONFIG_M68060)
262  * SVC interrupt handler routine. System calls !! 356 #if !defined(CPU_M68060_ONLY)
263  * are entered with interrupts disabled.       !! 357         btst    #3,m68k_cputype+3
264  */                                            !! 358         beqs    1f
265                                                << 
266 SYM_CODE_START(system_call)                    << 
267         STMG_LC %r8,%r15,__LC_SAVE_AREA        << 
268         GET_LC  %r13                           << 
269         stpt    __LC_SYS_ENTER_TIMER(%r13)     << 
270         BPOFF                                  << 
271         lghi    %r14,0                         << 
272 .Lsysc_per:                                    << 
273         STBEAR  __LC_LAST_BREAK(%r13)          << 
274         lctlg   %c1,%c1,__LC_KERNEL_ASCE(%r13) << 
275         lg      %r15,__LC_KERNEL_STACK(%r13)   << 
276         xc      __SF_BACKCHAIN(8,%r15),__SF_BA << 
277         stmg    %r0,%r7,STACK_FRAME_OVERHEAD+_ << 
278         # clear user controlled register to pr << 
279         xgr     %r0,%r0                        << 
280         xgr     %r1,%r1                        << 
281         xgr     %r4,%r4                        << 
282         xgr     %r5,%r5                        << 
283         xgr     %r6,%r6                        << 
284         xgr     %r7,%r7                        << 
285         xgr     %r8,%r8                        << 
286         xgr     %r9,%r9                        << 
287         xgr     %r10,%r10                      << 
288         xgr     %r11,%r11                      << 
289         la      %r2,STACK_FRAME_OVERHEAD(%r15) << 
290         mvc     __PT_R8(64,%r2),__LC_SAVE_AREA << 
291         MBEAR   %r2,%r13                       << 
292         lgr     %r3,%r14                       << 
293         brasl   %r14,__do_syscall              << 
294         STACKLEAK_ERASE                        << 
295         lctlg   %c1,%c1,__LC_USER_ASCE(%r13)   << 
296         mvc     __LC_RETURN_PSW(16,%r13),STACK << 
297         BPON                                   << 
298         LBEAR   STACK_FRAME_OVERHEAD+__PT_LAST << 
299         stpt    __LC_EXIT_TIMER(%r13)          << 
300         lmg     %r0,%r15,STACK_FRAME_OVERHEAD+ << 
301         LPSWEY  __LC_RETURN_PSW,__LC_RETURN_LP << 
302 SYM_CODE_END(system_call)                      << 
303                                                << 
304 #                                              << 
305 # a new process exits the kernel with ret_from << 
306 #                                              << 
307 SYM_CODE_START(ret_from_fork)                  << 
308         lgr     %r3,%r11                       << 
309         brasl   %r14,__ret_from_fork           << 
310         STACKLEAK_ERASE                        << 
311         GET_LC  %r13                           << 
312         lctlg   %c1,%c1,__LC_USER_ASCE(%r13)   << 
313         mvc     __LC_RETURN_PSW(16,%r13),STACK << 
314         BPON                                   << 
315         LBEAR   STACK_FRAME_OVERHEAD+__PT_LAST << 
316         stpt    __LC_EXIT_TIMER(%r13)          << 
317         lmg     %r0,%r15,STACK_FRAME_OVERHEAD+ << 
318         LPSWEY  __LC_RETURN_PSW,__LC_RETURN_LP << 
319 SYM_CODE_END(ret_from_fork)                    << 
320                                                << 
321 /*                                             << 
322  * Program check handler routine               << 
323  */                                            << 
324                                                << 
325 SYM_CODE_START(pgm_check_handler)              << 
326         STMG_LC %r8,%r15,__LC_SAVE_AREA        << 
327         GET_LC  %r13                           << 
328         stpt    __LC_SYS_ENTER_TIMER(%r13)     << 
329         BPOFF                                  << 
330         lgr     %r10,%r15                      << 
331         lmg     %r8,%r9,__LC_PGM_OLD_PSW(%r13) << 
332         tmhh    %r8,0x0001              # comi << 
333         jno     .Lpgm_skip_asce                << 
334         lctlg   %c1,%c1,__LC_KERNEL_ASCE(%r13) << 
335         j       3f                      # -> f << 
336 .Lpgm_skip_asce:                               << 
337 1:      tmhh    %r8,0x4000              # PER  << 
338         jnz     2f                      # -> e << 
339         tm      __LC_PGM_ILC+3(%r13),0x80      << 
340         jnz     .Lpgm_svcper            # -> s << 
341 2:      CHECK_STACK __LC_SAVE_AREA,%r13        << 
342         aghi    %r15,-(STACK_FRAME_OVERHEAD +  << 
343         # CHECK_VMAP_STACK branches to stack_o << 
344         CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4 << 
345 3:      lg      %r15,__LC_KERNEL_STACK(%r13)   << 
346 4:      la      %r11,STACK_FRAME_OVERHEAD(%r15 << 
347         xc      __PT_FLAGS(8,%r11),__PT_FLAGS( << 
348         xc      __SF_BACKCHAIN(8,%r15),__SF_BA << 
349         stmg    %r0,%r7,__PT_R0(%r11)          << 
350         mvc     __PT_R8(64,%r11),__LC_SAVE_ARE << 
351         mvc     __PT_LAST_BREAK(8,%r11),__LC_P << 
352         stctg   %c1,%c1,__PT_CR1(%r11)         << 
353 #if IS_ENABLED(CONFIG_KVM)                     << 
354         ltg     %r12,__LC_GMAP(%r13)           << 
355         jz      5f                             << 
356         clc     __GMAP_ASCE(8,%r12), __PT_CR1( << 
357         jne     5f                             << 
358         BPENTER __SF_SIE_FLAGS(%r10),_TIF_ISOL << 
359         SIEEXIT __SF_SIE_CONTROL(%r10),%r13    << 
360 #endif                                            359 #endif
361 5:      stmg    %r8,%r9,__PT_PSW(%r11)         !! 360         /* The 060 FPU keeps status in bits 15-8 of the first longword */
362         # clear user controlled registers to p !! 361         tstb    %a0@(TASK_THREAD+THREAD_FPSTATE+2)
363         xgr     %r0,%r0                        !! 362         jeq     3f
364         xgr     %r1,%r1                        !! 363 #if !defined(CPU_M68060_ONLY)
365         xgr     %r3,%r3                        !! 364         jra     2f
366         xgr     %r4,%r4                        !! 365 #endif
367         xgr     %r5,%r5                        !! 366 #endif /* CONFIG_M68060 */
368         xgr     %r6,%r6                        !! 367 #if !defined(CPU_M68060_ONLY)
369         xgr     %r7,%r7                        !! 368 1:      tstb    %a0@(TASK_THREAD+THREAD_FPSTATE)
370         lgr     %r2,%r11                       !! 369         jeq     3f
371         brasl   %r14,__do_pgm_check            !! 370 #endif
372         tmhh    %r8,0x0001              # retu !! 371 2:      fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
373         jno     .Lpgm_exit_kernel              !! 372         fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
374         STACKLEAK_ERASE                        !! 373 3:
375         lctlg   %c1,%c1,__LC_USER_ASCE(%r13)   !! 374 #endif  /* CONFIG_M68KFPU_EMU_ONLY */
376         BPON                                   !! 375         /* Return previous task in %d1 */
377         stpt    __LC_EXIT_TIMER(%r13)          !! 376         movel   %curptr,%d1
378 .Lpgm_exit_kernel:                             !! 377 
379         mvc     __LC_RETURN_PSW(16,%r13),STACK !! 378         /* switch to new task (a1 contains new task) */
380         LBEAR   STACK_FRAME_OVERHEAD+__PT_LAST !! 379         movel   %a1,%curptr
381         lmg     %r0,%r15,STACK_FRAME_OVERHEAD+ !! 380 
382         LPSWEY  __LC_RETURN_PSW,__LC_RETURN_LP !! 381         /* restore floating point context */
383                                                !! 382 #ifndef CONFIG_M68KFPU_EMU_ONLY
384 #                                              !! 383 #ifdef CONFIG_M68KFPU_EMU
385 # single stepped system call                   !! 384         tstl    m68k_fputype
386 #                                              !! 385         jeq     4f
387 .Lpgm_svcper:                                  << 
388         mvc     __LC_RETURN_PSW(8,%r13),__LC_S << 
389         larl    %r14,.Lsysc_per                << 
390         stg     %r14,__LC_RETURN_PSW+8(%r13)   << 
391         lghi    %r14,1                         << 
392         LBEAR   __LC_PGM_LAST_BREAK(%r13)      << 
393         LPSWEY  __LC_RETURN_PSW,__LC_RETURN_LP << 
394 SYM_CODE_END(pgm_check_handler)                << 
395                                                << 
396 /*                                             << 
397  * Interrupt handler macro used for external a << 
398  */                                            << 
399 .macro INT_HANDLER name,lc_old_psw,handler     << 
400 SYM_CODE_START(\name)                          << 
401         STMG_LC %r8,%r15,__LC_SAVE_AREA        << 
402         GET_LC  %r13                           << 
403         stckf   __LC_INT_CLOCK(%r13)           << 
404         stpt    __LC_SYS_ENTER_TIMER(%r13)     << 
405         STBEAR  __LC_LAST_BREAK(%r13)          << 
406         BPOFF                                  << 
407         lmg     %r8,%r9,\lc_old_psw(%r13)      << 
408         tmhh    %r8,0x0001                     << 
409         jnz     1f                             << 
410 #if IS_ENABLED(CONFIG_KVM)                     << 
411         lg      %r10,__LC_CURRENT(%r13)        << 
412         tm      __TI_sie(%r10),0xff            << 
413         jz      0f                             << 
414         BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOL << 
415         SIEEXIT __SF_SIE_CONTROL(%r15),%r13    << 
416 #endif                                            386 #endif
417 0:      CHECK_STACK __LC_SAVE_AREA,%r13        !! 387 #if defined(CONFIG_M68060)
418         aghi    %r15,-(STACK_FRAME_OVERHEAD +  !! 388 #if !defined(CPU_M68060_ONLY)
419         j       2f                             !! 389         btst    #3,m68k_cputype+3
420 1:      lctlg   %c1,%c1,__LC_KERNEL_ASCE(%r13) !! 390         beqs    1f
421         lg      %r15,__LC_KERNEL_STACK(%r13)   !! 391 #endif
422 2:      xc      __SF_BACKCHAIN(8,%r15),__SF_BA !! 392         /* The 060 FPU keeps status in bits 15-8 of the first longword */
423         la      %r11,STACK_FRAME_OVERHEAD(%r15 !! 393         tstb    %a1@(TASK_THREAD+THREAD_FPSTATE+2)
424         stmg    %r0,%r7,__PT_R0(%r11)          !! 394         jeq     3f
425         # clear user controlled registers to p !! 395 #if !defined(CPU_M68060_ONLY)
426         xgr     %r0,%r0                        !! 396         jra     2f
427         xgr     %r1,%r1                        !! 397 #endif
428         xgr     %r3,%r3                        !! 398 #endif /* CONFIG_M68060 */
429         xgr     %r4,%r4                        !! 399 #if !defined(CPU_M68060_ONLY)
430         xgr     %r5,%r5                        !! 400 1:      tstb    %a1@(TASK_THREAD+THREAD_FPSTATE)
431         xgr     %r6,%r6                        !! 401         jeq     3f
432         xgr     %r7,%r7                        !! 402 #endif
433         xgr     %r10,%r10                      !! 403 2:      fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
434         xc      __PT_FLAGS(8,%r11),__PT_FLAGS( !! 404         fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
435         mvc     __PT_R8(64,%r11),__LC_SAVE_ARE !! 405 3:      frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
436         MBEAR   %r11,%r13                      !! 406 4:
437         stmg    %r8,%r9,__PT_PSW(%r11)         !! 407 #endif  /* CONFIG_M68KFPU_EMU_ONLY */
438         lgr     %r2,%r11                # pass << 
439         brasl   %r14,\handler                  << 
440         mvc     __LC_RETURN_PSW(16,%r13),__PT_ << 
441         tmhh    %r8,0x0001              # retu << 
442         jno     2f                             << 
443         STACKLEAK_ERASE                        << 
444         lctlg   %c1,%c1,__LC_USER_ASCE(%r13)   << 
445         BPON                                   << 
446         stpt    __LC_EXIT_TIMER(%r13)          << 
447 2:      LBEAR   __PT_LAST_BREAK(%r11)          << 
448         lmg     %r0,%r15,__PT_R0(%r11)         << 
449         LPSWEY  __LC_RETURN_PSW,__LC_RETURN_LP << 
450 SYM_CODE_END(\name)                            << 
451 .endm                                          << 
452                                                   408 
453 INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,d !! 409         /* restore the kernel stack pointer */
454 INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_ !! 410         movel   %a1@(TASK_THREAD+THREAD_KSP),%sp
455                                                   411 
456 /*                                             !! 412         /* restore non-scratch registers */
457  * Machine check handler routines              !! 413         RESTORE_SWITCH_STACK
458  */                                            << 
459 SYM_CODE_START(mcck_int_handler)               << 
460         BPOFF                                  << 
461         GET_LC  %r13                           << 
462         lmg     %r8,%r9,__LC_MCK_OLD_PSW(%r13) << 
463         TSTMSK  __LC_MCCK_CODE(%r13),MCCK_CODE << 
464         jo      .Lmcck_panic            # yes  << 
465         TSTMSK  __LC_MCCK_CODE(%r13),MCCK_CODE << 
466         jno     .Lmcck_panic            # cont << 
467         ptlb                                   << 
468         lay     %r14,__LC_CPU_TIMER_SAVE_AREA( << 
469         mvc     __LC_MCCK_ENTER_TIMER(8,%r13), << 
470         TSTMSK  __LC_MCCK_CODE(%r13),MCCK_CODE << 
471         jo      3f                             << 
472         la      %r14,__LC_SYS_ENTER_TIMER(%r13 << 
473         clc     0(8,%r14),__LC_EXIT_TIMER(%r13 << 
474         jl      1f                             << 
475         la      %r14,__LC_EXIT_TIMER(%r13)     << 
476 1:      clc     0(8,%r14),__LC_LAST_UPDATE_TIM << 
477         jl      2f                             << 
478         la      %r14,__LC_LAST_UPDATE_TIMER(%r << 
479 2:      spt     0(%r14)                        << 
480         mvc     __LC_MCCK_ENTER_TIMER(8,%r13), << 
481 3:      TSTMSK  __LC_MCCK_CODE(%r13),MCCK_CODE << 
482         jno     .Lmcck_panic                   << 
483         tmhh    %r8,0x0001              # inte << 
484         jnz     .Lmcck_user                    << 
485         TSTMSK  __LC_MCCK_CODE(%r13),MCCK_CODE << 
486         jno     .Lmcck_panic                   << 
487 #if IS_ENABLED(CONFIG_KVM)                     << 
488         lg      %r10,__LC_CURRENT(%r13)        << 
489         tm      __TI_sie(%r10),0xff            << 
490         jz      .Lmcck_user                    << 
491         # Need to compare the address instead  << 
492         # Otherwise there would be a race betw << 
493         # and entering SIE (or leaving and cle << 
494         # would cause machine checks targeted  << 
495         # handled by the host.                 << 
496         larl    %r14,.Lsie_entry               << 
497         clgrjl  %r9,%r14, 4f                   << 
498         larl    %r14,.Lsie_leave               << 
499         clgrjhe %r9,%r14, 4f                   << 
500         lg      %r10,__LC_PCPU                 << 
501         oi      __PCPU_FLAGS+7(%r10), _CIF_MCC << 
502 4:      BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOL << 
503         SIEEXIT __SF_SIE_CONTROL(%r15),%r13    << 
504 #endif                                         << 
505 .Lmcck_user:                                   << 
506         lg      %r15,__LC_MCCK_STACK(%r13)     << 
507         la      %r11,STACK_FRAME_OVERHEAD(%r15 << 
508         stctg   %c1,%c1,__PT_CR1(%r11)         << 
509         lctlg   %c1,%c1,__LC_KERNEL_ASCE(%r13) << 
510         xc      __SF_BACKCHAIN(8,%r15),__SF_BA << 
511         lay     %r14,__LC_GPREGS_SAVE_AREA(%r1 << 
512         mvc     __PT_R0(128,%r11),0(%r14)      << 
513         # clear user controlled registers to p << 
514         xgr     %r0,%r0                        << 
515         xgr     %r1,%r1                        << 
516         xgr     %r3,%r3                        << 
517         xgr     %r4,%r4                        << 
518         xgr     %r5,%r5                        << 
519         xgr     %r6,%r6                        << 
520         xgr     %r7,%r7                        << 
521         xgr     %r10,%r10                      << 
522         stmg    %r8,%r9,__PT_PSW(%r11)         << 
523         xc      __PT_FLAGS(8,%r11),__PT_FLAGS( << 
524         xc      __SF_BACKCHAIN(8,%r15),__SF_BA << 
525         lgr     %r2,%r11                # pass << 
526         brasl   %r14,s390_do_machine_check     << 
527         lctlg   %c1,%c1,__PT_CR1(%r11)         << 
528         lmg     %r0,%r10,__PT_R0(%r11)         << 
529         mvc     __LC_RETURN_MCCK_PSW(16,%r13), << 
530         tm      __LC_RETURN_MCCK_PSW+1(%r13),0 << 
531         jno     0f                             << 
532         BPON                                   << 
533         stpt    __LC_EXIT_TIMER(%r13)          << 
534 0:      ALTERNATIVE "brcl 0,0", __stringify(la << 
535                 ALT_FACILITY(193)              << 
536         LBEAR   0(%r12)                        << 
537         lmg     %r11,%r15,__PT_R11(%r11)       << 
538         LPSWEY  __LC_RETURN_MCCK_PSW,__LC_RETU << 
539                                                   414 
540 .Lmcck_panic:                                  !! 415         /* restore user stack pointer */
541         /*                                     !! 416         movel   %a1@(TASK_THREAD+THREAD_USP),%a0
542          * Iterate over all possible CPU addre !! 417         movel   %a0,%usp
543          * and stop each CPU using signal proc << 
544          * to allow just one CPU-stopper and p << 
545          * stopping each other while leaving t << 
546          */                                    << 
547         lhi     %r5,0                          << 
548         lhi     %r6,1                          << 
549         larl    %r7,stop_lock                  << 
550         cs      %r5,%r6,0(%r7)          # sing << 
551         jnz     4f                             << 
552         larl    %r7,this_cpu                   << 
553         stap    0(%r7)                  # this << 
554         lh      %r4,0(%r7)                     << 
555         nilh    %r4,0                          << 
556         lhi     %r0,1                          << 
557         sll     %r0,16                  # CPU  << 
558         lhi     %r3,0                   # next << 
559 0:      cr      %r3,%r4                        << 
560         je      2f                             << 
561 1:      sigp    %r1,%r3,SIGP_STOP       # stop << 
562         brc     SIGP_CC_BUSY,1b                << 
563 2:      ahi     %r3,1                          << 
564         brct    %r0,0b                         << 
565 3:      sigp    %r1,%r4,SIGP_STOP       # stop << 
566         brc     SIGP_CC_BUSY,3b                << 
567 4:      j       4b                             << 
568 SYM_CODE_END(mcck_int_handler)                 << 
569                                                << 
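
The .Lmcck_panic comment above describes electing exactly one "CPU-stopper" with a compare-and-swap on stop_lock, so that CPUs taking a machine check at the same time do not all try to stop each other. A small C11 sketch of that election pattern, assuming a placeholder stop_cpu() callback and CPU count; the real code guards the election with the cs instruction and stops CPUs with SIGP "stop" orders:

```c
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4			/* placeholder CPU count for the demo */

static atomic_int stop_lock;		/* 0 = free, 1 = a stopper was elected */

static void stop_cpu(int cpu)		/* placeholder for "sigp ...,SIGP_STOP" */
{
	printf("stopping cpu %d\n", cpu);
}

static void mcck_panic(int this_cpu)
{
	int expected = 0;

	/* only the first CPU to swap stop_lock 0 -> 1 becomes the stopper;
	 * CPUs that lose the race fall straight through to the final spin */
	if (atomic_compare_exchange_strong(&stop_lock, &expected, 1)) {
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu != this_cpu)
				stop_cpu(cpu);
		stop_cpu(this_cpu);	/* the real handler finally stops itself too */
	}
	/* the real handler ends in an endless "4: j 4b" loop here */
}

int main(void)
{
	mcck_panic(1);
	mcck_panic(2);			/* loses the election: stops nothing */
	return 0;
}
```
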
570 SYM_CODE_START(restart_int_handler)            << 
571         ALTERNATIVE "nop", "lpp _LPP_OFFSET",  << 
572         stg     %r15,__LC_SAVE_AREA_RESTART    << 
573         TSTMSK  __LC_RESTART_FLAGS,RESTART_FLA << 
574         jz      0f                             << 
575         lctlg   %c0,%c15,__LC_CREGS_SAVE_AREA  << 
576 0:      larl    %r15,daton_psw                 << 
577         lpswe   0(%r15)                        << 
578 .Ldaton:                                       << 
579         GET_LC  %r15                           << 
580         lg      %r15,__LC_RESTART_STACK(%r15)  << 
581         xc      STACK_FRAME_OVERHEAD(__PT_SIZE << 
582         stmg    %r0,%r14,STACK_FRAME_OVERHEAD+ << 
583         GET_LC  %r13                           << 
584         mvc     STACK_FRAME_OVERHEAD+__PT_R15( << 
585         mvc     STACK_FRAME_OVERHEAD+__PT_PSW( << 
586         xc      0(STACK_FRAME_OVERHEAD,%r15),0 << 
587         lg      %r1,__LC_RESTART_FN(%r13)      << 
588         lg      %r2,__LC_RESTART_DATA(%r13)    << 
589         lgf     %r3,__LC_RESTART_SOURCE(%r13)  << 
590         ltgr    %r3,%r3                        << 
591         jm      1f                             << 
592 0:      sigp    %r4,%r3,SIGP_SENSE             << 
593         brc     10,0b                          << 
594 1:      basr    %r14,%r1                       << 
595         stap    __SF_EMPTY(%r15)               << 
596         llgh    %r3,__SF_EMPTY(%r15)           << 
597 2:      sigp    %r4,%r3,SIGP_STOP              << 
598         brc     2,2b                           << 
599 3:      j       3b                             << 
600 SYM_CODE_END(restart_int_handler)              << 
601                                                << 
602         __INIT                                 << 
603 SYM_CODE_START(early_pgm_check_handler)        << 
604         STMG_LC %r8,%r15,__LC_SAVE_AREA        << 
605         GET_LC  %r13                           << 
606         aghi    %r15,-(STACK_FRAME_OVERHEAD+__ << 
607         la      %r11,STACK_FRAME_OVERHEAD(%r15 << 
608         xc      __SF_BACKCHAIN(8,%r15),__SF_BA << 
609         stmg    %r0,%r7,__PT_R0(%r11)          << 
610         mvc     __PT_PSW(16,%r11),__LC_PGM_OLD << 
611         mvc     __PT_R8(64,%r11),__LC_SAVE_ARE << 
612         lgr     %r2,%r11                       << 
613         brasl   %r14,__do_early_pgm_check      << 
614         mvc     __LC_RETURN_PSW(16,%r13),STACK << 
615         lmg     %r0,%r15,STACK_FRAME_OVERHEAD+ << 
616         LPSWEY  __LC_RETURN_PSW,__LC_RETURN_LP << 
617 SYM_CODE_END(early_pgm_check_handler)          << 
618         __FINIT                                << 
619                                                   418 
620         .section .kprobes.text, "ax"           !! 419         /* restore fs (sfc,%dfc) */
                                                   >> 420         movew   %a1@(TASK_THREAD+THREAD_FS),%a0
                                                   >> 421         movec   %a0,%sfc
                                                   >> 422         movec   %a0,%dfc
621                                                   423 
622 #if defined(CONFIG_CHECK_STACK) || defined(CON !! 424         /* restore status register */
623 /*                                             !! 425         movew   %a1@(TASK_THREAD+THREAD_SR),%sr
624  * The synchronous or the asynchronous stack o << 
625  * No need to properly save the registers, we  << 
626  * Setup a pt_regs so that show_trace can prov << 
627  */                                            << 
628 SYM_CODE_START(stack_overflow)                 << 
629         GET_LC  %r15                           << 
630         lg      %r15,__LC_NODAT_STACK(%r15) #  << 
631         la      %r11,STACK_FRAME_OVERHEAD(%r15 << 
632         stmg    %r0,%r7,__PT_R0(%r11)          << 
633         stmg    %r8,%r9,__PT_PSW(%r11)         << 
634         mvc     __PT_R8(64,%r11),0(%r14)       << 
635         stg     %r10,__PT_ORIG_GPR2(%r11) # st << 
636         xc      __SF_BACKCHAIN(8,%r15),__SF_BA << 
637         lgr     %r2,%r11                # pass << 
638         jg      kernel_stack_overflow          << 
639 SYM_CODE_END(stack_overflow)                   << 
640 #endif                                         << 
641                                                   426 
642         .section .data, "aw"                   !! 427         rts
643         .balign 4                              !! 428 
644 SYM_DATA_LOCAL(stop_lock,       .long 0)       !! 429 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
645 SYM_DATA_LOCAL(this_cpu,        .short 0)      << 
646         .balign 8                              << 
647 SYM_DATA_START_LOCAL(daton_psw)                << 
648         .quad   PSW_KERNEL_BITS                << 
649         .quad   .Ldaton                        << 
650 SYM_DATA_END(daton_psw)                        << 
651                                                << 
652         .section .rodata, "a"                  << 
653         .balign 8                              << 
654 #define SYSCALL(esame,emu)      .quad __s390x_ << 
655 SYM_DATA_START(sys_call_table)                 << 
656 #include "asm/syscall_table.h"                 << 
657 SYM_DATA_END(sys_call_table)                   << 
658 #undef SYSCALL                                 << 
659                                                << 
660 #ifdef CONFIG_COMPAT                           << 
661                                                << 
662 #define SYSCALL(esame,emu)      .quad __s390_  << 
663 SYM_DATA_START(sys_call_table_emu)             << 
664 #include "asm/syscall_table.h"                 << 
665 SYM_DATA_END(sys_call_table_emu)               << 
666 #undef SYSCALL                                 << 
667 #endif                                         << 
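
The table definitions above rely on defining the SYSCALL() macro differently and including the same generated list (asm/syscall_table.h) twice: once for the 64-bit entry points and, under CONFIG_COMPAT, once more for the compat entry points. A hedged C illustration of that "expand one list under two macro definitions" pattern; the DEMO_SYSCALL_LIST contents and handler names are invented, and the real tables are arrays of .quad entries built by the assembler:

```c
#include <stdio.h>

/* stand-in for asm/syscall_table.h: one SYSCALL() line per syscall */
#define DEMO_SYSCALL_LIST \
	SYSCALL(read)  \
	SYSCALL(write) \
	SYSCALL(open)

static long native_read(void)  { return 0; }
static long native_write(void) { return 1; }
static long native_open(void)  { return 2; }
static long compat_read(void)  { return 100; }
static long compat_write(void) { return 101; }
static long compat_open(void)  { return 102; }

typedef long (*entry_fn)(void);

/* first expansion of the list: native table */
#define SYSCALL(name) native_##name,
static const entry_fn demo_sys_call_table[] = { DEMO_SYSCALL_LIST };
#undef SYSCALL

/* second expansion of the same list: compat table */
#define SYSCALL(name) compat_##name,
static const entry_fn demo_sys_call_table_emu[] = { DEMO_SYSCALL_LIST };
#undef SYSCALL

int main(void)
{
	printf("%ld %ld\n", demo_sys_call_table[1](), demo_sys_call_table_emu[1]());
	return 0;
}
```
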
                                                      
