/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm-extable.h>
#include <asm/alternative.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/fpu-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/nospec-insn.h>
#include <asm/lowcore.h>

_LPP_OFFSET	= __LC_LPP

	.macro STBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2010000,\address", ALT_FACILITY(193)
	.endm

	.macro LBEAR address
	ALTERNATIVE "nop", ".insn s,0xb2000000,\address", ALT_FACILITY(193)
	.endm

	.macro LPSWEY address, lpswe
	ALTERNATIVE_2 "b \lpswe;nopr", \
		".insn siy,0xeb0000000071,\address,0", ALT_FACILITY(193), \
		__stringify(.insn siy,0xeb0000000071,LOWCORE_ALT_ADDRESS+\address,0), \
		ALT_LOWCORE
	.endm

	.macro MBEAR reg, lowcore
	ALTERNATIVE "brcl 0,0", __stringify(mvc __PT_LAST_BREAK(8,\reg),__LC_LAST_BREAK(\lowcore)),\
		ALT_FACILITY(193)
	.endm

	.macro CHECK_STACK savearea, lowcore
#ifdef CONFIG_CHECK_STACK
	tml	%r15,THREAD_SIZE - CONFIG_STACK_GUARD
	la	%r14,\savearea(\lowcore)
	jz	stack_overflow
#endif
	.endm

	.macro CHECK_VMAP_STACK savearea, lowcore, oklabel
#ifdef CONFIG_VMAP_STACK
	lgr	%r14,%r15
	nill	%r14,0x10000 - THREAD_SIZE
	oill	%r14,STACK_INIT_OFFSET
	clg	%r14,__LC_KERNEL_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_ASYNC_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_MCCK_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_NODAT_STACK(\lowcore)
	je	\oklabel
	clg	%r14,__LC_RESTART_STACK(\lowcore)
	je	\oklabel
	la	%r14,\savearea(\lowcore)
	j	stack_overflow
#else
	j	\oklabel
#endif
	.endm

/*
 * The TSTMSK macro generates a test-under-mask instruction by
 * calculating the memory offset for the specified mask value.
 * Mask value can be any constant. The macro shifts the mask
 * value to calculate the memory offset for the test-under-mask
 * instruction.
 */
	.macro TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
		.if (\mask & 0xff)
		.error "Mask exceeds byte boundary"
		.endif
		TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
		.exitm
	.endif
	.ifeq \mask
	.error "Mask must not be zero"
	.endif
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm
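
/*
 * Example: with the default field size of 8 bytes, a mask of 0x0100 is
 * shifted right once (bytepos becomes 1), so off = 8 - 1 - 1 = 6 and
 * the macro emits "tm 6+addr,0x01". A mask that straddles a byte
 * boundary, e.g. 0x0180, is rejected at assembly time with the
 * "Mask exceeds byte boundary" error.
 */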

	.macro BPOFF
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,12,0", ALT_SPEC(82)
	.endm

	.macro BPON
	ALTERNATIVE "nop", ".insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
	.endm

	.macro BPENTER tif_ptr,tif_mask
	ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .insn rrf,0xb2e80000,0,0,13,0", \
		    "j .+12; nop; nop", ALT_SPEC(82)
	.endm

	.macro BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	ALTERNATIVE "jz .+8; .insn rrf,0xb2e80000,0,0,12,0", \
		    "jnz .+8; .insn rrf,0xb2e80000,0,0,13,0", ALT_SPEC(82)
	.endm
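
/*
 * 0xb2e8 is the PPA (perform processor assist) opcode; function codes
 * 12 and 13 apparently disable and re-enable branch prediction as part
 * of the spectre mitigation, and are only patched in when facility 82
 * is available and the mitigation is enabled (ALT_SPEC). BPENTER and
 * BPEXIT apply the toggle conditionally, based on a thread flag such
 * as _TIF_ISOLATE_BP_GUEST.
 */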

#if IS_ENABLED(CONFIG_KVM)
	.macro SIEEXIT sie_control,lowcore
	lg	%r9,\sie_control		# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_KERNEL_ASCE(\lowcore)	# load primary asce
	lg	%r9,__LC_CURRENT(\lowcore)
	mvi	__TI_sie(%r9),0
	larl	%r9,sie_exit			# skip forward to sie_exit
	.endm
#endif

	.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	brasl	%r14,stackleak_erase_on_task_stack
#endif
	.endm

	GEN_BR_THUNK %r14

	.section .kprobes.text, "ax"
.Ldummy:
	/*
	 * The following nop exists only in order to avoid that the next
	 * symbol starts at the beginning of the kprobes text section.
	 * In that case there would be several symbols at the same address.
	 * E.g. objdump would take an arbitrary symbol when disassembling
	 * the code.
	 * With the added nop in between this cannot happen.
	 */
	nop	0

/*
 * Scheduler resume function, called by __switch_to
 *  gpr2 = (task_struct *)prev
 *  gpr3 = (task_struct *)next
 * Returns:
 *  gpr2 = prev
 */
SYM_FUNC_START(__switch_to_asm)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lghi	%r4,__TASK_stack
	lghi	%r1,__TASK_thread
	llill	%r5,STACK_INIT_OFFSET
	stg	%r15,__THREAD_ksp(%r1,%r2)	# store kernel stack of prev
	lg	%r15,0(%r4,%r3)			# start of kernel stack of next
	agr	%r15,%r5			# end of kernel stack of next
	GET_LC	%r13
	stg	%r3,__LC_CURRENT(%r13)		# store task struct of next
	stg	%r15,__LC_KERNEL_STACK(%r13)	# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1,%r3)	# load kernel stack of next
	aghi	%r3,__TASK_pid
	mvc	__LC_CURRENT_PID(4,%r13),0(%r3)	# store pid of next
	ALTERNATIVE "nop", "lpp _LPP_OFFSET(%r13)", ALT_FACILITY(40)
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	BR_EX	%r14
SYM_FUNC_END(__switch_to_asm)

#if IS_ENABLED(CONFIG_KVM)
/*
 * __sie64a calling convention:
 * %r2 pointer to sie control block phys
 * %r3 pointer to sie control block virt
 * %r4 guest register save area
 * %r5 guest asce
 */
SYM_FUNC_START(__sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	GET_LC	%r13
	lg	%r14,__LC_CURRENT(%r13)
	stg	%r2,__SF_SIE_CONTROL_PHYS(%r15)	# save sie block physical..
	stg	%r3,__SF_SIE_CONTROL(%r15)	# ...and virtual addresses
	stg	%r4,__SF_SIE_SAVEAREA(%r15)	# save guest register save area
	stg	%r5,__SF_SIE_GUEST_ASCE(%r15)	# save guest asce
	xc	__SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15)	# reason code = 0
	mvc	__SF_SIE_FLAGS(8,%r15),__TI_flags(%r14)	# copy thread flags
	lmg	%r0,%r13,0(%r4)			# load guest gprs 0-13
	mvi	__TI_sie(%r14),1
	lctlg	%c1,%c1,__SF_SIE_GUEST_ASCE(%r15)	# load primary asce
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	lg	%r14,__SF_SIE_CONTROL_PHYS(%r15)	# get sie block phys addr
	BPEXIT	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_entry:
	sie	0(%r14)
	# Let the next instruction be NOP to avoid triggering a machine check
	# and handling it in a guest as a result of the instruction execution.
	nopr	7
.Lsie_leave:
	BPOFF
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
.Lsie_skip:
	lg	%r14,__SF_SIE_CONTROL(%r15)	# get control block pointer
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	GET_LC	%r14
	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r14)	# load primary asce
	lg	%r14,__LC_CURRENT(%r14)
	mvi	__TI_sie(%r14),0
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between __sie64a and sie_exit should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL)
	lg	%r14,__SF_SIE_SAVEAREA(%r15)	# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_SIE_REASON(%r15)	# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_SIE_REASON(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
SYM_FUNC_END(__sie64a)
EXPORT_SYMBOL(__sie64a)
EXPORT_SYMBOL(sie_exit)
#endif
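
/*
 * The reason code returned in %r2 is the 0 stored to __SF_SIE_REASON
 * on entry, or -EFAULT if one of the EX_TABLE entries above redirected
 * a fault to .Lsie_fault. The interrupt and machine check handlers
 * below use the SIEEXIT macro, which replaces the saved PSW address in
 * %r9 with sie_exit, so a guest interrupted by an interrupt or machine
 * check is resumed on this exit path as well.
 */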

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

SYM_CODE_START(system_call)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	BPOFF
	lghi	%r14,0
.Lsysc_per:
	STBEAR	__LC_LAST_BREAK(%r13)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
	lg	%r15,__LC_KERNEL_STACK(%r13)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r8,%r8
	xgr	%r9,%r9
	xgr	%r10,%r10
	xgr	%r11,%r11
	la	%r2,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	mvc	__PT_R8(64,%r2),__LC_SAVE_AREA(%r13)
	MBEAR	%r2,%r13
	lgr	%r3,%r14
	brasl	%r14,__do_syscall
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	stpt	__LC_EXIT_TIMER(%r13)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(system_call)
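
# %r14 is cleared above and passed to __do_syscall in %r3: 0 for a
# normal svc, 1 when the handler is re-entered at .Lsysc_per from
# .Lpgm_svcper below for a single-stepped system call.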

#
# a new process exits the kernel with ret_from_fork
#
SYM_CODE_START(ret_from_fork)
	lgr	%r3,%r11
	brasl	%r14,__ret_from_fork
	STACKLEAK_ERASE
	GET_LC	%r13
	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	BPON
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	stpt	__LC_EXIT_TIMER(%r13)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(ret_from_fork)

/*
 * Program check handler routine
 */

SYM_CODE_START(pgm_check_handler)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	BPOFF
	lgr	%r10,%r15
	lmg	%r8,%r9,__LC_PGM_OLD_PSW(%r13)
	tmhh	%r8,0x0001		# coming from user space?
	jno	.Lpgm_skip_asce
	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
	j	3f			# -> fault in user space
.Lpgm_skip_asce:
1:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	2f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3(%r13),0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
2:	CHECK_STACK __LC_SAVE_AREA,%r13
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	# CHECK_VMAP_STACK branches to stack_overflow or 4f
	CHECK_VMAP_STACK __LC_SAVE_AREA,%r13,4f
3:	lg	%r15,__LC_KERNEL_STACK(%r13)
4:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	mvc	__PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK(%r13)
	stctg	%c1,%c1,__PT_CR1(%r11)
#if IS_ENABLED(CONFIG_KVM)
	ltg	%r12,__LC_GMAP(%r13)
	jz	5f
	clc	__GMAP_ASCE(8,%r12), __PT_CR1(%r11)
	jne	5f
	BPENTER	__SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST
	SIEEXIT __SF_SIE_CONTROL(%r10),%r13
#endif
5:	stmg	%r8,%r9,__PT_PSW(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	lgr	%r2,%r11
	brasl	%r14,__do_pgm_check
	tmhh	%r8,0x0001		# returning to user space?
	jno	.Lpgm_exit_kernel
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
.Lpgm_exit_kernel:
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	LBEAR	STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8,%r13),__LC_SVC_NEW_PSW(%r13)
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8(%r13)
	lghi	%r14,1
	LBEAR	__LC_PGM_LAST_BREAK(%r13)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE	# branch to .Lsysc_per
SYM_CODE_END(pgm_check_handler)
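
# .Lpgm_svcper builds a return PSW from the svc new PSW mask and the
# address of .Lsysc_per, then loads it with LPSWEY: the single-stepped
# svc is handled by the normal system call path, with %r14 = 1 so that
# __do_syscall can report the pending PER event.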

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
	.macro INT_HANDLER name,lc_old_psw,handler
SYM_CODE_START(\name)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	stckf	__LC_INT_CLOCK(%r13)
	stpt	__LC_SYS_ENTER_TIMER(%r13)
	STBEAR	__LC_LAST_BREAK(%r13)
	BPOFF
	lmg	%r8,%r9,\lc_old_psw(%r13)
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
#if IS_ENABLED(CONFIG_KVM)
	lg	%r10,__LC_CURRENT(%r13)
	tm	__TI_sie(%r10),0xff
	jz	0f
	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
#endif
0:	CHECK_STACK __LC_SAVE_AREA,%r13
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	2f
1:	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
	lg	%r15,__LC_KERNEL_STACK(%r13)
2:	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	MBEAR	%r11,%r13
	stmg	%r8,%r9,__PT_PSW(%r11)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,\handler
	mvc	__LC_RETURN_PSW(16,%r13),__PT_PSW(%r11)
	tmhh	%r8,0x0001		# returning to user ?
	jno	2f
	STACKLEAK_ERASE
	lctlg	%c1,%c1,__LC_USER_ASCE(%r13)
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
2:	LBEAR	__PT_LAST_BREAK(%r11)
	lmg	%r0,%r15,__PT_R0(%r11)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(\name)
	.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
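
/*
 * Each instantiation above expands into a complete first-level
 * handler; ext_int_handler and io_int_handler differ only in the
 * old-PSW lowcore slot that is read and the C routine that is called.
 */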

/*
 * Machine check handler routines
 */
SYM_CODE_START(mcck_int_handler)
	BPOFF
	GET_LC	%r13
	lmg	%r8,%r9,__LC_MCK_OLD_PSW(%r13)
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CR_VALID
	jno	.Lmcck_panic		# control registers invalid -> panic
	ptlb
	lay	%r14,__LC_CPU_TIMER_SAVE_AREA(%r13)
	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
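	# If MCCK_CODE_CPU_TIMER_VALID is not set, the CPU timer content
	# is unreliable; in that case the smallest, i.e. most recently
	# stored, of the saved SYS_ENTER, EXIT and LAST_UPDATE timers is
	# loaded back (spt) as an approximation of the lost value.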
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_CPU_TIMER_VALID
	jo	3f
	la	%r14,__LC_SYS_ENTER_TIMER(%r13)
	clc	0(8,%r14),__LC_EXIT_TIMER(%r13)
	jl	1f
	la	%r14,__LC_EXIT_TIMER(%r13)
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER(%r13)
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER(%r13)
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8,%r13),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_MWP_VALID
	jno	.Lmcck_panic
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	.Lmcck_user
	TSTMSK	__LC_MCCK_CODE(%r13),MCCK_CODE_PSW_IA_VALID
	jno	.Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
	lg	%r10,__LC_CURRENT(%r13)
	tm	__TI_sie(%r10),0xff
	jz	.Lmcck_user
	# Need to compare the address instead of the __TI_sie flag.
	# Otherwise there would be a race between setting the flag
	# and entering SIE (or leaving and clearing the flag). This
	# would cause machine checks targeted at the guest to be
	# handled by the host.
	larl	%r14,.Lsie_entry
	clgrjl	%r9,%r14, 4f
	larl	%r14,.Lsie_leave
	clgrjhe	%r9,%r14, 4f
	lg	%r10,__LC_PCPU
	oi	__PCPU_FLAGS+7(%r10), _CIF_MCCK_GUEST
4:	BPENTER	__SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST
	SIEEXIT __SF_SIE_CONTROL(%r15),%r13
#endif
.Lmcck_user:
	lg	%r15,__LC_MCCK_STACK(%r13)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stctg	%c1,%c1,__PT_CR1(%r11)
	lctlg	%c1,%c1,__LC_KERNEL_ASCE(%r13)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lay	%r14,__LC_GPREGS_SAVE_AREA(%r13)
	mvc	__PT_R0(128,%r11),0(%r14)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	lctlg	%c1,%c1,__PT_CR1(%r11)
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11)	# move return PSW
	tm	__LC_RETURN_MCCK_PSW+1(%r13),0x01	# returning to user ?
	jno	0f
	BPON
	stpt	__LC_EXIT_TIMER(%r13)
0:	ALTERNATIVE "brcl 0,0", __stringify(lay %r12,__LC_LAST_BREAK_SAVE_AREA(%r13)),\
		ALT_FACILITY(193)
	LBEAR	0(%r12)
	lmg	%r11,%r15,__PT_R11(%r11)
	LPSWEY	__LC_RETURN_MCCK_PSW,__LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
	/*
	 * Iterate over all possible CPU addresses in the range 0..0xffff
	 * and stop each CPU using signal processor. Use compare and swap
	 * to allow just one CPU-stopper and prevent concurrent CPUs from
	 * stopping each other while leaving the others running.
	 */
	lhi	%r5,0
	lhi	%r6,1
	larl	%r7,stop_lock
	cs	%r5,%r6,0(%r7)		# single CPU-stopper only
	jnz	4f
	larl	%r7,this_cpu
	stap	0(%r7)			# this CPU address
	lh	%r4,0(%r7)
	nilh	%r4,0
	lhi	%r0,1
	sll	%r0,16			# CPU counter
	lhi	%r3,0			# next CPU address
0:	cr	%r3,%r4
	je	2f
1:	sigp	%r1,%r3,SIGP_STOP	# stop next CPU
	brc	SIGP_CC_BUSY,1b
2:	ahi	%r3,1
	brct	%r0,0b
3:	sigp	%r1,%r4,SIGP_STOP	# stop this CPU
	brc	SIGP_CC_BUSY,3b
4:	j	4b
SYM_CODE_END(mcck_int_handler)

SYM_CODE_START(restart_int_handler)
	ALTERNATIVE "nop", "lpp _LPP_OFFSET", ALT_FACILITY(40)
	stg	%r15,__LC_SAVE_AREA_RESTART
	TSTMSK	__LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
	jz	0f
	lctlg	%c0,%c15,__LC_CREGS_SAVE_AREA
0:	larl	%r15,daton_psw
	lpswe	0(%r15)				# turn dat on, keep irqs off
.Ldaton:
	GET_LC	%r15
	lg	%r15,__LC_RESTART_STACK(%r15)
	xc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	GET_LC	%r13
	mvc	STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART(%r13)
	mvc	STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW(%r13)
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN(%r13)	# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA(%r13)
	lgf	%r3,__LC_RESTART_SOURCE(%r13)
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b
SYM_CODE_END(restart_int_handler)

	__INIT
SYM_CODE_START(early_pgm_check_handler)
	STMG_LC	%r8,%r15,__LC_SAVE_AREA
	GET_LC	%r13
	aghi	%r15,-(STACK_FRAME_OVERHEAD+__PT_SIZE)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_PSW(16,%r11),__LC_PGM_OLD_PSW(%r13)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA(%r13)
	lgr	%r2,%r11
	brasl	%r14,__do_early_pgm_check
	mvc	__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
	lmg	%r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
	LPSWEY	__LC_RETURN_PSW,__LC_RETURN_LPSWE
SYM_CODE_END(early_pgm_check_handler)
	__FINIT

	.section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
SYM_CODE_START(stack_overflow)
	GET_LC	%r15
	lg	%r15,__LC_NODAT_STACK(%r15)	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11)	# store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11			# pass pointer to pt_regs
	jg	kernel_stack_overflow
SYM_CODE_END(stack_overflow)
#endif

	.section .data, "aw"
	.balign	4
SYM_DATA_LOCAL(stop_lock, .long 0)
SYM_DATA_LOCAL(this_cpu, .short 0)
	.balign	8
SYM_DATA_START_LOCAL(daton_psw)
	.quad	PSW_KERNEL_BITS
	.quad	.Ldaton
SYM_DATA_END(daton_psw)

	.section .rodata, "a"
	.balign	8
#define SYSCALL(esame,emu)	.quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table)
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)	.quad __s390_ ## emu
SYM_DATA_START(sys_call_table_emu)
#include "asm/syscall_table.h"
SYM_DATA_END(sys_call_table_emu)
#undef SYSCALL
#endif
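
/*
 * asm/syscall_table.h contains one SYSCALL(esame,emu) entry per system
 * call. Redefining SYSCALL before each include emits the native 64-bit
 * table and, with CONFIG_COMPAT, the 31-bit emulation table from the
 * same list.
 */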