/*
 * arch/alpha/kernel/entry.S
 *
 * Kernel entry-points.
 */

#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/pal.h>
#include <asm/errno.h>
#include <asm/unistd.h>

        .text
        .set noat
        .cfi_sections .debug_frame

/* Stack offsets. */
#define SP_OFF                  184
#define SWITCH_STACK_SIZE       320

.macro CFI_START_OSF_FRAME func
        .align 4
        .globl \func
        .type \func,@function
\func:
        .cfi_startproc simple
        .cfi_return_column 64
        .cfi_def_cfa $sp, 48
        .cfi_rel_offset 64, 8
        .cfi_rel_offset $gp, 16
        .cfi_rel_offset $16, 24
        .cfi_rel_offset $17, 32
        .cfi_rel_offset $18, 40
.endm

.macro CFI_END_OSF_FRAME func
        .cfi_endproc
        .size \func, . - \func
.endm

/*
 * This defines the normal kernel pt-regs layout.
 *
 * regs 9-15 preserved by C code
 * regs 16-18 saved by PAL-code
 * regs 29-30 saved and set up by PAL-code
 * JRP - Save regs 16-18 in a special area of the stack, so that
 * the palcode-provided values are available to the signal handler.
 */

.macro SAVE_ALL
        subq $sp, SP_OFF, $sp
        .cfi_adjust_cfa_offset SP_OFF
        stq $0, 0($sp)
        stq $1, 8($sp)
        stq $2, 16($sp)
        stq $3, 24($sp)
        stq $4, 32($sp)
        stq $28, 144($sp)
        .cfi_rel_offset $0, 0
        .cfi_rel_offset $1, 8
        .cfi_rel_offset $2, 16
        .cfi_rel_offset $3, 24
        .cfi_rel_offset $4, 32
        .cfi_rel_offset $28, 144
        lda $2, alpha_mv
        stq $5, 40($sp)
        stq $6, 48($sp)
        stq $7, 56($sp)
        stq $8, 64($sp)
        stq $19, 72($sp)
        stq $20, 80($sp)
        stq $21, 88($sp)
        ldq $2, HAE_CACHE($2)
        stq $22, 96($sp)
        stq $23, 104($sp)
        stq $24, 112($sp)
        stq $25, 120($sp)
        stq $26, 128($sp)
        stq $27, 136($sp)
        stq $2, 152($sp)
        stq $16, 160($sp)
        stq $17, 168($sp)
        stq $18, 176($sp)
        .cfi_rel_offset $5, 40
        .cfi_rel_offset $6, 48
        .cfi_rel_offset $7, 56
        .cfi_rel_offset $8, 64
        .cfi_rel_offset $19, 72
        .cfi_rel_offset $20, 80
        .cfi_rel_offset $21, 88
        .cfi_rel_offset $22, 96
        .cfi_rel_offset $23, 104
        .cfi_rel_offset $24, 112
        .cfi_rel_offset $25, 120
        .cfi_rel_offset $26, 128
        .cfi_rel_offset $27, 136
.endm

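/*
 * For orientation: the fixed offsets used by SAVE_ALL/RESTORE_ALL add up
 * to the frame sketched below as a C struct.  This is inferred from the
 * stores above, not quoted from a kernel header, and the field names are
 * only illustrative.  SAVE_ALL allocates SP_OFF (184) bytes underneath
 * the 48-byte frame that the PAL-code has already pushed (ps, pc, gp and
 * the a0-a2 slots; on the syscall path entSys stores a0-a2 there itself,
 * since PALcode does not save them for that entry point).
 *
 *      struct frame_sketch {                                   // offsets
 *              unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8;       // 0..64
 *              unsigned long r19, r20, r21, r22, r23, r24, r25;        // 72..120
 *              unsigned long r26, r27, r28;                            // 128..144
 *              unsigned long hae;                                      // 152
 *              unsigned long trap_a0, trap_a1, trap_a2;                // 160..176
 *              unsigned long ps, pc, gp;                               // SP_OFF..SP_OFF+16
 *              unsigned long a0, a1, a2;                               // SP_OFF+24..SP_OFF+40
 *      };
 *      _Static_assert(sizeof(struct frame_sketch) == 184 + 48,
 *                     "184 is SP_OFF; six more quadwords sit above it");
 */
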
.macro RESTORE_ALL
        lda $19, alpha_mv
        ldq $0, 0($sp)
        ldq $1, 8($sp)
        ldq $2, 16($sp)
        ldq $3, 24($sp)
        ldq $21, 152($sp)
        ldq $20, HAE_CACHE($19)
        ldq $4, 32($sp)
        ldq $5, 40($sp)
        ldq $6, 48($sp)
        ldq $7, 56($sp)
        subq $20, $21, $20
        ldq $8, 64($sp)
        beq $20, 99f
        ldq $20, HAE_REG($19)
        stq $21, HAE_CACHE($19)
        stq $21, 0($20)
99:     ldq $19, 72($sp)
        ldq $20, 80($sp)
        ldq $21, 88($sp)
        ldq $22, 96($sp)
        ldq $23, 104($sp)
        ldq $24, 112($sp)
        ldq $25, 120($sp)
        ldq $26, 128($sp)
        ldq $27, 136($sp)
        ldq $28, 144($sp)
        addq $sp, SP_OFF, $sp
        .cfi_restore $0
        .cfi_restore $1
        .cfi_restore $2
        .cfi_restore $3
        .cfi_restore $4
        .cfi_restore $5
        .cfi_restore $6
        .cfi_restore $7
        .cfi_restore $8
        .cfi_restore $19
        .cfi_restore $20
        .cfi_restore $21
        .cfi_restore $22
        .cfi_restore $23
        .cfi_restore $24
        .cfi_restore $25
        .cfi_restore $26
        .cfi_restore $27
        .cfi_restore $28
        .cfi_adjust_cfa_offset -SP_OFF
.endm

.macro DO_SWITCH_STACK
        bsr $1, do_switch_stack
        .cfi_adjust_cfa_offset SWITCH_STACK_SIZE
        .cfi_rel_offset $9, 0
        .cfi_rel_offset $10, 8
        .cfi_rel_offset $11, 16
        .cfi_rel_offset $12, 24
        .cfi_rel_offset $13, 32
        .cfi_rel_offset $14, 40
        .cfi_rel_offset $15, 48
        /* We don't really care about the FP registers for debugging. */
.endm

.macro UNDO_SWITCH_STACK
        bsr $1, undo_switch_stack
        .cfi_restore $9
        .cfi_restore $10
        .cfi_restore $11
        .cfi_restore $12
        .cfi_restore $13
        .cfi_restore $14
        .cfi_restore $15
        .cfi_adjust_cfa_offset -SWITCH_STACK_SIZE
.endm

/*
 * Non-syscall kernel entry points.
 */

CFI_START_OSF_FRAME entInt
        SAVE_ALL
        lda $8, 0x3fff
        lda $26, ret_from_sys_call
        bic $sp, $8, $8
        mov $sp, $19
        jsr $31, do_entInt
CFI_END_OSF_FRAME entInt

CFI_START_OSF_FRAME entArith
        SAVE_ALL
        lda $8, 0x3fff
        lda $26, ret_from_sys_call
        bic $sp, $8, $8
        mov $sp, $18
        jsr $31, do_entArith
CFI_END_OSF_FRAME entArith

CFI_START_OSF_FRAME entMM
        SAVE_ALL
        /* save $9 - $15 so the inline exception code can manipulate them. */
        subq $sp, 56, $sp
        .cfi_adjust_cfa_offset 56
        stq $9, 0($sp)
        stq $10, 8($sp)
        stq $11, 16($sp)
        stq $12, 24($sp)
        stq $13, 32($sp)
        stq $14, 40($sp)
        stq $15, 48($sp)
        .cfi_rel_offset $9, 0
        .cfi_rel_offset $10, 8
        .cfi_rel_offset $11, 16
        .cfi_rel_offset $12, 24
        .cfi_rel_offset $13, 32
        .cfi_rel_offset $14, 40
        .cfi_rel_offset $15, 48
        addq $sp, 56, $19
        /* handle the fault */
        lda $8, 0x3fff
        bic $sp, $8, $8
        jsr $26, do_page_fault
        /* reload the registers after the exception code played. */
        ldq $9, 0($sp)
        ldq $10, 8($sp)
        ldq $11, 16($sp)
        ldq $12, 24($sp)
        ldq $13, 32($sp)
        ldq $14, 40($sp)
        ldq $15, 48($sp)
        addq $sp, 56, $sp
        .cfi_restore $9
        .cfi_restore $10
        .cfi_restore $11
        .cfi_restore $12
        .cfi_restore $13
        .cfi_restore $14
        .cfi_restore $15
        .cfi_adjust_cfa_offset -56
        /* finish up the syscall as normal. */
        br ret_from_sys_call
CFI_END_OSF_FRAME entMM

CFI_START_OSF_FRAME entIF
        SAVE_ALL
        lda $8, 0x3fff
        lda $26, ret_from_sys_call
        bic $sp, $8, $8
        mov $sp, $17
        jsr $31, do_entIF
CFI_END_OSF_FRAME entIF

CFI_START_OSF_FRAME entUna
        lda $sp, -256($sp)
        .cfi_adjust_cfa_offset 256
        stq $0, 0($sp)
        .cfi_rel_offset $0, 0
        .cfi_remember_state
        ldq $0, 256($sp)        /* get PS */
        stq $1, 8($sp)
        stq $2, 16($sp)
        stq $3, 24($sp)
        and $0, 8, $0           /* user mode? */
        stq $4, 32($sp)
        bne $0, entUnaUser      /* yup -> do user-level unaligned fault */
        stq $5, 40($sp)
        stq $6, 48($sp)
        stq $7, 56($sp)
        stq $8, 64($sp)
        stq $9, 72($sp)
        stq $10, 80($sp)
        stq $11, 88($sp)
        stq $12, 96($sp)
        stq $13, 104($sp)
        stq $14, 112($sp)
        stq $15, 120($sp)
        /* 16-18 PAL-saved */
        stq $19, 152($sp)
        stq $20, 160($sp)
        stq $21, 168($sp)
        stq $22, 176($sp)
        stq $23, 184($sp)
        stq $24, 192($sp)
        stq $25, 200($sp)
        stq $26, 208($sp)
        stq $27, 216($sp)
        stq $28, 224($sp)
        mov $sp, $19
        stq $gp, 232($sp)
        .cfi_rel_offset $1, 1*8
        .cfi_rel_offset $2, 2*8
        .cfi_rel_offset $3, 3*8
        .cfi_rel_offset $4, 4*8
        .cfi_rel_offset $5, 5*8
        .cfi_rel_offset $6, 6*8
        .cfi_rel_offset $7, 7*8
        .cfi_rel_offset $8, 8*8
        .cfi_rel_offset $9, 9*8
        .cfi_rel_offset $10, 10*8
        .cfi_rel_offset $11, 11*8
        .cfi_rel_offset $12, 12*8
        .cfi_rel_offset $13, 13*8
        .cfi_rel_offset $14, 14*8
        .cfi_rel_offset $15, 15*8
        .cfi_rel_offset $19, 19*8
        .cfi_rel_offset $20, 20*8
        .cfi_rel_offset $21, 21*8
        .cfi_rel_offset $22, 22*8
        .cfi_rel_offset $23, 23*8
        .cfi_rel_offset $24, 24*8
        .cfi_rel_offset $25, 25*8
        .cfi_rel_offset $26, 26*8
        .cfi_rel_offset $27, 27*8
        .cfi_rel_offset $28, 28*8
        .cfi_rel_offset $29, 29*8
        lda $8, 0x3fff
        stq $31, 248($sp)
        bic $sp, $8, $8
        jsr $26, do_entUna
        ldq $0, 0($sp)
        ldq $1, 8($sp)
        ldq $2, 16($sp)
        ldq $3, 24($sp)
        ldq $4, 32($sp)
        ldq $5, 40($sp)
        ldq $6, 48($sp)
        ldq $7, 56($sp)
        ldq $8, 64($sp)
        ldq $9, 72($sp)
        ldq $10, 80($sp)
        ldq $11, 88($sp)
        ldq $12, 96($sp)
        ldq $13, 104($sp)
        ldq $14, 112($sp)
        ldq $15, 120($sp)
        /* 16-18 PAL-saved */
        ldq $19, 152($sp)
        ldq $20, 160($sp)
        ldq $21, 168($sp)
        ldq $22, 176($sp)
        ldq $23, 184($sp)
        ldq $24, 192($sp)
        ldq $25, 200($sp)
        ldq $26, 208($sp)
        ldq $27, 216($sp)
        ldq $28, 224($sp)
        ldq $gp, 232($sp)
        lda $sp, 256($sp)
        .cfi_restore $1
        .cfi_restore $2
        .cfi_restore $3
        .cfi_restore $4
        .cfi_restore $5
        .cfi_restore $6
        .cfi_restore $7
        .cfi_restore $8
        .cfi_restore $9
        .cfi_restore $10
        .cfi_restore $11
        .cfi_restore $12
        .cfi_restore $13
        .cfi_restore $14
        .cfi_restore $15
        .cfi_restore $19
        .cfi_restore $20
        .cfi_restore $21
        .cfi_restore $22
        .cfi_restore $23
        .cfi_restore $24
        .cfi_restore $25
        .cfi_restore $26
        .cfi_restore $27
        .cfi_restore $28
        .cfi_restore $29
        .cfi_adjust_cfa_offset -256
        call_pal PAL_rti

        .align 4
entUnaUser:
        .cfi_restore_state
        ldq $0, 0($sp)          /* restore original $0 */
        lda $sp, 256($sp)       /* pop entUna's stack frame */
        .cfi_restore $0
        .cfi_adjust_cfa_offset -256
        SAVE_ALL                /* setup normal kernel stack */
        lda $sp, -56($sp)
        .cfi_adjust_cfa_offset 56
        stq $9, 0($sp)
        stq $10, 8($sp)
        stq $11, 16($sp)
        stq $12, 24($sp)
        stq $13, 32($sp)
        stq $14, 40($sp)
        stq $15, 48($sp)
        .cfi_rel_offset $9, 0
        .cfi_rel_offset $10, 8
        .cfi_rel_offset $11, 16
        .cfi_rel_offset $12, 24
        .cfi_rel_offset $13, 32
        .cfi_rel_offset $14, 40
        .cfi_rel_offset $15, 48
        lda $8, 0x3fff
        addq $sp, 56, $19
        bic $sp, $8, $8
        jsr $26, do_entUnaUser
        ldq $9, 0($sp)
        ldq $10, 8($sp)
        ldq $11, 16($sp)
        ldq $12, 24($sp)
        ldq $13, 32($sp)
        ldq $14, 40($sp)
        ldq $15, 48($sp)
        lda $sp, 56($sp)
        .cfi_restore $9
        .cfi_restore $10
        .cfi_restore $11
        .cfi_restore $12
        .cfi_restore $13
        .cfi_restore $14
        .cfi_restore $15
        .cfi_adjust_cfa_offset -56
        br ret_from_sys_call
CFI_END_OSF_FRAME entUna

CFI_START_OSF_FRAME entDbg
        SAVE_ALL
        lda $8, 0x3fff
        lda $26, ret_from_sys_call
        bic $sp, $8, $8
        mov $sp, $16
        jsr $31, do_entDbg
CFI_END_OSF_FRAME entDbg

/*
 * The system call entry point is special.  Most importantly, it looks
 * like a function call to userspace as far as clobbered registers.  We
 * do preserve the argument registers (for syscall restarts) and $26
 * (for leaf syscall functions).
 *
 * So much for theory.  We don't take advantage of this yet.
 *
 * Note that a0-a2 are not saved by PALcode as with the other entry points.
 */

        .align 4
        .globl entSys
        .type entSys, @function
        .cfi_startproc simple
        .cfi_return_column 64
        .cfi_def_cfa $sp, 48
        .cfi_rel_offset 64, 8
        .cfi_rel_offset $gp, 16
entSys:
        SAVE_ALL
        lda $8, 0x3fff
        bic $sp, $8, $8
        lda $4, NR_SYSCALLS($31)
        stq $16, SP_OFF+24($sp)
        lda $5, sys_call_table
        lda $27, sys_ni_syscall
        cmpult $0, $4, $4
        ldl $3, TI_FLAGS($8)
        stq $17, SP_OFF+32($sp)
        s8addq $0, $5, $5
        stq $18, SP_OFF+40($sp)
        .cfi_rel_offset $16, SP_OFF+24
        .cfi_rel_offset $17, SP_OFF+32
        .cfi_rel_offset $18, SP_OFF+40
#ifdef CONFIG_AUDITSYSCALL
        lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
        and $3, $6, $3
        bne $3, strace
#else
        blbs $3, strace         /* check for SYSCALL_TRACE in disguise */
#endif
        beq $4, 1f
        ldq $27, 0($5)
1:      jsr $26, ($27), alpha_ni_syscall
        ldgp $gp, 0($26)
        blt $0, $syscall_error  /* the call failed */
        stq $0, 0($sp)
        stq $31, 72($sp)        /* a3=0 => no error */

        .align 4
        .globl ret_from_sys_call
ret_from_sys_call:
        cmovne $26, 0, $18      /* $18 = 0 => non-restartable */
        ldq $0, SP_OFF($sp)
        and $0, 8, $0
        beq $0, ret_to_kernel
ret_to_user:
        /* Make sure need_resched and sigpending don't change between
           sampling and the rti.  */
        lda $16, 7
        call_pal PAL_swpipl
        ldl $17, TI_FLAGS($8)
        and $17, _TIF_WORK_MASK, $2
        bne $2, work_pending
restore_all:
        .cfi_remember_state
        RESTORE_ALL
        call_pal PAL_rti

ret_to_kernel:
        .cfi_restore_state
        lda $16, 7
        call_pal PAL_swpipl
        br restore_all
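
/*
 * The dispatch in entSys, restated as a C sketch of the control flow only.
 * The wrapper and argument names are illustrative; the six arguments arrive
 * in $16-$21, as strace below reloads them from the frame.
 *
 *      typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long,
 *                                 unsigned long, unsigned long, unsigned long);
 *      extern void *sys_call_table[];
 *      extern long sys_ni_syscall(void);
 *
 *      static long dispatch(unsigned long nr,
 *                           unsigned long a0, unsigned long a1, unsigned long a2,
 *                           unsigned long a3, unsigned long a4, unsigned long a5)
 *      {
 *              syscall_fn fn = (syscall_fn)sys_ni_syscall;     // preloaded into $27
 *              if (nr < NR_SYSCALLS)                           // cmpult $0, $4, $4
 *                      fn = (syscall_fn)sys_call_table[nr];    // s8addq + ldq $27, 0($5)
 *              return fn(a0, a1, a2, a3, a4, a5);              // jsr $26, ($27)
 *      }
 */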

        .align 3
$syscall_error:
        /*
         * Some system calls (e.g., ptrace) can return arbitrary
         * values which might normally be mistaken as error numbers.
         * Those functions must zero $0 (v0) directly in the stack
         * frame to indicate that a negative return value wasn't an
         * error number..
         */
        ldq $18, 0($sp)         /* old syscall nr (zero if success) */
        beq $18, $ret_success

        ldq $19, 72($sp)        /* .. and this a3 */
        subq $31, $0, $0        /* with error in v0 */
        addq $31, 1, $1         /* set a3 for errno return */
        stq $0, 0($sp)
        mov $31, $26            /* tell "ret_from_sys_call" we can restart */
        stq $1, 72($sp)         /* a3 for return */
        br ret_from_sys_call

$ret_success:
        stq $0, 0($sp)
        stq $31, 72($sp)        /* a3=0 => no error */
        br ret_from_sys_call

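/*
 * For reference, the v0/a3 protocol established in $syscall_error and
 * $ret_success is consumed on the user side roughly like this.  This is a
 * sketch of a conventional OSF/1-style libc stub, not code from this tree,
 * and "raw_v0"/"raw_a3" are illustrative names.
 *
 *      #include <errno.h>
 *
 *      static long syscall_return(long raw_v0, long raw_a3)
 *      {
 *              if (raw_a3) {           // kernel wrote 1 into the a3 slot
 *                      errno = raw_v0; // v0 carries the positive errno
 *                      return -1;
 *              }
 *              return raw_v0;          // a3 == 0: v0 is the real result
 *      }
 */
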
/*
 * Do all cleanup when returning from all interrupts and system calls.
 *
 * Arguments:
 *       $8: current.
 *      $17: TI_FLAGS.
 *      $18: The old syscall number, or zero if this is not a return
 *           from a syscall that errored and is possibly restartable.
 *      $19: The old a3 value
 */

        .align 4
        .type work_pending, @function
work_pending:
        and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2
        bne $2, $work_notifysig

$work_resched:
        /*
         * We can get here only if we returned from syscall without SIGPENDING
         * or got through work_notifysig already.  Either case means no syscall
         * restarts for us, so let $18 and $19 burn.
         */
        jsr $26, schedule
        mov 0, $18
        br ret_to_user

$work_notifysig:
        mov $sp, $16
        DO_SWITCH_STACK
        jsr $26, do_work_pending
        UNDO_SWITCH_STACK
        br restore_all

/*
 * PTRACE syscall handler
 */

        .align 4
        .type strace, @function
strace:
        /* set up signal stack, call syscall_trace */
        DO_SWITCH_STACK
        jsr $26, syscall_trace_enter    /* returns the syscall number */
        UNDO_SWITCH_STACK

        /* get the arguments back.. */
        ldq $16, SP_OFF+24($sp)
        ldq $17, SP_OFF+32($sp)
        ldq $18, SP_OFF+40($sp)
        ldq $19, 72($sp)
        ldq $20, 80($sp)
        ldq $21, 88($sp)

        /* get the system call pointer.. */
        lda $1, NR_SYSCALLS($31)
        lda $2, sys_call_table
        lda $27, alpha_ni_syscall
        cmpult $0, $1, $1
        s8addq $0, $2, $2
        beq $1, 1f
        ldq $27, 0($2)
1:      jsr $26, ($27), sys_gettimeofday
ret_from_straced:
        ldgp $gp, 0($26)

        /* check return.. */
        blt $0, $strace_error   /* the call failed */
        stq $31, 72($sp)        /* a3=0 => no error */
$strace_success:
        stq $0, 0($sp)          /* save return value */

        DO_SWITCH_STACK
        jsr $26, syscall_trace_leave
        UNDO_SWITCH_STACK
        br $31, ret_from_sys_call

        .align 3
$strace_error:
        ldq $18, 0($sp)         /* old syscall nr (zero if success) */
        beq $18, $strace_success
        ldq $19, 72($sp)        /* .. and this a3 */

        subq $31, $0, $0        /* with error in v0 */
        addq $31, 1, $1         /* set a3 for errno return */
        stq $0, 0($sp)
        stq $1, 72($sp)         /* a3 for return */

        DO_SWITCH_STACK
        mov $18, $9             /* save old syscall number */
        mov $19, $10            /* save old a3 */
        jsr $26, syscall_trace_leave
        mov $9, $18
        mov $10, $19
        UNDO_SWITCH_STACK

        mov $31, $26            /* tell "ret_from_sys_call" we can restart */
        br ret_from_sys_call
CFI_END_OSF_FRAME entSys

/*
 * Save and restore the switch stack -- aka the balance of the user context.
 */

        .align 4
        .type do_switch_stack, @function
        .cfi_startproc simple
        .cfi_return_column 64
        .cfi_def_cfa $sp, 0
        .cfi_register 64, $1
do_switch_stack:
        lda $sp, -SWITCH_STACK_SIZE($sp)
        .cfi_adjust_cfa_offset SWITCH_STACK_SIZE
        stq $9, 0($sp)
        stq $10, 8($sp)
        stq $11, 16($sp)
        stq $12, 24($sp)
        stq $13, 32($sp)
        stq $14, 40($sp)
        stq $15, 48($sp)
        stq $26, 56($sp)
        stt $f0, 64($sp)
        stt $f1, 72($sp)
        stt $f2, 80($sp)
        stt $f3, 88($sp)
        stt $f4, 96($sp)
        stt $f5, 104($sp)
        stt $f6, 112($sp)
        stt $f7, 120($sp)
        stt $f8, 128($sp)
        stt $f9, 136($sp)
        stt $f10, 144($sp)
        stt $f11, 152($sp)
        stt $f12, 160($sp)
        stt $f13, 168($sp)
        stt $f14, 176($sp)
        stt $f15, 184($sp)
        stt $f16, 192($sp)
        stt $f17, 200($sp)
        stt $f18, 208($sp)
        stt $f19, 216($sp)
        stt $f20, 224($sp)
        stt $f21, 232($sp)
        stt $f22, 240($sp)
        stt $f23, 248($sp)
        stt $f24, 256($sp)
        stt $f25, 264($sp)
        stt $f26, 272($sp)
        stt $f27, 280($sp)
        mf_fpcr $f0             # get fpcr
        stt $f28, 288($sp)
        stt $f29, 296($sp)
        stt $f30, 304($sp)
        stt $f0, 312($sp)       # save fpcr in slot of $f31
        ldt $f0, 64($sp)        # dont let "do_switch_stack" change fp state.
        ret $31, ($1), 1
        .cfi_endproc
        .size do_switch_stack, .-do_switch_stack

        .align 4
        .type undo_switch_stack, @function
        .cfi_startproc simple
        .cfi_def_cfa $sp, 0
        .cfi_register 64, $1
undo_switch_stack:
        ldq $9, 0($sp)
        ldq $10, 8($sp)
        ldq $11, 16($sp)
        ldq $12, 24($sp)
        ldq $13, 32($sp)
        ldq $14, 40($sp)
        ldq $15, 48($sp)
        ldq $26, 56($sp)
        ldt $f30, 312($sp)      # get saved fpcr
        ldt $f0, 64($sp)
        ldt $f1, 72($sp)
        ldt $f2, 80($sp)
        ldt $f3, 88($sp)
        mt_fpcr $f30            # install saved fpcr
        ldt $f4, 96($sp)
        ldt $f5, 104($sp)
        ldt $f6, 112($sp)
        ldt $f7, 120($sp)
        ldt $f8, 128($sp)
        ldt $f9, 136($sp)
        ldt $f10, 144($sp)
        ldt $f11, 152($sp)
        ldt $f12, 160($sp)
        ldt $f13, 168($sp)
        ldt $f14, 176($sp)
        ldt $f15, 184($sp)
        ldt $f16, 192($sp)
        ldt $f17, 200($sp)
        ldt $f18, 208($sp)
        ldt $f19, 216($sp)
        ldt $f20, 224($sp)
        ldt $f21, 232($sp)
        ldt $f22, 240($sp)
        ldt $f23, 248($sp)
        ldt $f24, 256($sp)
        ldt $f25, 264($sp)
        ldt $f26, 272($sp)
        ldt $f27, 280($sp)
        ldt $f28, 288($sp)
        ldt $f29, 296($sp)
        ldt $f30, 304($sp)
        lda $sp, SWITCH_STACK_SIZE($sp)
        ret $31, ($1), 1
        .cfi_endproc
        .size undo_switch_stack, .-undo_switch_stack

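/*
 * The frame built by do_switch_stack, pictured as a C struct.  Again this
 * is inferred from the offsets above rather than quoted from a header, and
 * the names are illustrative; the fpcr is parked in the slot that would
 * otherwise belong to $f31.
 *
 *      struct switch_stack_sketch {
 *              unsigned long r9, r10, r11, r12, r13, r14, r15; // 0..48: callee-saved
 *              unsigned long r26;                              // 56: return address
 *              unsigned long fp[32];                           // 64..312: $f0-$f30, then fpcr
 *      };
 *      _Static_assert(sizeof(struct switch_stack_sketch) == 320,
 *                     "must equal SWITCH_STACK_SIZE");
 */
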
/*
 * The meat of the context switch code.
 */

        .align 4
        .globl alpha_switch_to
        .type alpha_switch_to, @function
        .cfi_startproc
alpha_switch_to:
        DO_SWITCH_STACK
        call_pal PAL_swpctx
        lda $8, 0x3fff
        UNDO_SWITCH_STACK
        bic $sp, $8, $8
        mov $17, $0
        ret
        .cfi_endproc
        .size alpha_switch_to, .-alpha_switch_to

/*
 * New processes begin life here.
 */

        .globl ret_from_fork
        .align 4
        .ent ret_from_fork
ret_from_fork:
        lda $26, ret_from_sys_call
        mov $17, $16
        jmp $31, schedule_tail
.end ret_from_fork

/*
 * ... and new kernel threads - here
 */
        .align 4
        .globl ret_from_kernel_thread
        .ent ret_from_kernel_thread
ret_from_kernel_thread:
        mov $17, $16
        jsr $26, schedule_tail
        mov $9, $27
        mov $10, $16
        jsr $26, ($9)
        mov $31, $19            /* to disable syscall restarts */
        br $31, ret_to_user
.end ret_from_kernel_thread

/*
 * Special system calls.  Most of these are special in that they either
 * have to play switch_stack games or in some way use the pt_regs struct.
 */

.macro fork_like name
        .align 4
        .globl alpha_\name
        .ent alpha_\name
alpha_\name:
        .prologue 0
        bsr $1, do_switch_stack
        jsr $26, sys_\name
        ldq $26, 56($sp)
        lda $sp, SWITCH_STACK_SIZE($sp)
        ret
.end alpha_\name
.endm

fork_like fork
fork_like vfork
fork_like clone

        .align 4
        .globl sys_sigreturn
        .ent sys_sigreturn
sys_sigreturn:
        .prologue 0
        lda $9, ret_from_straced
        cmpult $26, $9, $9
        lda $sp, -SWITCH_STACK_SIZE($sp)
        jsr $26, do_sigreturn
        bne $9, 1f
        jsr $26, syscall_trace_leave
1:      br $1, undo_switch_stack
        br ret_from_sys_call
.end sys_sigreturn

        .align 4
        .globl sys_rt_sigreturn
        .ent sys_rt_sigreturn
sys_rt_sigreturn:
        .prologue 0
        lda $9, ret_from_straced
        cmpult $26, $9, $9
        lda $sp, -SWITCH_STACK_SIZE($sp)
        jsr $26, do_rt_sigreturn
        bne $9, 1f
        jsr $26, syscall_trace_leave
1:      br $1, undo_switch_stack
        br ret_from_sys_call
.end sys_rt_sigreturn

        .align 4
        .globl alpha_ni_syscall
        .ent alpha_ni_syscall
alpha_ni_syscall:
        .prologue 0
        /* Special because it also implements overflow handling via
           syscall number 0.  And if you recall, zero is a special
           trigger for "not an error".  Store large non-zero there.  */
        lda $0, -ENOSYS
        unop
        stq $0, 0($sp)
        ret
.end alpha_ni_syscall