/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/kernel/entry.S
 *
 * Kernel entry-points.
 */

#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/pal.h>
#include <asm/errno.h>
#include <asm/unistd.h>

	.text
	.set noat
	.cfi_sections	.debug_frame

/* Stack offsets. */
#define SP_OFF			184
#define SWITCH_STACK_SIZE	64

.macro	CFI_START_OSF_FRAME	func
	.align	4
	.globl	\func
	.type	\func,@function
\func:
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa	$sp, 48
	.cfi_rel_offset	64, 8
	.cfi_rel_offset	$gp, 16
	.cfi_rel_offset	$16, 24
	.cfi_rel_offset	$17, 32
	.cfi_rel_offset	$18, 40
.endm

.macro	CFI_END_OSF_FRAME	func
	.cfi_endproc
	.size	\func, . - \func
.endm

/*
 * This defines the normal kernel pt-regs layout.
 *
 * regs 9-15 preserved by C code
 * regs 16-18 saved by PAL-code
 * regs 29-30 saved and set up by PAL-code
 * JRP - Save regs 16-18 in a special area of the stack, so that
 * the palcode-provided values are available to the signal handler.
 */
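
/*
 * For orientation: the frame that SAVE_ALL builds below is struct pt_regs
 * (asm/ptrace.h).  Sketched in C, abridged, with the offsets taken from the
 * stores in this file:
 *
 *	struct pt_regs {
 *		unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8;	// 0..64
 *		unsigned long r19, r20, r21, r22, r23, r24,
 *			      r25, r26, r27, r28;			// 72..144
 *		unsigned long hae;					// 152
 *		unsigned long trap_a0, trap_a1, trap_a2;		// 160..176
 *		// pushed by PALcode before we get control:
 *		unsigned long ps, pc, gp, r16, r17, r18;		// 184..224
 *	};
 *
 * SP_OFF (184) is the offset of "ps", i.e. the part of the frame that
 * SAVE_ALL allocates itself; the 48 bytes from "ps" up are the PALcode
 * frame that the CFI directives above describe.
 */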

.macro	SAVE_ALL
	subq	$sp, SP_OFF, $sp
	.cfi_adjust_cfa_offset	SP_OFF
	stq	$0, 0($sp)
	stq	$1, 8($sp)
	stq	$2, 16($sp)
	stq	$3, 24($sp)
	stq	$4, 32($sp)
	stq	$28, 144($sp)
	.cfi_rel_offset	$0, 0
	.cfi_rel_offset	$1, 8
	.cfi_rel_offset	$2, 16
	.cfi_rel_offset	$3, 24
	.cfi_rel_offset	$4, 32
	.cfi_rel_offset	$28, 144
	lda	$2, alpha_mv
	stq	$5, 40($sp)
	stq	$6, 48($sp)
	stq	$7, 56($sp)
	stq	$8, 64($sp)
	stq	$19, 72($sp)
	stq	$20, 80($sp)
	stq	$21, 88($sp)
	ldq	$2, HAE_CACHE($2)
	stq	$22, 96($sp)
	stq	$23, 104($sp)
	stq	$24, 112($sp)
	stq	$25, 120($sp)
	stq	$26, 128($sp)
	stq	$27, 136($sp)
	stq	$2, 152($sp)
	stq	$16, 160($sp)
	stq	$17, 168($sp)
	stq	$18, 176($sp)
	.cfi_rel_offset	$5, 40
	.cfi_rel_offset	$6, 48
	.cfi_rel_offset	$7, 56
	.cfi_rel_offset	$8, 64
	.cfi_rel_offset	$19, 72
	.cfi_rel_offset	$20, 80
	.cfi_rel_offset	$21, 88
	.cfi_rel_offset	$22, 96
	.cfi_rel_offset	$23, 104
	.cfi_rel_offset	$24, 112
	.cfi_rel_offset	$25, 120
	.cfi_rel_offset	$26, 128
	.cfi_rel_offset	$27, 136
.endm

.macro	RESTORE_ALL
	lda	$19, alpha_mv
	ldq	$0, 0($sp)
	ldq	$1, 8($sp)
	ldq	$2, 16($sp)
	ldq	$3, 24($sp)
	ldq	$21, 152($sp)
	ldq	$20, HAE_CACHE($19)
	ldq	$4, 32($sp)
	ldq	$5, 40($sp)
	ldq	$6, 48($sp)
	ldq	$7, 56($sp)
	subq	$20, $21, $20
	ldq	$8, 64($sp)
	beq	$20, 99f
	ldq	$20, HAE_REG($19)
	stq	$21, HAE_CACHE($19)
	stq	$21, 0($20)
99:	ldq	$19, 72($sp)
	ldq	$20, 80($sp)
	ldq	$21, 88($sp)
	ldq	$22, 96($sp)
	ldq	$23, 104($sp)
	ldq	$24, 112($sp)
	ldq	$25, 120($sp)
	ldq	$26, 128($sp)
	ldq	$27, 136($sp)
	ldq	$28, 144($sp)
	addq	$sp, SP_OFF, $sp
	.cfi_restore	$0
	.cfi_restore	$1
	.cfi_restore	$2
	.cfi_restore	$3
	.cfi_restore	$4
	.cfi_restore	$5
	.cfi_restore	$6
	.cfi_restore	$7
	.cfi_restore	$8
	.cfi_restore	$19
	.cfi_restore	$20
	.cfi_restore	$21
	.cfi_restore	$22
	.cfi_restore	$23
	.cfi_restore	$24
	.cfi_restore	$25
	.cfi_restore	$26
	.cfi_restore	$27
	.cfi_restore	$28
	.cfi_adjust_cfa_offset	-SP_OFF
.endm
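
/*
 * The interleaved HAE handling is the only subtle part of these macros.
 * In rough C, the tail of RESTORE_ALL does (alpha_mv is the machine
 * vector, regs the frame being popped):
 *
 *	if (regs->hae != alpha_mv.hae_cache) {
 *		alpha_mv.hae_cache = regs->hae;
 *		*alpha_mv.hae_register = regs->hae;
 *	}
 *
 * i.e. the saved Hardware Address Extension value is written back to the
 * chipset register only when it differs from the cached copy.
 */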

.macro	DO_SWITCH_STACK
	bsr	$1, do_switch_stack
	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
.endm

.macro	UNDO_SWITCH_STACK
	bsr	$1, undo_switch_stack
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-SWITCH_STACK_SIZE
.endm

/*
 * Non-syscall kernel entry points.
 */

CFI_START_OSF_FRAME entInt
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $19
	jsr	$31, do_entInt
CFI_END_OSF_FRAME entInt

CFI_START_OSF_FRAME entArith
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $18
	jsr	$31, do_entArith
CFI_END_OSF_FRAME entArith
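
/*
 * All of these entry points find the current thread the same way: the
 * kernel stack is THREAD_SIZE (16KB) aligned with struct thread_info at
 * its base, so clearing the low 14 bits of $sp yields the thread_info
 * pointer.  Roughly, in C:
 *
 *	ti = (struct thread_info *)(sp & ~0x3fffUL);
 *
 * which is what every "lda $8, 0x3fff; bic $sp, $8, $8" pair computes,
 * leaving the pointer in $8 for the C handlers and for the TI_FLAGS and
 * TI_STATUS accesses later in this file.
 */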

CFI_START_OSF_FRAME entMM
	SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them.  */
	subq	$sp, 56, $sp
	.cfi_adjust_cfa_offset	56
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
	addq	$sp, 56, $19
/* handle the fault */
	lda	$8, 0x3fff
	bic	$sp, $8, $8
	jsr	$26, do_page_fault
/* reload the registers after the exception code played. */
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	addq	$sp, 56, $sp
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-56
/* finish up the syscall as normal.  */
	br	ret_from_sys_call
CFI_END_OSF_FRAME entMM

CFI_START_OSF_FRAME entIF
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $17
	jsr	$31, do_entIF
CFI_END_OSF_FRAME entIF

CFI_START_OSF_FRAME entUna
	lda	$sp, -256($sp)
	.cfi_adjust_cfa_offset	256
	stq	$0, 0($sp)
	.cfi_rel_offset	$0, 0
	.cfi_remember_state
	ldq	$0, 256($sp)	/* get PS */
	stq	$1, 8($sp)
	stq	$2, 16($sp)
	stq	$3, 24($sp)
	and	$0, 8, $0	/* user mode? */
	stq	$4, 32($sp)
	bne	$0, entUnaUser	/* yup -> do user-level unaligned fault */
	stq	$5, 40($sp)
	stq	$6, 48($sp)
	stq	$7, 56($sp)
	stq	$8, 64($sp)
	stq	$9, 72($sp)
	stq	$10, 80($sp)
	stq	$11, 88($sp)
	stq	$12, 96($sp)
	stq	$13, 104($sp)
	stq	$14, 112($sp)
	stq	$15, 120($sp)
	/* 16-18 PAL-saved */
	stq	$19, 152($sp)
	stq	$20, 160($sp)
	stq	$21, 168($sp)
	stq	$22, 176($sp)
	stq	$23, 184($sp)
	stq	$24, 192($sp)
	stq	$25, 200($sp)
	stq	$26, 208($sp)
	stq	$27, 216($sp)
	stq	$28, 224($sp)
	mov	$sp, $19
	stq	$gp, 232($sp)
	.cfi_rel_offset	$1, 1*8
	.cfi_rel_offset	$2, 2*8
	.cfi_rel_offset	$3, 3*8
	.cfi_rel_offset	$4, 4*8
	.cfi_rel_offset	$5, 5*8
	.cfi_rel_offset	$6, 6*8
	.cfi_rel_offset	$7, 7*8
	.cfi_rel_offset	$8, 8*8
	.cfi_rel_offset	$9, 9*8
	.cfi_rel_offset	$10, 10*8
	.cfi_rel_offset	$11, 11*8
	.cfi_rel_offset	$12, 12*8
	.cfi_rel_offset	$13, 13*8
	.cfi_rel_offset	$14, 14*8
	.cfi_rel_offset	$15, 15*8
	.cfi_rel_offset	$19, 19*8
	.cfi_rel_offset	$20, 20*8
	.cfi_rel_offset	$21, 21*8
	.cfi_rel_offset	$22, 22*8
	.cfi_rel_offset	$23, 23*8
	.cfi_rel_offset	$24, 24*8
	.cfi_rel_offset	$25, 25*8
	.cfi_rel_offset	$26, 26*8
	.cfi_rel_offset	$27, 27*8
	.cfi_rel_offset	$28, 28*8
	.cfi_rel_offset	$29, 29*8
	lda	$8, 0x3fff
	stq	$31, 248($sp)
	bic	$sp, $8, $8
	jsr	$26, do_entUna
	ldq	$0, 0($sp)
	ldq	$1, 8($sp)
	ldq	$2, 16($sp)
	ldq	$3, 24($sp)
	ldq	$4, 32($sp)
	ldq	$5, 40($sp)
	ldq	$6, 48($sp)
	ldq	$7, 56($sp)
	ldq	$8, 64($sp)
	ldq	$9, 72($sp)
	ldq	$10, 80($sp)
	ldq	$11, 88($sp)
	ldq	$12, 96($sp)
	ldq	$13, 104($sp)
	ldq	$14, 112($sp)
	ldq	$15, 120($sp)
	/* 16-18 PAL-saved */
	ldq	$19, 152($sp)
	ldq	$20, 160($sp)
	ldq	$21, 168($sp)
	ldq	$22, 176($sp)
	ldq	$23, 184($sp)
	ldq	$24, 192($sp)
	ldq	$25, 200($sp)
	ldq	$26, 208($sp)
	ldq	$27, 216($sp)
	ldq	$28, 224($sp)
	ldq	$gp, 232($sp)
	lda	$sp, 256($sp)
	.cfi_restore	$1
	.cfi_restore	$2
	.cfi_restore	$3
	.cfi_restore	$4
	.cfi_restore	$5
	.cfi_restore	$6
	.cfi_restore	$7
	.cfi_restore	$8
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_restore	$19
	.cfi_restore	$20
	.cfi_restore	$21
	.cfi_restore	$22
	.cfi_restore	$23
	.cfi_restore	$24
	.cfi_restore	$25
	.cfi_restore	$26
	.cfi_restore	$27
	.cfi_restore	$28
	.cfi_restore	$29
	.cfi_adjust_cfa_offset	-256
	call_pal PAL_rti
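
/*
 * Note the frame format used here: unlike SAVE_ALL, entUna stores each
 * integer register at 8 * its register number ($gp/$29 at 232, $31 at 248;
 * $16-$18 and $30 are skipped), so the C handler can treat the block as a
 * plain array and patch any register by number.  Roughly:
 *
 *	unsigned long *r = (unsigned long *)frame;	// passed in $19 above
 *	r[regno] = fixed_up_value;			// regno 0..30
 *
 * which is what the unaligned-access fixup needs in order to rewrite an
 * arbitrary destination register.
 */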

	.align	4
entUnaUser:
	.cfi_restore_state
	ldq	$0, 0($sp)	/* restore original $0 */
	lda	$sp, 256($sp)	/* pop entUna's stack frame */
	.cfi_restore	$0
	.cfi_adjust_cfa_offset	-256
	SAVE_ALL		/* setup normal kernel stack */
	lda	$sp, -56($sp)
	.cfi_adjust_cfa_offset	56
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
	lda	$8, 0x3fff
	addq	$sp, 56, $19
	bic	$sp, $8, $8
	jsr	$26, do_entUnaUser
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	lda	$sp, 56($sp)
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-56
	br	ret_from_sys_call
CFI_END_OSF_FRAME entUna

CFI_START_OSF_FRAME entDbg
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $16
	jsr	$31, do_entDbg
CFI_END_OSF_FRAME entDbg

/*
 * The system call entry point is special.  Most importantly, it looks
 * like a function call to userspace as far as clobbered registers.  We
 * do preserve the argument registers (for syscall restarts) and $26
 * (for leaf syscall functions).
 *
 * So much for theory.  We don't take advantage of this yet.
 *
 * Note that a0-a2 are not saved by PALcode as with the other entry points.
 */
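
/*
 * What entSys boils down to, sketched in C (v0 carries the syscall number
 * on entry, a0..a5 the arguments; names as used in this file):
 *
 *	regs->r16 = a0; regs->r17 = a1; regs->r18 = a2;		// kept for restarts
 *	if (flags & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT))	// audit bit only
 *		goto strace;					// with AUDITSYSCALL
 *	fn = (v0 < NR_syscalls) ? sys_call_table[v0] : sys_ni_syscall;
 *	v0 = fn(a0, a1, a2, a3, a4, a5);
 *	// fall through to ret_from_sys_call with the result in v0
 */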

	.align	4
	.globl	entSys
	.type	entSys, @function
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa	$sp, 48
	.cfi_rel_offset	64, 8
	.cfi_rel_offset	$gp, 16
entSys:
	SAVE_ALL
	lda	$8, 0x3fff
	bic	$sp, $8, $8
	lda	$4, NR_syscalls($31)
	stq	$16, SP_OFF+24($sp)
	lda	$5, sys_call_table
	lda	$27, sys_ni_syscall
	cmpult	$0, $4, $4
	ldl	$3, TI_FLAGS($8)
	stq	$17, SP_OFF+32($sp)
	s8addq	$0, $5, $5
	stq	$18, SP_OFF+40($sp)
	.cfi_rel_offset	$16, SP_OFF+24
	.cfi_rel_offset	$17, SP_OFF+32
	.cfi_rel_offset	$18, SP_OFF+40
#ifdef CONFIG_AUDITSYSCALL
	lda	$6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
	and	$3, $6, $3
	bne	$3, strace
#else
	blbs	$3, strace		/* check for SYSCALL_TRACE in disguise */
#endif
	beq	$4, 1f
	ldq	$27, 0($5)
1:	jsr	$26, ($27), sys_ni_syscall
	ldgp	$gp, 0($26)
	blt	$0, $syscall_error	/* the call failed */
$ret_success:
	stq	$0, 0($sp)
	stq	$31, 72($sp)	/* a3=0 => no error */

	.align	4
	.globl	ret_from_sys_call
ret_from_sys_call:
	cmovne	$26, 0, $18	/* $18 = 0 => non-restartable */
	ldq	$0, SP_OFF($sp)
	and	$0, 8, $0
	beq	$0, ret_to_kernel
ret_to_user:
	/* Make sure need_resched and sigpending don't change between
		sampling and the rti.  */
	lda	$16, 7
	call_pal PAL_swpipl
	ldl	$17, TI_FLAGS($8)
	and	$17, _TIF_WORK_MASK, $2
	bne	$2, work_pending
restore_all:
	ldl	$2, TI_STATUS($8)
	and	$2, TS_SAVED_FP | TS_RESTORE_FP, $3
	bne	$3, restore_fpu
restore_other:
	.cfi_remember_state
	RESTORE_ALL
	call_pal PAL_rti

ret_to_kernel:
	.cfi_restore_state
	lda	$16, 7
	call_pal PAL_swpipl
	br	restore_other

	.align 3
$syscall_error:
	/*
	 * Some system calls (e.g., ptrace) can return arbitrary
	 * values which might normally be mistaken as error numbers.
	 * Those functions must zero $0 (v0) directly in the stack
	 * frame to indicate that a negative return value wasn't an
	 * error number..
	 */
	ldq	$18, 0($sp)	/* old syscall nr (zero if success) */
	beq	$18, $ret_success

	ldq	$19, 72($sp)	/* .. and this a3 */
	subq	$31, $0, $0	/* with error in v0 */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	stq	$1, 72($sp)	/* a3 for return */
	br	ret_from_sys_call
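
/*
 * Return-value convention, as implemented by $ret_success/$syscall_error
 * above (the usual OSF/1 scheme): on success a3 is cleared and v0 holds the
 * result; on failure a3 is set to 1 and v0 holds a positive errno.  Roughly:
 *
 *	if (regs->r0 != 0) {	// not forced-successful (ptrace et al.)
 *		v0 = -v0;	// the handler returned -errno
 *		a3 = 1;		// tell userspace this is an error
 *	}			// ...and leave $18/$26 so a restart is possible
 *
 * so a typical libc stub does:  if (a3 != 0) { errno = v0; return -1; }
 */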

/*
 * Do all cleanup when returning from all interrupts and system calls.
 *
 * Arguments:
 *       $8: current.
 *      $17: TI_FLAGS.
 *      $18: The old syscall number, or zero if this is not a return
 *           from a syscall that errored and is possibly restartable.
 *      $19: The old a3 value
 */

	.align	4
	.type	work_pending, @function
work_pending:
	and	$17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL, $2
	bne	$2, $work_notifysig

$work_resched:
	/*
	 * We can get here only if we returned from syscall without SIGPENDING
	 * or got through work_notifysig already.  Either case means no syscall
	 * restarts for us, so let $18 and $19 burn.
	 */
	jsr	$26, schedule
	mov	0, $18
	br	ret_to_user

$work_notifysig:
	mov	$sp, $16
	DO_SWITCH_STACK
	jsr	$26, do_work_pending
	UNDO_SWITCH_STACK
	br	restore_all
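
/*
 * The pending-work handling above, in rough C (TI_FLAGS was sampled at
 * ret_to_user with interrupts blocked; old_syscall_nr/old_a3 travel in
 * $18/$19 as documented in the Arguments block):
 *
 *	if (flags & (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
 *		do_work_pending(regs, flags, old_syscall_nr, old_a3);
 *		goto restore_all;
 *	} else {			// only need_resched is left
 *		schedule();
 *		old_syscall_nr = 0;	// no syscall restart past this point
 *		goto ret_to_user;	// re-sample the flags
 *	}
 */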

/*
 * PTRACE syscall handler
 */

	.align	4
	.type	strace, @function
strace:
	/* set up signal stack, call syscall_trace */
	// NB: if anyone adds preemption, this block will need to be protected
	ldl	$1, TI_STATUS($8)
	and	$1, TS_SAVED_FP, $3
	or	$1, TS_SAVED_FP, $2
	bne	$3, 1f
	stl	$2, TI_STATUS($8)
	bsr	$26, __save_fpu
1:
	DO_SWITCH_STACK
	jsr	$26, syscall_trace_enter /* returns the syscall number */
	UNDO_SWITCH_STACK

	/* get the arguments back.. */
	ldq	$16, SP_OFF+24($sp)
	ldq	$17, SP_OFF+32($sp)
	ldq	$18, SP_OFF+40($sp)
	ldq	$19, 72($sp)
	ldq	$20, 80($sp)
	ldq	$21, 88($sp)

	/* get the system call pointer.. */
	lda	$1, NR_syscalls($31)
	lda	$2, sys_call_table
	lda	$27, sys_ni_syscall
	cmpult	$0, $1, $1
	s8addq	$0, $2, $2
	beq	$1, 1f
	ldq	$27, 0($2)
1:	jsr	$26, ($27), sys_gettimeofday
ret_from_straced:
	ldgp	$gp, 0($26)

	/* check return.. */
	blt	$0, $strace_error	/* the call failed */
$strace_success:
	stq	$31, 72($sp)	/* a3=0 => no error */
	stq	$0, 0($sp)	/* save return value */

	DO_SWITCH_STACK
	jsr	$26, syscall_trace_leave
	UNDO_SWITCH_STACK
	br	$31, ret_from_sys_call

	.align	3
$strace_error:
	ldq	$18, 0($sp)	/* old syscall nr (zero if success) */
	beq	$18, $strace_success
	ldq	$19, 72($sp)	/* .. and this a3 */

	subq	$31, $0, $0	/* with error in v0 */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	stq	$1, 72($sp)	/* a3 for return */

	DO_SWITCH_STACK
	mov	$18, $9		/* save old syscall number */
	mov	$19, $10	/* save old a3 */
	jsr	$26, syscall_trace_leave
	mov	$9, $18
	mov	$10, $19
	UNDO_SWITCH_STACK

	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	br	ret_from_sys_call
CFI_END_OSF_FRAME entSys

/*
 * Save and restore the switch stack -- aka the balance of the user context.
 */
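
/*
 * "Switch stack" is the callee-saved integer state that SAVE_ALL does not
 * capture.  FP state is handled separately via __save_fpu/restore_fpu below,
 * so SWITCH_STACK_SIZE is 64 bytes and the frame do_switch_stack pushes is,
 * in C terms, just:
 *
 *	struct switch_stack {
 *		unsigned long r9, r10, r11, r12, r13, r14, r15;
 *		unsigned long r26;		// return address
 *	};
 */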

	.align	4
	.type	do_switch_stack, @function
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa $sp, 0
	.cfi_register 64, $1
do_switch_stack:
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	stq	$26, 56($sp)
	ret	$31, ($1), 1
	.cfi_endproc
	.size do_switch_stack, .-do_switch_stack

	.align	4
	.type	undo_switch_stack, @function
	.cfi_startproc simple
	.cfi_def_cfa $sp, 0
	.cfi_register 64, $1
undo_switch_stack:
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	ldq	$26, 56($sp)
	lda	$sp, SWITCH_STACK_SIZE($sp)
	ret	$31, ($1), 1
	.cfi_endproc
	.size undo_switch_stack, .-undo_switch_stack

#define FR(n) n * 8 + TI_FP($8)
	.align	4
	.globl	__save_fpu
	.type	__save_fpu, @function
__save_fpu:
#define V(n) stt	$f##n, FR(n)
	V( 0); V( 1); V( 2); V( 3)
	V( 4); V( 5); V( 6); V( 7)
	V( 8); V( 9); V(10); V(11)
	V(12); V(13); V(14); V(15)
	V(16); V(17); V(18); V(19)
	V(20); V(21); V(22); V(23)
	V(24); V(25); V(26); V(27)
	mf_fpcr	$f0		# get fpcr
	V(28); V(29); V(30)
	stt	$f0, FR(31)	# save fpcr in slot of $f31
	ldt	$f0, FR(0)	# don't let "__save_fpu" change fp state.
	ret
#undef V
	.size	__save_fpu, .-__save_fpu

	.align	4
restore_fpu:
	and	$3, TS_RESTORE_FP, $3
	bic	$2, TS_SAVED_FP | TS_RESTORE_FP, $2
	beq	$3, 1f
#define V(n) ldt	$f##n, FR(n)
	ldt	$f30, FR(31)	# get saved fpcr
	V( 0); V( 1); V( 2); V( 3)
	mt_fpcr	$f30		# install saved fpcr
	V( 4); V( 5); V( 6); V( 7)
	V( 8); V( 9); V(10); V(11)
	V(12); V(13); V(14); V(15)
	V(16); V(17); V(18); V(19)
	V(20); V(21); V(22); V(23)
	V(24); V(25); V(26); V(27)
	V(28); V(29); V(30)
1:	stl	$2, TI_STATUS($8)
	br	restore_other
#undef V
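
/*
 * The lazy FPU handling above, sketched in C against the two TI_STATUS bits
 * (TS_SAVED_FP: the FP registers and fpcr have been dumped into the
 * thread_info save area at TI_FP; TS_RESTORE_FP: they must be reloaded
 * before returning to user mode):
 *
 *	context switch (alpha_switch_to, below):
 *		if (!(status & TS_RESTORE_FP)) {	// FPU state is live
 *			if (!(status & TS_SAVED_FP))
 *				__save_fpu();
 *			status |= TS_SAVED_FP | TS_RESTORE_FP;
 *		}
 *
 *	return to user mode (restore_all -> restore_fpu):
 *		if (status & TS_RESTORE_FP)
 *			reload $f0..$f30 and fpcr from the TI_FP area;
 *		status &= ~(TS_SAVED_FP | TS_RESTORE_FP);
 */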

/*
 * The meat of the context switch code.
 */
	.align	4
	.globl	alpha_switch_to
	.type	alpha_switch_to, @function
	.cfi_startproc
alpha_switch_to:
	DO_SWITCH_STACK
	ldl	$1, TI_STATUS($8)
	and	$1, TS_RESTORE_FP, $3
	bne	$3, 1f
	or	$1, TS_RESTORE_FP | TS_SAVED_FP, $2
	and	$1, TS_SAVED_FP, $3
	stl	$2, TI_STATUS($8)
	bne	$3, 1f
	bsr	$26, __save_fpu
1:
	call_pal PAL_swpctx
	lda	$8, 0x3fff
	UNDO_SWITCH_STACK
	bic	$sp, $8, $8
	mov	$17, $0
	ret
	.cfi_endproc
	.size	alpha_switch_to, .-alpha_switch_to

/*
 * New processes begin life here.
 */

	.globl	ret_from_fork
	.align	4
	.ent	ret_from_fork
ret_from_fork:
	lda	$26, ret_to_user
	mov	$17, $16
	jmp	$31, schedule_tail
	.end	ret_from_fork

/*
 * ... and new kernel threads - here
 */
	.align	4
	.globl	ret_from_kernel_thread
	.ent	ret_from_kernel_thread
ret_from_kernel_thread:
	mov	$17, $16
	jsr	$26, schedule_tail
	mov	$9, $27
	mov	$10, $16
	jsr	$26, ($9)
	br	$31, ret_to_user
	.end	ret_from_kernel_thread
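
/*
 * For a kernel thread the child's switch stack (set up by copy_thread())
 * carries the payload in callee-saved registers: $9 = function, $10 = its
 * argument.  So ret_from_kernel_thread above amounts to, roughly:
 *
 *	schedule_tail(prev);	// prev arrives in $17
 *	fn(arg);		// i.e. $9 called with $10
 *	// reached only if fn() returned, meaning it has set up a userspace
 *	// image (kernel_execve); head out through ret_to_user
 */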

/*
 * Special system calls.  Most of these are special in that they either
 * have to play switch_stack games.
 */

.macro	fork_like name
	.align	4
	.globl	alpha_\name
	.ent	alpha_\name
alpha_\name:
	.prologue 0
	bsr	$1, do_switch_stack
	// NB: if anyone adds preemption, this block will need to be protected
	ldl	$1, TI_STATUS($8)
	and	$1, TS_SAVED_FP, $3
	or	$1, TS_SAVED_FP, $2
	bne	$3, 1f
	stl	$2, TI_STATUS($8)
	bsr	$26, __save_fpu
1:
	jsr	$26, sys_\name
	ldq	$26, 56($sp)
	lda	$sp, SWITCH_STACK_SIZE($sp)
	ret
	.end	alpha_\name
.endm

fork_like fork
fork_like vfork
fork_like clone
fork_like clone3

.macro	sigreturn_like name
	.align	4
	.globl	sys_\name
	.ent	sys_\name
sys_\name:
	.prologue 0
	lda	$9, ret_from_straced
	cmpult	$26, $9, $9
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	jsr	$26, do_\name
	bne	$9, 1f
	jsr	$26, syscall_trace_leave
1:	br	$1, undo_switch_stack
	br	ret_from_sys_call
.end sys_\name
.endm

sigreturn_like sigreturn
sigreturn_like rt_sigreturn

	.align	4
	.globl	alpha_syscall_zero
	.ent	alpha_syscall_zero
alpha_syscall_zero:
	.prologue 0
	/* Special because it needs to do something opposite to
	   force_successful_syscall_return().  We use the saved
	   syscall number for that, zero meaning "not an error".
	   That works nicely, but for real syscall 0 we need to
	   make sure that this logics doesn't get confused.
	   Store a non-zero there - -ENOSYS we need in register
	   for our return value will do just fine.
	 */
	lda	$0, -ENOSYS
	unop
	stq	$0, 0($sp)
	ret
	.end	alpha_syscall_zero