/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/kernel/entry.S
 *
 * Kernel entry-points.
 */

#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/pal.h>
#include <asm/errno.h>
#include <asm/unistd.h>

	.text
	.set noat
	.cfi_sections	.debug_frame

/* Stack offsets. */
#define SP_OFF			184
#define SWITCH_STACK_SIZE	64

.macro	CFI_START_OSF_FRAME	func
	.align	4
	.globl	\func
	.type	\func,@function
\func:
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa	$sp, 48
	.cfi_rel_offset	64, 8
	.cfi_rel_offset	$gp, 16
	.cfi_rel_offset	$16, 24
	.cfi_rel_offset	$17, 32
	.cfi_rel_offset	$18, 40
.endm

.macro	CFI_END_OSF_FRAME	func
	.cfi_endproc
	.size	\func, . - \func
.endm

/*
 * This defines the normal kernel pt-regs layout.
 *
 * regs 9-15 preserved by C code
 * regs 16-18 saved by PAL-code
 * regs 29-30 saved and set up by PAL-code
 * JRP - Save regs 16-18 in a special area of the stack, so that
 * the palcode-provided values are available to the signal handler.
 */
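/*
 * For orientation, a sketch of the frame this yields (offsets from $sp
 * after SAVE_ALL, as implied by the stores below and by the 48-byte PAL
 * frame described by the CFI directives above; struct pt_regs in
 * asm/ptrace.h remains the authoritative definition):
 *
 *	  0- 64		$0-$8
 *	 72-144		$19-$28
 *	152		hae
 *	160-176		copies of $16-$18 saved for the signal handler
 *	184 (SP_OFF)	ps
 *	192		pc
 *	200		gp
 *	208-224		$16-$18		(saved by PAL-code)
 */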

.macro	SAVE_ALL
	subq	$sp, SP_OFF, $sp
	.cfi_adjust_cfa_offset	SP_OFF
	stq	$0, 0($sp)
	stq	$1, 8($sp)
	stq	$2, 16($sp)
	stq	$3, 24($sp)
	stq	$4, 32($sp)
	stq	$28, 144($sp)
	.cfi_rel_offset	$0, 0
	.cfi_rel_offset	$1, 8
	.cfi_rel_offset	$2, 16
	.cfi_rel_offset	$3, 24
	.cfi_rel_offset	$4, 32
	.cfi_rel_offset	$28, 144
	lda	$2, alpha_mv
	stq	$5, 40($sp)
	stq	$6, 48($sp)
	stq	$7, 56($sp)
	stq	$8, 64($sp)
	stq	$19, 72($sp)
	stq	$20, 80($sp)
	stq	$21, 88($sp)
	ldq	$2, HAE_CACHE($2)
	stq	$22, 96($sp)
	stq	$23, 104($sp)
	stq	$24, 112($sp)
	stq	$25, 120($sp)
	stq	$26, 128($sp)
	stq	$27, 136($sp)
	stq	$2, 152($sp)
	stq	$16, 160($sp)
	stq	$17, 168($sp)
	stq	$18, 176($sp)
	.cfi_rel_offset	$5, 40
	.cfi_rel_offset	$6, 48
	.cfi_rel_offset	$7, 56
	.cfi_rel_offset	$8, 64
	.cfi_rel_offset	$19, 72
	.cfi_rel_offset	$20, 80
	.cfi_rel_offset	$21, 88
	.cfi_rel_offset	$22, 96
	.cfi_rel_offset	$23, 104
	.cfi_rel_offset	$24, 112
	.cfi_rel_offset	$25, 120
	.cfi_rel_offset	$26, 128
	.cfi_rel_offset	$27, 136
.endm

.macro	RESTORE_ALL
	lda	$19, alpha_mv
	ldq	$0, 0($sp)
	ldq	$1, 8($sp)
	ldq	$2, 16($sp)
	ldq	$3, 24($sp)
	ldq	$21, 152($sp)
	ldq	$20, HAE_CACHE($19)
	ldq	$4, 32($sp)
	ldq	$5, 40($sp)
	ldq	$6, 48($sp)
	ldq	$7, 56($sp)
	subq	$20, $21, $20
	ldq	$8, 64($sp)
	beq	$20, 99f
	ldq	$20, HAE_REG($19)
	stq	$21, HAE_CACHE($19)
	stq	$21, 0($20)
99:	ldq	$19, 72($sp)
	ldq	$20, 80($sp)
	ldq	$21, 88($sp)
	ldq	$22, 96($sp)
	ldq	$23, 104($sp)
	ldq	$24, 112($sp)
	ldq	$25, 120($sp)
	ldq	$26, 128($sp)
	ldq	$27, 136($sp)
	ldq	$28, 144($sp)
	addq	$sp, SP_OFF, $sp
	.cfi_restore	$0
	.cfi_restore	$1
	.cfi_restore	$2
	.cfi_restore	$3
	.cfi_restore	$4
	.cfi_restore	$5
	.cfi_restore	$6
	.cfi_restore	$7
	.cfi_restore	$8
	.cfi_restore	$19
	.cfi_restore	$20
	.cfi_restore	$21
	.cfi_restore	$22
	.cfi_restore	$23
	.cfi_restore	$24
	.cfi_restore	$25
	.cfi_restore	$26
	.cfi_restore	$27
	.cfi_restore	$28
	.cfi_adjust_cfa_offset	-SP_OFF
.endm

.macro	DO_SWITCH_STACK
	bsr	$1, do_switch_stack
	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
.endm

.macro	UNDO_SWITCH_STACK
	bsr	$1, undo_switch_stack
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-SWITCH_STACK_SIZE
.endm

/*
 * Non-syscall kernel entry points.
 */

CFI_START_OSF_FRAME entInt
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $19
	jsr	$31, do_entInt
CFI_END_OSF_FRAME entInt

CFI_START_OSF_FRAME entArith
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $18
	jsr	$31, do_entArith
CFI_END_OSF_FRAME entArith

CFI_START_OSF_FRAME entMM
	SAVE_ALL
/* save $9 - $15 so the inline exception code can manipulate them.  */
	subq	$sp, 56, $sp
	.cfi_adjust_cfa_offset	56
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
	addq	$sp, 56, $19
/* handle the fault */
	lda	$8, 0x3fff
	bic	$sp, $8, $8
	jsr	$26, do_page_fault
/* reload the registers after the exception code played. */
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	addq	$sp, 56, $sp
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-56
/* finish up the syscall as normal. */
	br	ret_from_sys_call
CFI_END_OSF_FRAME entMM

CFI_START_OSF_FRAME entIF
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $17
	jsr	$31, do_entIF
CFI_END_OSF_FRAME entIF

CFI_START_OSF_FRAME entUna
	lda	$sp, -256($sp)
	.cfi_adjust_cfa_offset	256
	stq	$0, 0($sp)
	.cfi_rel_offset	$0, 0
	.cfi_remember_state
	ldq	$0, 256($sp)	/* get PS */
	stq	$1, 8($sp)
	stq	$2, 16($sp)
	stq	$3, 24($sp)
	and	$0, 8, $0	/* user mode? */
	stq	$4, 32($sp)
	bne	$0, entUnaUser	/* yup -> do user-level unaligned fault */
	stq	$5, 40($sp)
	stq	$6, 48($sp)
	stq	$7, 56($sp)
	stq	$8, 64($sp)
	stq	$9, 72($sp)
	stq	$10, 80($sp)
	stq	$11, 88($sp)
	stq	$12, 96($sp)
	stq	$13, 104($sp)
	stq	$14, 112($sp)
	stq	$15, 120($sp)
	/* 16-18 PAL-saved */
	stq	$19, 152($sp)
	stq	$20, 160($sp)
	stq	$21, 168($sp)
	stq	$22, 176($sp)
	stq	$23, 184($sp)
	stq	$24, 192($sp)
	stq	$25, 200($sp)
	stq	$26, 208($sp)
	stq	$27, 216($sp)
	stq	$28, 224($sp)
	mov	$sp, $19
	stq	$gp, 232($sp)
	.cfi_rel_offset	$1, 1*8
	.cfi_rel_offset	$2, 2*8
	.cfi_rel_offset	$3, 3*8
	.cfi_rel_offset	$4, 4*8
	.cfi_rel_offset	$5, 5*8
	.cfi_rel_offset	$6, 6*8
	.cfi_rel_offset	$7, 7*8
	.cfi_rel_offset	$8, 8*8
	.cfi_rel_offset	$9, 9*8
	.cfi_rel_offset	$10, 10*8
	.cfi_rel_offset	$11, 11*8
	.cfi_rel_offset	$12, 12*8
	.cfi_rel_offset	$13, 13*8
	.cfi_rel_offset	$14, 14*8
	.cfi_rel_offset	$15, 15*8
	.cfi_rel_offset	$19, 19*8
	.cfi_rel_offset	$20, 20*8
	.cfi_rel_offset	$21, 21*8
	.cfi_rel_offset	$22, 22*8
	.cfi_rel_offset	$23, 23*8
	.cfi_rel_offset	$24, 24*8
	.cfi_rel_offset	$25, 25*8
	.cfi_rel_offset	$26, 26*8
	.cfi_rel_offset	$27, 27*8
	.cfi_rel_offset	$28, 28*8
	.cfi_rel_offset	$29, 29*8
	lda	$8, 0x3fff
	stq	$31, 248($sp)
	bic	$sp, $8, $8
	jsr	$26, do_entUna
	ldq	$0, 0($sp)
	ldq	$1, 8($sp)
	ldq	$2, 16($sp)
	ldq	$3, 24($sp)
	ldq	$4, 32($sp)
	ldq	$5, 40($sp)
	ldq	$6, 48($sp)
	ldq	$7, 56($sp)
	ldq	$8, 64($sp)
	ldq	$9, 72($sp)
	ldq	$10, 80($sp)
	ldq	$11, 88($sp)
	ldq	$12, 96($sp)
	ldq	$13, 104($sp)
	ldq	$14, 112($sp)
	ldq	$15, 120($sp)
	/* 16-18 PAL-saved */
	ldq	$19, 152($sp)
	ldq	$20, 160($sp)
	ldq	$21, 168($sp)
	ldq	$22, 176($sp)
	ldq	$23, 184($sp)
	ldq	$24, 192($sp)
	ldq	$25, 200($sp)
	ldq	$26, 208($sp)
	ldq	$27, 216($sp)
	ldq	$28, 224($sp)
	ldq	$gp, 232($sp)
	lda	$sp, 256($sp)
	.cfi_restore	$1
	.cfi_restore	$2
	.cfi_restore	$3
	.cfi_restore	$4
	.cfi_restore	$5
	.cfi_restore	$6
	.cfi_restore	$7
	.cfi_restore	$8
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_restore	$19
	.cfi_restore	$20
	.cfi_restore	$21
	.cfi_restore	$22
	.cfi_restore	$23
	.cfi_restore	$24
	.cfi_restore	$25
	.cfi_restore	$26
	.cfi_restore	$27
	.cfi_restore	$28
	.cfi_restore	$29
	.cfi_adjust_cfa_offset	-256
	call_pal PAL_rti

	.align	4
entUnaUser:
	.cfi_restore_state
	ldq	$0, 0($sp)	/* restore original $0 */
	lda	$sp, 256($sp)	/* pop entUna's stack frame */
	.cfi_restore	$0
	.cfi_adjust_cfa_offset	-256
	SAVE_ALL		/* setup normal kernel stack */
	lda	$sp, -56($sp)
	.cfi_adjust_cfa_offset	56
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	.cfi_rel_offset	$9, 0
	.cfi_rel_offset	$10, 8
	.cfi_rel_offset	$11, 16
	.cfi_rel_offset	$12, 24
	.cfi_rel_offset	$13, 32
	.cfi_rel_offset	$14, 40
	.cfi_rel_offset	$15, 48
	lda	$8, 0x3fff
	addq	$sp, 56, $19
	bic	$sp, $8, $8
	jsr	$26, do_entUnaUser
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	lda	$sp, 56($sp)
	.cfi_restore	$9
	.cfi_restore	$10
	.cfi_restore	$11
	.cfi_restore	$12
	.cfi_restore	$13
	.cfi_restore	$14
	.cfi_restore	$15
	.cfi_adjust_cfa_offset	-56
	br	ret_from_sys_call
CFI_END_OSF_FRAME entUna

CFI_START_OSF_FRAME entDbg
	SAVE_ALL
	lda	$8, 0x3fff
	lda	$26, ret_from_sys_call
	bic	$sp, $8, $8
	mov	$sp, $16
	jsr	$31, do_entDbg
CFI_END_OSF_FRAME entDbg

/*
 * The system call entry point is special.  Most importantly, it looks
 * like a function call to userspace as far as clobbered registers are
 * concerned.  We do preserve the argument registers (for syscall
 * restarts) and $26 (for leaf syscall functions).
 *
 * So much for theory.  We don't take advantage of this yet.
 *
 * Note that a0-a2 are not saved by PALcode as with the other entry points.
 */

	.align	4
	.globl	entSys
	.type	entSys, @function
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa	$sp, 48
	.cfi_rel_offset	64, 8
	.cfi_rel_offset	$gp, 16
entSys:
	SAVE_ALL
	lda	$8, 0x3fff
	bic	$sp, $8, $8
	lda	$4, NR_syscalls($31)
	stq	$16, SP_OFF+24($sp)
	lda	$5, sys_call_table
	lda	$27, sys_ni_syscall
	cmpult	$0, $4, $4
	ldl	$3, TI_FLAGS($8)
	stq	$17, SP_OFF+32($sp)
	s8addq	$0, $5, $5
	stq	$18, SP_OFF+40($sp)
	.cfi_rel_offset	$16, SP_OFF+24
	.cfi_rel_offset	$17, SP_OFF+32
	.cfi_rel_offset	$18, SP_OFF+40
#ifdef CONFIG_AUDITSYSCALL
	lda	$6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
	and	$3, $6, $3
	bne	$3, strace
#else
	blbs	$3, strace		/* check for SYSCALL_TRACE in disguise */
#endif
	beq	$4, 1f
	ldq	$27, 0($5)
1:	jsr	$26, ($27), sys_ni_syscall
	ldgp	$gp, 0($26)
	blt	$0, $syscall_error	/* the call failed */
$ret_success:
	stq	$0, 0($sp)
	stq	$31, 72($sp)		/* a3=0 => no error */

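/*
 * A short note on the convention used from here on, as implied by the
 * code around $ret_success and $syscall_error rather than stated
 * explicitly: v0 ($0, slot 0($sp)) carries the result, or the positive
 * errno when a3 ($19, slot 72($sp)) is non-zero; $18 holds the old
 * syscall number, and zeroing it (or entering with $26 != 0, see the
 * cmovne below) marks the syscall as non-restartable.
 */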
	.align	4
	.globl	ret_from_sys_call
ret_from_sys_call:
	cmovne	$26, 0, $18	/* $18 = 0 => non-restartable */
	ldq	$0, SP_OFF($sp)
	and	$0, 8, $0
	beq	$0, ret_to_kernel
ret_to_user:
	/* Make sure need_resched and sigpending don't change between
	   sampling and the rti.  */
	lda	$16, 7
	call_pal PAL_swpipl
	ldl	$17, TI_FLAGS($8)
	and	$17, _TIF_WORK_MASK, $2
	bne	$2, work_pending
restore_all:
	ldl	$2, TI_STATUS($8)
	and	$2, TS_SAVED_FP | TS_RESTORE_FP, $3
	bne	$3, restore_fpu
restore_other:
	.cfi_remember_state
	RESTORE_ALL
	call_pal PAL_rti

ret_to_kernel:
	.cfi_restore_state
	lda	$16, 7
	call_pal PAL_swpipl
	br	restore_other

	.align	3
$syscall_error:
	/*
	 * Some system calls (e.g., ptrace) can return arbitrary
	 * values which might normally be mistaken as error numbers.
	 * Those functions must zero $0 (v0) directly in the stack
	 * frame to indicate that a negative return value wasn't an
	 * error number..
	 */
	ldq	$18, 0($sp)	/* old syscall nr (zero if success) */
	beq	$18, $ret_success

	ldq	$19, 72($sp)	/* .. and this a3 */
	subq	$31, $0, $0	/* with error in v0 */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	stq	$1, 72($sp)	/* a3 for return */
	br	ret_from_sys_call

/*
 * Do all cleanup when returning from all interrupts and system calls.
 *
 * Arguments:
 *	$8: current.
 *	$17: TI_FLAGS.
 *	$18: The old syscall number, or zero if this is not a return
 *	     from a syscall that errored and is possibly restartable.
 *	$19: The old a3 value
 */

	.align	4
	.type	work_pending, @function
work_pending:
	and	$17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL, $2
	bne	$2, $work_notifysig

$work_resched:
	/*
	 * We can get here only if we returned from syscall without SIGPENDING
	 * or got through work_notifysig already.  Either case means no syscall
	 * restarts for us, so let $18 and $19 burn.
	 */
	jsr	$26, schedule
	mov	0, $18
	br	ret_to_user

$work_notifysig:
	mov	$sp, $16
	DO_SWITCH_STACK
	jsr	$26, do_work_pending
	UNDO_SWITCH_STACK
	br	restore_all

/*
 * PTRACE syscall handler
 */

	.align	4
	.type	strace, @function
strace:
	/* set up signal stack, call syscall_trace */
	// NB: if anyone adds preemption, this block will need to be protected
	ldl	$1, TI_STATUS($8)
	and	$1, TS_SAVED_FP, $3
	or	$1, TS_SAVED_FP, $2
	bne	$3, 1f
	stl	$2, TI_STATUS($8)
	bsr	$26, __save_fpu
1:
	DO_SWITCH_STACK
	jsr	$26, syscall_trace_enter /* returns the syscall number */
	UNDO_SWITCH_STACK

	/* get the arguments back.. */
	ldq	$16, SP_OFF+24($sp)
	ldq	$17, SP_OFF+32($sp)
	ldq	$18, SP_OFF+40($sp)
	ldq	$19, 72($sp)
	ldq	$20, 80($sp)
	ldq	$21, 88($sp)

	/* get the system call pointer.. */
	lda	$1, NR_syscalls($31)
	lda	$2, sys_call_table
	lda	$27, sys_ni_syscall
	cmpult	$0, $1, $1
	s8addq	$0, $2, $2
	beq	$1, 1f
	ldq	$27, 0($2)
1:	jsr	$26, ($27), sys_gettimeofday
ret_from_straced:
	ldgp	$gp, 0($26)

	/* check return.. */
	blt	$0, $strace_error	/* the call failed */
$strace_success:
	stq	$31, 72($sp)		/* a3=0 => no error */
	stq	$0, 0($sp)		/* save return value */

	DO_SWITCH_STACK
	jsr	$26, syscall_trace_leave
	UNDO_SWITCH_STACK
	br	$31, ret_from_sys_call

	.align	3
$strace_error:
	ldq	$18, 0($sp)	/* old syscall nr (zero if success) */
	beq	$18, $strace_success
	ldq	$19, 72($sp)	/* .. and this a3 */

	subq	$31, $0, $0	/* with error in v0 */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	stq	$1, 72($sp)	/* a3 for return */

	DO_SWITCH_STACK
	mov	$18, $9		/* save old syscall number */
	mov	$19, $10	/* save old a3 */
	jsr	$26, syscall_trace_leave
	mov	$9, $18
	mov	$10, $19
	UNDO_SWITCH_STACK

	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	br	ret_from_sys_call
CFI_END_OSF_FRAME entSys

/*
 * Save and restore the switch stack -- aka the balance of the user context.
 */
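/*
 * For reference, the layout used by do_switch_stack/undo_switch_stack
 * below (derived from the stores themselves): SWITCH_STACK_SIZE (64)
 * bytes holding the callee-saved registers $9-$15 at offsets 0-48 and
 * the caller's $26 at offset 56.
 */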

	.align	4
	.type	do_switch_stack, @function
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa	$sp, 0
	.cfi_register	64, $1
do_switch_stack:
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	stq	$26, 56($sp)
	ret	$31, ($1), 1
	.cfi_endproc
	.size	do_switch_stack, .-do_switch_stack

	.align	4
	.type	undo_switch_stack, @function
	.cfi_startproc simple
	.cfi_def_cfa	$sp, 0
	.cfi_register	64, $1
undo_switch_stack:
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	ldq	$26, 56($sp)
	lda	$sp, SWITCH_STACK_SIZE($sp)
	ret	$31, ($1), 1
	.cfi_endproc
	.size	undo_switch_stack, .-undo_switch_stack

#define FR(n) n * 8 + TI_FP($8)
	.align	4
	.globl	__save_fpu
	.type	__save_fpu, @function
__save_fpu:
#define V(n) stt $f##n, FR(n)
	V( 0); V( 1); V( 2); V( 3)
	V( 4); V( 5); V( 6); V( 7)
	V( 8); V( 9); V(10); V(11)
	V(12); V(13); V(14); V(15)
	V(16); V(17); V(18); V(19)
	V(20); V(21); V(22); V(23)
	V(24); V(25); V(26); V(27)
	mf_fpcr	$f0		# get fpcr
	V(28); V(29); V(30)
	stt	$f0, FR(31)	# save fpcr in slot of $f31
	ldt	$f0, FR(0)	# don't let "__save_fpu" change fp state.
	ret
#undef V
	.size	__save_fpu, .-__save_fpu

	.align	4
restore_fpu:
	and	$3, TS_RESTORE_FP, $3
	bic	$2, TS_SAVED_FP | TS_RESTORE_FP, $2
	beq	$3, 1f
#define V(n) ldt $f##n, FR(n)
	ldt	$f30, FR(31)	# get saved fpcr
	V( 0); V( 1); V( 2); V( 3)
	mt_fpcr	$f30		# install saved fpcr
	V( 4); V( 5); V( 6); V( 7)
	V( 8); V( 9); V(10); V(11)
	V(12); V(13); V(14); V(15)
	V(16); V(17); V(18); V(19)
	V(20); V(21); V(22); V(23)
	V(24); V(25); V(26); V(27)
	V(28); V(29); V(30)
1:	stl	$2, TI_STATUS($8)
	br	restore_other
#undef V


/*
 * The meat of the context switch code.
 */
	.align	4
	.globl	alpha_switch_to
	.type	alpha_switch_to, @function
	.cfi_startproc
alpha_switch_to:
	DO_SWITCH_STACK
	ldl	$1, TI_STATUS($8)
	and	$1, TS_RESTORE_FP, $3
	bne	$3, 1f
	or	$1, TS_RESTORE_FP | TS_SAVED_FP, $2
	and	$1, TS_SAVED_FP, $3
	stl	$2, TI_STATUS($8)
	bne	$3, 1f
	bsr	$26, __save_fpu
1:
	call_pal PAL_swpctx
	lda	$8, 0x3fff
	UNDO_SWITCH_STACK
	bic	$sp, $8, $8
	mov	$17, $0
	ret
	.cfi_endproc
	.size	alpha_switch_to, .-alpha_switch_to

/*
 * New processes begin life here.
 */

	.globl	ret_from_fork
	.align	4
	.ent	ret_from_fork
ret_from_fork:
	lda	$26, ret_to_user
	mov	$17, $16
	jmp	$31, schedule_tail
	.end	ret_from_fork

/*
 * ... and new kernel threads - here
 */
	.align	4
	.globl	ret_from_kernel_thread
	.ent	ret_from_kernel_thread
ret_from_kernel_thread:
	mov	$17, $16
	jsr	$26, schedule_tail
	mov	$9, $27
	mov	$10, $16
	jsr	$26, ($9)
	br	$31, ret_to_user
	.end	ret_from_kernel_thread


/*
 * Special system calls.  Most of these are special in that they
 * have to play switch_stack games.
 */
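/*
 * The switch stack pushed by fork_like below ends up sitting directly
 * under pt_regs while the sys_* function runs; presumably this is what
 * lets copy_thread seed the child from a complete snapshot of the
 * callee-saved and floating-point state.
 */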

.macro fork_like name
	.align	4
	.globl	alpha_\name
	.ent	alpha_\name
alpha_\name:
	.prologue 0
	bsr	$1, do_switch_stack
	// NB: if anyone adds preemption, this block will need to be protected
	ldl	$1, TI_STATUS($8)
	and	$1, TS_SAVED_FP, $3
	or	$1, TS_SAVED_FP, $2
	bne	$3, 1f
	stl	$2, TI_STATUS($8)
	bsr	$26, __save_fpu
1:
	jsr	$26, sys_\name
	ldq	$26, 56($sp)
	lda	$sp, SWITCH_STACK_SIZE($sp)
	ret
	.end	alpha_\name
.endm

fork_like fork
fork_like vfork
fork_like clone
fork_like clone3

.macro sigreturn_like name
	.align	4
	.globl	sys_\name
	.ent	sys_\name
sys_\name:
	.prologue 0
	lda	$9, ret_from_straced
	cmpult	$26, $9, $9
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	jsr	$26, do_\name
	bne	$9, 1f
	jsr	$26, syscall_trace_leave
1:	br	$1, undo_switch_stack
	br	ret_from_sys_call
	.end	sys_\name
.endm

sigreturn_like sigreturn
sigreturn_like rt_sigreturn

	.align	4
	.globl	alpha_syscall_zero
	.ent	alpha_syscall_zero
alpha_syscall_zero:
	.prologue 0
	/* Special because it needs to do something opposite to
	   force_successful_syscall_return().  We use the saved
	   syscall number for that, zero meaning "not an error".
	   That works nicely, but for real syscall 0 we need to
	   make sure that this logic doesn't get confused.
	   Store a non-zero there; the -ENOSYS we need in a register
	   for our return value will do just fine.
	 */
	lda	$0, -ENOSYS
	unop
	stq	$0, 0($sp)
	ret
	.end	alpha_syscall_zero