1 /* SPDX-License-Identifier: GPL-2.0 */ !! 1 /* $Id: entry.S,v 1.144 2002/02/09 19:49:30 davem Exp $ 2 /* !! 2 * arch/sparc64/kernel/entry.S: Sparc64 trap low-level entry points. 3 * arch/alpha/kernel/entry.S << 4 * 3 * 5 * Kernel entry-points. !! 4 * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu) >> 5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) >> 6 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) >> 7 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 6 */ 8 */ 7 9 8 #include <asm/asm-offsets.h> !! 10 #include <linux/config.h> 9 #include <asm/thread_info.h> !! 11 #include <linux/errno.h> 10 #include <asm/pal.h> << 11 #include <asm/errno.h> << 12 #include <asm/unistd.h> << 13 << 14 .text << 15 .set noat << 16 .cfi_sections .debug_frame << 17 << 18 /* Stack offsets. */ << 19 #define SP_OFF 184 << 20 #define SWITCH_STACK_SIZE 64 << 21 << 22 .macro CFI_START_OSF_FRAME func << 23 .align 4 << 24 .globl \func << 25 .type \func,@function << 26 \func: << 27 .cfi_startproc simple << 28 .cfi_return_column 64 << 29 .cfi_def_cfa $sp, 48 << 30 .cfi_rel_offset 64, 8 << 31 .cfi_rel_offset $gp, 16 << 32 .cfi_rel_offset $16, 24 << 33 .cfi_rel_offset $17, 32 << 34 .cfi_rel_offset $18, 40 << 35 .endm << 36 << 37 .macro CFI_END_OSF_FRAME func << 38 .cfi_endproc << 39 .size \func, . - \func << 40 .endm << 41 12 42 /* !! 13 #include <asm/head.h> 43 * This defines the normal kernel pt-regs layo !! 14 #include <asm/asi.h> 44 * !! 15 #include <asm/smp.h> 45 * regs 9-15 preserved by C code !! 16 #include <asm/ptrace.h> 46 * regs 16-18 saved by PAL-code !! 17 #include <asm/page.h> 47 * regs 29-30 saved and set up by PAL-code !! 18 #include <asm/signal.h> 48 * JRP - Save regs 16-18 in a special area of !! 19 #include <asm/pgtable.h> 49 * the palcode-provided values are available t !! 20 #include <asm/processor.h> 50 */ !! 21 #include <asm/visasm.h> >> 22 #include <asm/estate.h> >> 23 #include <asm/auxio.h> 51 24 52 .macro SAVE_ALL !! 
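/*
 * For reference: the register area that SAVE_ALL builds below the 48-byte
 * PALcode frame can be pictured as the C layout sketched here.  The offsets
 * match the stq/ldq constants used throughout this file (SP_OFF == 184);
 * the struct and field names themselves are only illustrative (cf. the
 * pt_regs definition in asm/ptrace.h and asm-offsets).
 */

#include <stddef.h>

struct osf_trap_frame {
	unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8;	/*   0 ..  64 */
	unsigned long r19, r20, r21, r22, r23, r24, r25;	/*  72 .. 120 */
	unsigned long r26, r27, r28;				/* 128 .. 144 */
	unsigned long hae;					/* 152        */
	unsigned long trap_a0, trap_a1, trap_a2;		/* 160 .. 176 */
	/* SP_OFF == 184: from here on the frame is built by PALcode */
	unsigned long ps, pc, gp, r16, r17, r18;		/* 184 .. 224 */
};

_Static_assert(offsetof(struct osf_trap_frame, ps) == 184, "SP_OFF mismatch");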
25 /* #define SYSCALL_TRACING 1 */ 53 subq $sp, SP_OFF, $sp << 54 .cfi_adjust_cfa_offset SP_OFF << 55 stq $0, 0($sp) << 56 stq $1, 8($sp) << 57 stq $2, 16($sp) << 58 stq $3, 24($sp) << 59 stq $4, 32($sp) << 60 stq $28, 144($sp) << 61 .cfi_rel_offset $0, 0 << 62 .cfi_rel_offset $1, 8 << 63 .cfi_rel_offset $2, 16 << 64 .cfi_rel_offset $3, 24 << 65 .cfi_rel_offset $4, 32 << 66 .cfi_rel_offset $28, 144 << 67 lda $2, alpha_mv << 68 stq $5, 40($sp) << 69 stq $6, 48($sp) << 70 stq $7, 56($sp) << 71 stq $8, 64($sp) << 72 stq $19, 72($sp) << 73 stq $20, 80($sp) << 74 stq $21, 88($sp) << 75 ldq $2, HAE_CACHE($2) << 76 stq $22, 96($sp) << 77 stq $23, 104($sp) << 78 stq $24, 112($sp) << 79 stq $25, 120($sp) << 80 stq $26, 128($sp) << 81 stq $27, 136($sp) << 82 stq $2, 152($sp) << 83 stq $16, 160($sp) << 84 stq $17, 168($sp) << 85 stq $18, 176($sp) << 86 .cfi_rel_offset $5, 40 << 87 .cfi_rel_offset $6, 48 << 88 .cfi_rel_offset $7, 56 << 89 .cfi_rel_offset $8, 64 << 90 .cfi_rel_offset $19, 72 << 91 .cfi_rel_offset $20, 80 << 92 .cfi_rel_offset $21, 88 << 93 .cfi_rel_offset $22, 96 << 94 .cfi_rel_offset $23, 104 << 95 .cfi_rel_offset $24, 112 << 96 .cfi_rel_offset $25, 120 << 97 .cfi_rel_offset $26, 128 << 98 .cfi_rel_offset $27, 136 << 99 .endm << 100 << 101 .macro RESTORE_ALL << 102 lda $19, alpha_mv << 103 ldq $0, 0($sp) << 104 ldq $1, 8($sp) << 105 ldq $2, 16($sp) << 106 ldq $3, 24($sp) << 107 ldq $21, 152($sp) << 108 ldq $20, HAE_CACHE($19) << 109 ldq $4, 32($sp) << 110 ldq $5, 40($sp) << 111 ldq $6, 48($sp) << 112 ldq $7, 56($sp) << 113 subq $20, $21, $20 << 114 ldq $8, 64($sp) << 115 beq $20, 99f << 116 ldq $20, HAE_REG($19) << 117 stq $21, HAE_CACHE($19) << 118 stq $21, 0($20) << 119 99: ldq $19, 72($sp) << 120 ldq $20, 80($sp) << 121 ldq $21, 88($sp) << 122 ldq $22, 96($sp) << 123 ldq $23, 104($sp) << 124 ldq $24, 112($sp) << 125 ldq $25, 120($sp) << 126 ldq $26, 128($sp) << 127 ldq $27, 136($sp) << 128 ldq $28, 144($sp) << 129 addq $sp, SP_OFF, $sp << 130 .cfi_restore $0 << 131 .cfi_restore $1 << 132 .cfi_restore $2 << 133 .cfi_restore $3 << 134 .cfi_restore $4 << 135 .cfi_restore $5 << 136 .cfi_restore $6 << 137 .cfi_restore $7 << 138 .cfi_restore $8 << 139 .cfi_restore $19 << 140 .cfi_restore $20 << 141 .cfi_restore $21 << 142 .cfi_restore $22 << 143 .cfi_restore $23 << 144 .cfi_restore $24 << 145 .cfi_restore $25 << 146 .cfi_restore $26 << 147 .cfi_restore $27 << 148 .cfi_restore $28 << 149 .cfi_adjust_cfa_offset -SP_OFF << 150 .endm << 151 << 152 .macro DO_SWITCH_STACK << 153 bsr $1, do_switch_stack << 154 .cfi_adjust_cfa_offset SWITCH_STACK_S << 155 .cfi_rel_offset $9, 0 << 156 .cfi_rel_offset $10, 8 << 157 .cfi_rel_offset $11, 16 << 158 .cfi_rel_offset $12, 24 << 159 .cfi_rel_offset $13, 32 << 160 .cfi_rel_offset $14, 40 << 161 .cfi_rel_offset $15, 48 << 162 .endm << 163 << 164 .macro UNDO_SWITCH_STACK << 165 bsr $1, undo_switch_stack << 166 .cfi_restore $9 << 167 .cfi_restore $10 << 168 .cfi_restore $11 << 169 .cfi_restore $12 << 170 .cfi_restore $13 << 171 .cfi_restore $14 << 172 .cfi_restore $15 << 173 .cfi_adjust_cfa_offset -SWITCH_STACK_ << 174 .endm << 175 26 176 /* !! 27 #define curptr g6 177 * Non-syscall kernel entry points. << 178 */ << 179 28 180 CFI_START_OSF_FRAME entInt !! 29 #define NR_SYSCALLS 272 /* Each OS is different... 
*/ 181 SAVE_ALL << 182 lda $8, 0x3fff << 183 lda $26, ret_from_sys_call << 184 bic $sp, $8, $8 << 185 mov $sp, $19 << 186 jsr $31, do_entInt << 187 CFI_END_OSF_FRAME entInt << 188 << 189 CFI_START_OSF_FRAME entArith << 190 SAVE_ALL << 191 lda $8, 0x3fff << 192 lda $26, ret_from_sys_call << 193 bic $sp, $8, $8 << 194 mov $sp, $18 << 195 jsr $31, do_entArith << 196 CFI_END_OSF_FRAME entArith << 197 << 198 CFI_START_OSF_FRAME entMM << 199 SAVE_ALL << 200 /* save $9 - $15 so the inline exception code << 201 subq $sp, 56, $sp << 202 .cfi_adjust_cfa_offset 56 << 203 stq $9, 0($sp) << 204 stq $10, 8($sp) << 205 stq $11, 16($sp) << 206 stq $12, 24($sp) << 207 stq $13, 32($sp) << 208 stq $14, 40($sp) << 209 stq $15, 48($sp) << 210 .cfi_rel_offset $9, 0 << 211 .cfi_rel_offset $10, 8 << 212 .cfi_rel_offset $11, 16 << 213 .cfi_rel_offset $12, 24 << 214 .cfi_rel_offset $13, 32 << 215 .cfi_rel_offset $14, 40 << 216 .cfi_rel_offset $15, 48 << 217 addq $sp, 56, $19 << 218 /* handle the fault */ << 219 lda $8, 0x3fff << 220 bic $sp, $8, $8 << 221 jsr $26, do_page_fault << 222 /* reload the registers after the exception co << 223 ldq $9, 0($sp) << 224 ldq $10, 8($sp) << 225 ldq $11, 16($sp) << 226 ldq $12, 24($sp) << 227 ldq $13, 32($sp) << 228 ldq $14, 40($sp) << 229 ldq $15, 48($sp) << 230 addq $sp, 56, $sp << 231 .cfi_restore $9 << 232 .cfi_restore $10 << 233 .cfi_restore $11 << 234 .cfi_restore $12 << 235 .cfi_restore $13 << 236 .cfi_restore $14 << 237 .cfi_restore $15 << 238 .cfi_adjust_cfa_offset -56 << 239 /* finish up the syscall as normal. */ << 240 br ret_from_sys_call << 241 CFI_END_OSF_FRAME entMM << 242 << 243 CFI_START_OSF_FRAME entIF << 244 SAVE_ALL << 245 lda $8, 0x3fff << 246 lda $26, ret_from_sys_call << 247 bic $sp, $8, $8 << 248 mov $sp, $17 << 249 jsr $31, do_entIF << 250 CFI_END_OSF_FRAME entIF << 251 << 252 CFI_START_OSF_FRAME entUna << 253 lda $sp, -256($sp) << 254 .cfi_adjust_cfa_offset 256 << 255 stq $0, 0($sp) << 256 .cfi_rel_offset $0, 0 << 257 .cfi_remember_state << 258 ldq $0, 256($sp) /* get PS */ << 259 stq $1, 8($sp) << 260 stq $2, 16($sp) << 261 stq $3, 24($sp) << 262 and $0, 8, $0 /* use << 263 stq $4, 32($sp) << 264 bne $0, entUnaUser /* yup -> do u << 265 stq $5, 40($sp) << 266 stq $6, 48($sp) << 267 stq $7, 56($sp) << 268 stq $8, 64($sp) << 269 stq $9, 72($sp) << 270 stq $10, 80($sp) << 271 stq $11, 88($sp) << 272 stq $12, 96($sp) << 273 stq $13, 104($sp) << 274 stq $14, 112($sp) << 275 stq $15, 120($sp) << 276 /* 16-18 PAL-saved */ << 277 stq $19, 152($sp) << 278 stq $20, 160($sp) << 279 stq $21, 168($sp) << 280 stq $22, 176($sp) << 281 stq $23, 184($sp) << 282 stq $24, 192($sp) << 283 stq $25, 200($sp) << 284 stq $26, 208($sp) << 285 stq $27, 216($sp) << 286 stq $28, 224($sp) << 287 mov $sp, $19 << 288 stq $gp, 232($sp) << 289 .cfi_rel_offset $1, 1*8 << 290 .cfi_rel_offset $2, 2*8 << 291 .cfi_rel_offset $3, 3*8 << 292 .cfi_rel_offset $4, 4*8 << 293 .cfi_rel_offset $5, 5*8 << 294 .cfi_rel_offset $6, 6*8 << 295 .cfi_rel_offset $7, 7*8 << 296 .cfi_rel_offset $8, 8*8 << 297 .cfi_rel_offset $9, 9*8 << 298 .cfi_rel_offset $10, 10*8 << 299 .cfi_rel_offset $11, 11*8 << 300 .cfi_rel_offset $12, 12*8 << 301 .cfi_rel_offset $13, 13*8 << 302 .cfi_rel_offset $14, 14*8 << 303 .cfi_rel_offset $15, 15*8 << 304 .cfi_rel_offset $19, 19*8 << 305 .cfi_rel_offset $20, 20*8 << 306 .cfi_rel_offset $21, 21*8 << 307 .cfi_rel_offset $22, 22*8 << 308 .cfi_rel_offset $23, 23*8 << 309 .cfi_rel_offset $24, 24*8 << 310 .cfi_rel_offset $25, 25*8 << 311 .cfi_rel_offset $26, 26*8 << 312 
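/*
 * The recurring "lda $8, 0x3fff ; bic $sp, $8, $8" pairs in these entry
 * points recover the current thread_info by rounding the kernel stack
 * pointer down to the base of the 16 KB kernel stack, which is then kept
 * in $8 for the TI_FLAGS/TI_STATUS accesses.  Roughly, in C (illustrative
 * name; cf. current_thread_info()):
 */

struct thread_info;

static inline struct thread_info *ti_from_sp(unsigned long sp)
{
	return (struct thread_info *)(sp & ~0x3fffUL);	/* 0x3fff = 16 KB - 1 */
}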
.cfi_rel_offset $27, 27*8 << 313 .cfi_rel_offset $28, 28*8 << 314 .cfi_rel_offset $29, 29*8 << 315 lda $8, 0x3fff << 316 stq $31, 248($sp) << 317 bic $sp, $8, $8 << 318 jsr $26, do_entUna << 319 ldq $0, 0($sp) << 320 ldq $1, 8($sp) << 321 ldq $2, 16($sp) << 322 ldq $3, 24($sp) << 323 ldq $4, 32($sp) << 324 ldq $5, 40($sp) << 325 ldq $6, 48($sp) << 326 ldq $7, 56($sp) << 327 ldq $8, 64($sp) << 328 ldq $9, 72($sp) << 329 ldq $10, 80($sp) << 330 ldq $11, 88($sp) << 331 ldq $12, 96($sp) << 332 ldq $13, 104($sp) << 333 ldq $14, 112($sp) << 334 ldq $15, 120($sp) << 335 /* 16-18 PAL-saved */ << 336 ldq $19, 152($sp) << 337 ldq $20, 160($sp) << 338 ldq $21, 168($sp) << 339 ldq $22, 176($sp) << 340 ldq $23, 184($sp) << 341 ldq $24, 192($sp) << 342 ldq $25, 200($sp) << 343 ldq $26, 208($sp) << 344 ldq $27, 216($sp) << 345 ldq $28, 224($sp) << 346 ldq $gp, 232($sp) << 347 lda $sp, 256($sp) << 348 .cfi_restore $1 << 349 .cfi_restore $2 << 350 .cfi_restore $3 << 351 .cfi_restore $4 << 352 .cfi_restore $5 << 353 .cfi_restore $6 << 354 .cfi_restore $7 << 355 .cfi_restore $8 << 356 .cfi_restore $9 << 357 .cfi_restore $10 << 358 .cfi_restore $11 << 359 .cfi_restore $12 << 360 .cfi_restore $13 << 361 .cfi_restore $14 << 362 .cfi_restore $15 << 363 .cfi_restore $19 << 364 .cfi_restore $20 << 365 .cfi_restore $21 << 366 .cfi_restore $22 << 367 .cfi_restore $23 << 368 .cfi_restore $24 << 369 .cfi_restore $25 << 370 .cfi_restore $26 << 371 .cfi_restore $27 << 372 .cfi_restore $28 << 373 .cfi_restore $29 << 374 .cfi_adjust_cfa_offset -256 << 375 call_pal PAL_rti << 376 << 377 .align 4 << 378 entUnaUser: << 379 .cfi_restore_state << 380 ldq $0, 0($sp) /* restore ori << 381 lda $sp, 256($sp) /* pop entUna' << 382 .cfi_restore $0 << 383 .cfi_adjust_cfa_offset -256 << 384 SAVE_ALL /* setup norma << 385 lda $sp, -56($sp) << 386 .cfi_adjust_cfa_offset 56 << 387 stq $9, 0($sp) << 388 stq $10, 8($sp) << 389 stq $11, 16($sp) << 390 stq $12, 24($sp) << 391 stq $13, 32($sp) << 392 stq $14, 40($sp) << 393 stq $15, 48($sp) << 394 .cfi_rel_offset $9, 0 << 395 .cfi_rel_offset $10, 8 << 396 .cfi_rel_offset $11, 16 << 397 .cfi_rel_offset $12, 24 << 398 .cfi_rel_offset $13, 32 << 399 .cfi_rel_offset $14, 40 << 400 .cfi_rel_offset $15, 48 << 401 lda $8, 0x3fff << 402 addq $sp, 56, $19 << 403 bic $sp, $8, $8 << 404 jsr $26, do_entUnaUser << 405 ldq $9, 0($sp) << 406 ldq $10, 8($sp) << 407 ldq $11, 16($sp) << 408 ldq $12, 24($sp) << 409 ldq $13, 32($sp) << 410 ldq $14, 40($sp) << 411 ldq $15, 48($sp) << 412 lda $sp, 56($sp) << 413 .cfi_restore $9 << 414 .cfi_restore $10 << 415 .cfi_restore $11 << 416 .cfi_restore $12 << 417 .cfi_restore $13 << 418 .cfi_restore $14 << 419 .cfi_restore $15 << 420 .cfi_adjust_cfa_offset -56 << 421 br ret_from_sys_call << 422 CFI_END_OSF_FRAME entUna << 423 << 424 CFI_START_OSF_FRAME entDbg << 425 SAVE_ALL << 426 lda $8, 0x3fff << 427 lda $26, ret_from_sys_call << 428 bic $sp, $8, $8 << 429 mov $sp, $16 << 430 jsr $31, do_entDbg << 431 CFI_END_OSF_FRAME entDbg << 432 << 433 /* << 434 * The system call entry point is special. Mo << 435 * like a function call to userspace as far as << 436 * do preserve the argument registers (for sys << 437 * (for leaf syscall functions). << 438 * << 439 * So much for theory. We don't take advantag << 440 * << 441 * Note that a0-a2 are not saved by PALcode as << 442 */ << 443 30 444 .align 4 !! 31 .text 445 .globl entSys !! 
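/*
 * What entSys (below) implements, roughly, in C: bounds-check the syscall
 * number in v0, dispatch through sys_call_table with sys_ni_syscall as the
 * fallback, then convert a negative return into the OSF/1 convention of a
 * positive errno in v0 with a3 set to 1 ($syscall_error).  The sketch
 * leaves out the ptrace special case noted there (a syscall may zero the
 * saved v0 so that a negative value is treated as success) and the
 * audit/trace path.  Names and types here are for exposition only.
 */

typedef long (*syscall_fn_t)(unsigned long, unsigned long, unsigned long,
			     unsigned long, unsigned long, unsigned long);

static void dispatch_syscall(syscall_fn_t *table, unsigned long nr_syscalls,
			     syscall_fn_t ni_syscall, unsigned long v0_nr,
			     const unsigned long a[6],
			     unsigned long *v0, unsigned long *a3)
{
	syscall_fn_t fn = (v0_nr < nr_syscalls) ? table[v0_nr] : ni_syscall;
	long ret = fn(a[0], a[1], a[2], a[3], a[4], a[5]);

	if (ret < 0) {			/* error: positive errno, a3 = 1 */
		*v0 = -ret;
		*a3 = 1;
	} else {			/* success: result in v0, a3 = 0 */
		*v0 = ret;
		*a3 = 0;
	}
}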
32 .align 32 446 .type entSys, @function << 447 .cfi_startproc simple << 448 .cfi_return_column 64 << 449 .cfi_def_cfa $sp, 48 << 450 .cfi_rel_offset 64, 8 << 451 .cfi_rel_offset $gp, 16 << 452 entSys: << 453 SAVE_ALL << 454 lda $8, 0x3fff << 455 bic $sp, $8, $8 << 456 lda $4, NR_syscalls($31) << 457 stq $16, SP_OFF+24($sp) << 458 lda $5, sys_call_table << 459 lda $27, sys_ni_syscall << 460 cmpult $0, $4, $4 << 461 ldl $3, TI_FLAGS($8) << 462 stq $17, SP_OFF+32($sp) << 463 s8addq $0, $5, $5 << 464 stq $18, SP_OFF+40($sp) << 465 .cfi_rel_offset $16, SP_OFF+24 << 466 .cfi_rel_offset $17, SP_OFF+32 << 467 .cfi_rel_offset $18, SP_OFF+40 << 468 #ifdef CONFIG_AUDITSYSCALL << 469 lda $6, _TIF_SYSCALL_TRACE | _TIF_ << 470 and $3, $6, $3 << 471 bne $3, strace << 472 #else << 473 blbs $3, strace /* che << 474 #endif << 475 beq $4, 1f << 476 ldq $27, 0($5) << 477 1: jsr $26, ($27), sys_ni_syscall << 478 ldgp $gp, 0($26) << 479 blt $0, $syscall_error /* the << 480 $ret_success: << 481 stq $0, 0($sp) << 482 stq $31, 72($sp) /* a3= << 483 << 484 .align 4 << 485 .globl ret_from_sys_call << 486 ret_from_sys_call: << 487 cmovne $26, 0, $18 /* $18 << 488 ldq $0, SP_OFF($sp) << 489 and $0, 8, $0 << 490 beq $0, ret_to_kernel << 491 ret_to_user: << 492 /* Make sure need_resched and sigpendi << 493 sampling and the rti. */ << 494 lda $16, 7 << 495 call_pal PAL_swpipl << 496 ldl $17, TI_FLAGS($8) << 497 and $17, _TIF_WORK_MASK, $2 << 498 bne $2, work_pending << 499 restore_all: << 500 ldl $2, TI_STATUS($8) << 501 and $2, TS_SAVED_FP | TS_RESTORE_F << 502 bne $3, restore_fpu << 503 restore_other: << 504 .cfi_remember_state << 505 RESTORE_ALL << 506 call_pal PAL_rti << 507 << 508 ret_to_kernel: << 509 .cfi_restore_state << 510 lda $16, 7 << 511 call_pal PAL_swpipl << 512 br restore_other << 513 << 514 .align 3 << 515 $syscall_error: << 516 /* << 517 * Some system calls (e.g., ptrace) ca << 518 * values which might normally be mist << 519 * Those functions must zero $0 (v0) d << 520 * frame to indicate that a negative r << 521 * error number.. << 522 */ << 523 ldq $18, 0($sp) /* old syscall << 524 beq $18, $ret_success << 525 << 526 ldq $19, 72($sp) /* .. and this << 527 subq $31, $0, $0 /* with error << 528 addq $31, 1, $1 /* set a3 for << 529 stq $0, 0($sp) << 530 mov $31, $26 /* tell "ret_f << 531 stq $1, 72($sp) /* a3 for retu << 532 br ret_from_sys_call << 533 << 534 /* << 535 * Do all cleanup when returning from all inte << 536 * << 537 * Arguments: << 538 * $8: current. << 539 * $17: TI_FLAGS. << 540 * $18: The old syscall number, or zero i << 541 * from a syscall that errored and i << 542 * $19: The old a3 value << 543 */ << 544 << 545 .align 4 << 546 .type work_pending, @function << 547 work_pending: << 548 and $17, _TIF_NOTIFY_RESUME | _TIF << 549 bne $2, $work_notifysig << 550 << 551 $work_resched: << 552 /* << 553 * We can get here only if we returned << 554 * or got through work_notifysig alrea << 555 * restarts for us, so let $18 and $19 << 556 */ << 557 jsr $26, schedule << 558 mov 0, $18 << 559 br ret_to_user << 560 << 561 $work_notifysig: << 562 mov $sp, $16 << 563 DO_SWITCH_STACK << 564 jsr $26, do_work_pending << 565 UNDO_SWITCH_STACK << 566 br restore_all << 567 33 >> 34 .globl sparc64_vpte_patchme1 >> 35 .globl sparc64_vpte_patchme2 568 /* 36 /* 569 * PTRACE syscall handler !! 37 * On a second level vpte miss, check whether the original fault is to the OBP >> 38 * range (note that this is only possible for instruction miss, data misses to >> 39 * obp range do not use vpte). 
If so, go back directly to the faulting address. >> 40 * This is because we want to read the tpc, otherwise we have no way of knowing >> 41 * the 8k aligned faulting address if we are using >8k kernel pagesize. This also >> 42 * ensures no vpte range addresses are dropped into tlb while obp is executing >> 43 * (see inherit_locked_prom_mappings() rant). 570 */ 44 */ >> 45 sparc64_vpte_nucleus: >> 46 mov 0xf, %g5 >> 47 sllx %g5, 28, %g5 ! Load 0xf0000000 >> 48 cmp %g4, %g5 ! Is addr >= LOW_OBP_ADDRESS? >> 49 blu,pn %xcc, sparc64_vpte_patchme1 >> 50 mov 0x1, %g5 >> 51 sllx %g5, 32, %g5 ! Load 0x100000000 >> 52 cmp %g4, %g5 ! Is addr < HI_OBP_ADDRESS? >> 53 blu,pn %xcc, obp_iaddr_patch >> 54 nop >> 55 sparc64_vpte_patchme1: >> 56 sethi %hi(0), %g5 ! This has to be patched >> 57 sparc64_vpte_patchme2: >> 58 or %g5, %lo(0), %g5 ! This is patched too >> 59 ba,pt %xcc, sparc64_kpte_continue ! Part of dtlb_backend >> 60 add %g1, %g1, %g1 ! Finish PMD offset adjustment >> 61 >> 62 vpte_noent: >> 63 mov TLB_SFSR, %g1 ! Restore %g1 value >> 64 stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS >> 65 done ! Slick trick >> 66 >> 67 .globl obp_iaddr_patch >> 68 .globl obp_daddr_patch >> 69 >> 70 obp_iaddr_patch: >> 71 sethi %hi(0), %g5 ! This and following is patched >> 72 or %g5, %lo(0), %g5 ! g5 now holds obp pmd base physaddr >> 73 wrpr %g0, 1, %tl ! Behave as if we are at TL0 >> 74 rdpr %tpc, %g4 ! Find original faulting iaddr >> 75 srlx %g4, 13, %g4 ! Throw out context bits >> 76 sllx %g4, 13, %g4 ! g4 has vpn + ctx0 now >> 77 mov TLB_SFSR, %g1 ! Restore %g1 value >> 78 stxa %g4, [%g1 + %g1] ASI_IMMU ! Restore previous TAG_ACCESS >> 79 srlx %g4, 23, %g6 ! Find pmd number >> 80 and %g6, 0x7ff, %g6 ! Find pmd number >> 81 sllx %g6, 2, %g6 ! Find pmd offset >> 82 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5! Load pmd, ie pagetable physaddr >> 83 brz,pn %g5, longpath ! Kill the PROM ? :-) >> 84 sllx %g5, 11, %g5 ! Shift into place >> 85 srlx %g4, 13, %g6 ! find pte number in pagetable >> 86 and %g6, 0x3ff, %g6 ! find pte number in pagetable >> 87 sllx %g6, 3, %g6 ! find pte offset in pagetable >> 88 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5! Load pte >> 89 brgez,pn %g5, longpath ! Kill the PROM ? :-) >> 90 nop >> 91 stxa %g5, [%g0] ASI_ITLB_DATA_IN ! put into tlb >> 92 retry ! go back to original fault >> 93 >> 94 obp_daddr_patch: >> 95 sethi %hi(0), %g5 ! This and following is patched >> 96 or %g5, %lo(0), %g5 ! g5 now holds obp pmd base physaddr >> 97 srlx %g4, 23, %g6 ! Find pmd number >> 98 and %g6, 0x7ff, %g6 ! Find pmd number >> 99 sllx %g6, 2, %g6 ! Find pmd offset >> 100 lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5! Load pmd, ie pagetable physaddr >> 101 brz,pn %g5, longpath >> 102 sllx %g5, 11, %g5 ! Shift into place >> 103 srlx %g4, 13, %g6 ! find pte number in pagetable >> 104 and %g6, 0x3ff, %g6 ! find pte number in pagetable >> 105 sllx %g6, 3, %g6 ! find pte offset in pagetable >> 106 ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5! Load pte >> 107 brgez,pn %g5, longpath >> 108 nop >> 109 stxa %g5, [%g0] ASI_DTLB_DATA_IN ! put into tlb >> 110 retry 571 111 572 .align 4 << 573 .type strace, @function << 574 strace: << 575 /* set up signal stack, call syscall_t << 576 // NB: if anyone adds preemption, this << 577 ldl $1, TI_STATUS($8) << 578 and $1, TS_SAVED_FP, $3 << 579 or $1, TS_SAVED_FP, $2 << 580 bne $3, 1f << 581 stl $2, TI_STATUS($8) << 582 bsr $26, __save_fpu << 583 1: << 584 DO_SWITCH_STACK << 585 jsr $26, syscall_trace_enter /* re << 586 UNDO_SWITCH_STACK << 587 << 588 /* get the arguments back.. 
*/ << 589 ldq $16, SP_OFF+24($sp) << 590 ldq $17, SP_OFF+32($sp) << 591 ldq $18, SP_OFF+40($sp) << 592 ldq $19, 72($sp) << 593 ldq $20, 80($sp) << 594 ldq $21, 88($sp) << 595 << 596 /* get the system call pointer.. */ << 597 lda $1, NR_syscalls($31) << 598 lda $2, sys_call_table << 599 lda $27, sys_ni_syscall << 600 cmpult $0, $1, $1 << 601 s8addq $0, $2, $2 << 602 beq $1, 1f << 603 ldq $27, 0($2) << 604 1: jsr $26, ($27), sys_gettimeofday << 605 ret_from_straced: << 606 ldgp $gp, 0($26) << 607 << 608 /* check return.. */ << 609 blt $0, $strace_error /* the << 610 $strace_success: << 611 stq $31, 72($sp) /* a3= << 612 stq $0, 0($sp) /* sav << 613 << 614 DO_SWITCH_STACK << 615 jsr $26, syscall_trace_leave << 616 UNDO_SWITCH_STACK << 617 br $31, ret_from_sys_call << 618 << 619 .align 3 << 620 $strace_error: << 621 ldq $18, 0($sp) /* old syscall << 622 beq $18, $strace_success << 623 ldq $19, 72($sp) /* .. and this << 624 << 625 subq $31, $0, $0 /* with error << 626 addq $31, 1, $1 /* set a3 for << 627 stq $0, 0($sp) << 628 stq $1, 72($sp) /* a3 for retu << 629 << 630 DO_SWITCH_STACK << 631 mov $18, $9 /* save old sy << 632 mov $19, $10 /* save old a3 << 633 jsr $26, syscall_trace_leave << 634 mov $9, $18 << 635 mov $10, $19 << 636 UNDO_SWITCH_STACK << 637 << 638 mov $31, $26 /* tell "ret_f << 639 br ret_from_sys_call << 640 CFI_END_OSF_FRAME entSys << 641 << 642 /* 112 /* 643 * Save and restore the switch stack -- aka th !! 113 * On a first level data miss, check whether this is to the OBP range (note that >> 114 * such accesses can be made by prom, as well as by kernel using prom_getproperty >> 115 * on "address"), and if so, do not use vpte access ... rather, use information >> 116 * saved during inherit_prom_mappings() using 8k pagesize. 644 */ 117 */ >> 118 kvmap: >> 119 mov 0xf, %g5 >> 120 sllx %g5, 28, %g5 ! Load 0xf0000000 >> 121 cmp %g4, %g5 ! Is addr >= LOW_OBP_ADDRESS? >> 122 blu,pn %xcc, vmalloc_addr >> 123 mov 0x1, %g5 >> 124 sllx %g5, 32, %g5 ! Load 0x100000000 >> 125 cmp %g4, %g5 ! Is addr < HI_OBP_ADDRESS? >> 126 blu,pn %xcc, obp_daddr_patch >> 127 nop >> 128 vmalloc_addr: ! vmalloc addr accessed >> 129 ldxa [%g3 + %g6] ASI_N, %g5 ! Yep, load k-vpte >> 130 brgez,pn %g5, longpath ! Valid, load into TLB >> 131 nop >> 132 stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB >> 133 retry >> 134 >> 135 /* This is trivial with the new code... */ >> 136 .globl do_fpdis >> 137 do_fpdis: >> 138 sethi %hi(TSTATE_PEF), %g4 ! IEU0 >> 139 rdpr %tstate, %g5 >> 140 andcc %g5, %g4, %g0 >> 141 be,pt %xcc, 1f >> 142 nop >> 143 rd %fprs, %g5 >> 144 andcc %g5, FPRS_FEF, %g0 >> 145 be,pt %xcc, 1f >> 146 nop >> 147 >> 148 /* Legal state when DCR_IFPOE is set in Cheetah %dcr. */ >> 149 sethi %hi(109f), %g7 >> 150 ba,pt %xcc, etrap >> 151 109: or %g7, %lo(109b), %g7 >> 152 add %g0, %g0, %g0 >> 153 ba,a,pt %xcc, rtrap_clr_l6 >> 154 >> 155 1: ldub [%g6 + TI_FPSAVED], %g5 ! Load Group >> 156 wr %g0, FPRS_FEF, %fprs ! LSU Group+4bubbles >> 157 andcc %g5, FPRS_FEF, %g0 ! IEU1 Group >> 158 be,a,pt %icc, 1f ! CTI >> 159 clr %g7 ! IEU0 >> 160 ldx [%g6 + TI_GSR], %g7 ! Load Group >> 161 1: andcc %g5, FPRS_DL, %g0 ! IEU1 >> 162 bne,pn %icc, 2f ! CTI >> 163 fzero %f0 ! FPA >> 164 andcc %g5, FPRS_DU, %g0 ! IEU1 Group >> 165 bne,pn %icc, 1f ! CTI >> 166 fzero %f2 ! 
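/*
 * The address classification done by sparc64_vpte_nucleus and kvmap, and
 * the two-level walk done by obp_iaddr_patch/obp_daddr_patch, amount to the
 * following C.  The 0xf0000000/0x100000000 constants are the
 * LOW_OBP_ADDRESS/HI_OBP_ADDRESS bounds the code builds with sllx; the
 * function names are only for exposition.
 */

static int is_obp_address(unsigned long vaddr)
{
	return vaddr >= 0xf0000000UL && vaddr < 0x100000000UL;
}

/* index of the 32-bit pmd entry within the patched-in OBP pmd page */
static unsigned long obp_pmd_index(unsigned long vaddr)
{
	return (vaddr >> 23) & 0x7ff;		/* srlx 23 ; and 0x7ff */
}

/* index of the 64-bit pte within the page table named by that pmd */
static unsigned long obp_pte_index(unsigned long vaddr)
{
	return (vaddr >> 13) & 0x3ff;		/* srlx 13 ; and 0x3ff */
}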
FPA >> 167 faddd %f0, %f2, %f4 >> 168 fmuld %f0, %f2, %f6 >> 169 faddd %f0, %f2, %f8 >> 170 fmuld %f0, %f2, %f10 >> 171 faddd %f0, %f2, %f12 >> 172 fmuld %f0, %f2, %f14 >> 173 faddd %f0, %f2, %f16 >> 174 fmuld %f0, %f2, %f18 >> 175 faddd %f0, %f2, %f20 >> 176 fmuld %f0, %f2, %f22 >> 177 faddd %f0, %f2, %f24 >> 178 fmuld %f0, %f2, %f26 >> 179 faddd %f0, %f2, %f28 >> 180 fmuld %f0, %f2, %f30 >> 181 faddd %f0, %f2, %f32 >> 182 fmuld %f0, %f2, %f34 >> 183 faddd %f0, %f2, %f36 >> 184 fmuld %f0, %f2, %f38 >> 185 faddd %f0, %f2, %f40 >> 186 fmuld %f0, %f2, %f42 >> 187 faddd %f0, %f2, %f44 >> 188 fmuld %f0, %f2, %f46 >> 189 faddd %f0, %f2, %f48 >> 190 fmuld %f0, %f2, %f50 >> 191 faddd %f0, %f2, %f52 >> 192 fmuld %f0, %f2, %f54 >> 193 faddd %f0, %f2, %f56 >> 194 fmuld %f0, %f2, %f58 >> 195 b,pt %xcc, fpdis_exit2 >> 196 faddd %f0, %f2, %f60 >> 197 1: mov SECONDARY_CONTEXT, %g3 >> 198 add %g6, TI_FPREGS + 0x80, %g1 >> 199 faddd %f0, %f2, %f4 >> 200 fmuld %f0, %f2, %f6 >> 201 ldxa [%g3] ASI_DMMU, %g5 >> 202 add %g6, TI_FPREGS + 0xc0, %g2 >> 203 stxa %g0, [%g3] ASI_DMMU >> 204 membar #Sync >> 205 faddd %f0, %f2, %f8 >> 206 fmuld %f0, %f2, %f10 >> 207 ldda [%g1] ASI_BLK_S, %f32 ! grrr, where is ASI_BLK_NUCLEUS 8-( >> 208 ldda [%g2] ASI_BLK_S, %f48 >> 209 faddd %f0, %f2, %f12 >> 210 fmuld %f0, %f2, %f14 >> 211 faddd %f0, %f2, %f16 >> 212 fmuld %f0, %f2, %f18 >> 213 faddd %f0, %f2, %f20 >> 214 fmuld %f0, %f2, %f22 >> 215 faddd %f0, %f2, %f24 >> 216 fmuld %f0, %f2, %f26 >> 217 faddd %f0, %f2, %f28 >> 218 fmuld %f0, %f2, %f30 >> 219 b,pt %xcc, fpdis_exit >> 220 membar #Sync >> 221 2: andcc %g5, FPRS_DU, %g0 >> 222 bne,pt %icc, 3f >> 223 fzero %f32 >> 224 mov SECONDARY_CONTEXT, %g3 >> 225 fzero %f34 >> 226 ldxa [%g3] ASI_DMMU, %g5 >> 227 add %g6, TI_FPREGS, %g1 >> 228 stxa %g0, [%g3] ASI_DMMU >> 229 membar #Sync >> 230 add %g6, TI_FPREGS + 0x40, %g2 >> 231 faddd %f32, %f34, %f36 >> 232 fmuld %f32, %f34, %f38 >> 233 ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-( >> 234 ldda [%g2] ASI_BLK_S, %f16 >> 235 faddd %f32, %f34, %f40 >> 236 fmuld %f32, %f34, %f42 >> 237 faddd %f32, %f34, %f44 >> 238 fmuld %f32, %f34, %f46 >> 239 faddd %f32, %f34, %f48 >> 240 fmuld %f32, %f34, %f50 >> 241 faddd %f32, %f34, %f52 >> 242 fmuld %f32, %f34, %f54 >> 243 faddd %f32, %f34, %f56 >> 244 fmuld %f32, %f34, %f58 >> 245 faddd %f32, %f34, %f60 >> 246 fmuld %f32, %f34, %f62 >> 247 ba,pt %xcc, fpdis_exit >> 248 membar #Sync >> 249 3: mov SECONDARY_CONTEXT, %g3 >> 250 add %g6, TI_FPREGS, %g1 >> 251 ldxa [%g3] ASI_DMMU, %g5 >> 252 mov 0x40, %g2 >> 253 stxa %g0, [%g3] ASI_DMMU >> 254 membar #Sync >> 255 ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-( >> 256 ldda [%g1 + %g2] ASI_BLK_S, %f16 >> 257 add %g1, 0x80, %g1 >> 258 ldda [%g1] ASI_BLK_S, %f32 >> 259 ldda [%g1 + %g2] ASI_BLK_S, %f48 >> 260 membar #Sync >> 261 fpdis_exit: >> 262 stxa %g5, [%g3] ASI_DMMU >> 263 membar #Sync >> 264 fpdis_exit2: >> 265 wr %g7, 0, %gsr >> 266 ldx [%g6 + TI_XFSR], %fsr >> 267 rdpr %tstate, %g3 >> 268 or %g3, %g4, %g3 ! anal... >> 269 wrpr %g3, %tstate >> 270 wr %g0, FPRS_FEF, %fprs ! clean DU/DL bits >> 271 retry >> 272 >> 273 .align 32 >> 274 fp_other_bounce: >> 275 call do_fpother >> 276 add %sp, PTREGS_OFF, %o0 >> 277 ba,pt %xcc, rtrap >> 278 clr %l6 >> 279 >> 280 .globl do_fpother_check_fitos >> 281 .align 32 >> 282 do_fpother_check_fitos: >> 283 sethi %hi(fp_other_bounce - 4), %g7 >> 284 or %g7, %lo(fp_other_bounce - 4), %g7 645 285 646 .align 4 !! 
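/*
 * do_fpdis, schematically: on an fp-disabled trap the FPU is re-enabled and
 * only the halves of the register file that were actually saved are
 * reloaded from thread_info; anything not saved is zeroed via the
 * fzero/faddd/fmuld chains.  FPRS bits per SPARC V9: DL = lower %f0-%f31,
 * DU = upper %f32-%f63, FEF = FPU enable.  The helpers below are
 * hypothetical stand-ins for the wr/ldda/fzero sequences above.
 */

#define FPRS_DL		0x1
#define FPRS_DU		0x2
#define FPRS_FEF	0x4

extern void wr_gsr(unsigned long gsr);
extern void wr_fsr(unsigned long fsr);
extern void wr_fprs(unsigned long fprs);
extern void ldda_lower(const void *fpregs);	/* TI_FPREGS        -> %f0-%f31  */
extern void ldda_upper(const void *fpregs_hi);	/* TI_FPREGS + 0x80 -> %f32-%f63 */
extern void fzero_lower(void);
extern void fzero_upper(void);

static void fpdis_restore(unsigned char fpsaved, const char *fpregs,
			  unsigned long gsr, unsigned long xfsr)
{
	wr_gsr((fpsaved & FPRS_FEF) ? gsr : 0);

	if (fpsaved & FPRS_DL)
		ldda_lower(fpregs);
	else
		fzero_lower();

	if (fpsaved & FPRS_DU)
		ldda_upper(fpregs + 0x80);
	else
		fzero_upper();

	wr_fsr(xfsr);
	wr_fprs(FPRS_FEF);		/* enabled, DL/DU clean */
}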
286 /* NOTE: Need to preserve %g7 until we fully commit 647 .type do_switch_stack, @function !! 287 * to the fitos fixup. 648 .cfi_startproc simple !! 288 */ 649 .cfi_return_column 64 !! 289 stx %fsr, [%g6 + TI_XFSR] 650 .cfi_def_cfa $sp, 0 !! 290 rdpr %tstate, %g3 651 .cfi_register 64, $1 !! 291 andcc %g3, TSTATE_PRIV, %g0 652 do_switch_stack: !! 292 bne,pn %xcc, do_fptrap_after_fsr 653 lda $sp, -SWITCH_STACK_SIZE($sp) !! 293 nop 654 .cfi_adjust_cfa_offset SWITCH_STACK_S !! 294 ldx [%g6 + TI_XFSR], %g3 655 stq $9, 0($sp) !! 295 srlx %g3, 14, %g1 656 stq $10, 8($sp) !! 296 and %g1, 7, %g1 657 stq $11, 16($sp) !! 297 cmp %g1, 2 ! Unfinished FP-OP 658 stq $12, 24($sp) !! 298 bne,pn %xcc, do_fptrap_after_fsr 659 stq $13, 32($sp) !! 299 sethi %hi(1 << 23), %g1 ! Inexact 660 stq $14, 40($sp) !! 300 andcc %g3, %g1, %g0 661 stq $15, 48($sp) !! 301 bne,pn %xcc, do_fptrap_after_fsr 662 stq $26, 56($sp) !! 302 rdpr %tpc, %g1 663 ret $31, ($1), 1 !! 303 lduwa [%g1] ASI_AIUP, %g3 ! This cannot ever fail 664 .cfi_endproc !! 304 #define FITOS_MASK 0xc1f83fe0 665 .size do_switch_stack, .-do_switch_s !! 305 #define FITOS_COMPARE 0x81a01880 666 !! 306 sethi %hi(FITOS_MASK), %g1 667 .align 4 !! 307 or %g1, %lo(FITOS_MASK), %g1 668 .type undo_switch_stack, @function !! 308 and %g3, %g1, %g1 669 .cfi_startproc simple !! 309 sethi %hi(FITOS_COMPARE), %g2 670 .cfi_def_cfa $sp, 0 !! 310 or %g2, %lo(FITOS_COMPARE), %g2 671 .cfi_register 64, $1 !! 311 cmp %g1, %g2 672 undo_switch_stack: !! 312 bne,pn %xcc, do_fptrap_after_fsr 673 ldq $9, 0($sp) !! 313 nop 674 ldq $10, 8($sp) !! 314 std %f62, [%g6 + TI_FPREGS + (62 * 4)] 675 ldq $11, 16($sp) !! 315 sethi %hi(fitos_table_1), %g1 676 ldq $12, 24($sp) !! 316 and %g3, 0x1f, %g2 677 ldq $13, 32($sp) !! 317 or %g1, %lo(fitos_table_1), %g1 678 ldq $14, 40($sp) !! 318 sllx %g2, 2, %g2 679 ldq $15, 48($sp) !! 319 jmpl %g1 + %g2, %g0 680 ldq $26, 56($sp) !! 320 ba,pt %xcc, fitos_emul_continue 681 lda $sp, SWITCH_STACK_SIZE($sp) !! 321 682 ret $31, ($1), 1 !! 322 fitos_table_1: 683 .cfi_endproc !! 323 fitod %f0, %f62 684 .size undo_switch_stack, .-undo_swit !! 324 fitod %f1, %f62 685 !! 325 fitod %f2, %f62 686 #define FR(n) n * 8 + TI_FP($8) !! 326 fitod %f3, %f62 687 .align 4 !! 327 fitod %f4, %f62 688 .globl __save_fpu !! 328 fitod %f5, %f62 689 .type __save_fpu, @function !! 329 fitod %f6, %f62 690 __save_fpu: !! 330 fitod %f7, %f62 691 #define V(n) stt $f##n, FR(n) !! 331 fitod %f8, %f62 692 V( 0); V( 1); V( 2); V( 3) !! 332 fitod %f9, %f62 693 V( 4); V( 5); V( 6); V( 7) !! 333 fitod %f10, %f62 694 V( 8); V( 9); V(10); V(11) !! 334 fitod %f11, %f62 695 V(12); V(13); V(14); V(15) !! 335 fitod %f12, %f62 696 V(16); V(17); V(18); V(19) !! 336 fitod %f13, %f62 697 V(20); V(21); V(22); V(23) !! 337 fitod %f14, %f62 698 V(24); V(25); V(26); V(27) !! 338 fitod %f15, %f62 699 mf_fpcr $f0 # get fpcr !! 339 fitod %f16, %f62 700 V(28); V(29); V(30) !! 340 fitod %f17, %f62 701 stt $f0, FR(31) # save fpcr in !! 341 fitod %f18, %f62 702 ldt $f0, FR(0) # don't let "_ !! 342 fitod %f19, %f62 703 ret !! 343 fitod %f20, %f62 704 #undef V !! 344 fitod %f21, %f62 705 .size __save_fpu, .-__save_fpu !! 345 fitod %f22, %f62 706 !! 346 fitod %f23, %f62 707 .align 4 !! 347 fitod %f24, %f62 708 restore_fpu: !! 348 fitod %f25, %f62 709 and $3, TS_RESTORE_FP, $3 !! 349 fitod %f26, %f62 710 bic $2, TS_SAVED_FP | TS_RESTORE_F !! 350 fitod %f27, %f62 711 beq $3, 1f !! 351 fitod %f28, %f62 712 #define V(n) ldt $f##n, FR(n) !! 352 fitod %f29, %f62 713 ldt $f30, FR(31) # get saved fp !! 
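/*
 * do_fpother_check_fitos only emulates one pattern: an "unfinished FPop"
 * (%fsr.ftt == 2) taken on a fitos instruction, which it rewrites as fitod
 * into the scratch register %f62 followed by fdtos into the real
 * destination -- exactly what fitos_table_1/fitos_table_2 enumerate;
 * anything else bails out to do_fptrap.  The decode step is, in C
 * (mask/compare values as defined above; the helper name is illustrative):
 */

#include <stdint.h>

#define FITOS_MASK	0xc1f83fe0u
#define FITOS_COMPARE	0x81a01880u

static int decode_fitos(uint32_t insn, unsigned int *rd, unsigned int *rs2)
{
	if ((insn & FITOS_MASK) != FITOS_COMPARE)
		return 0;
	*rs2 = insn & 0x1f;		/* selects fitos_table_1: fitod %f<rs2>, %f62 */
	*rd  = (insn >> 25) & 0x1f;	/* selects fitos_table_2: fdtos %f62, %f<rd>  */
	return 1;
}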
353 fitod %f30, %f62 714 V( 0); V( 1); V( 2); V( 3) !! 354 fitod %f31, %f62 715 mt_fpcr $f30 # install save !! 355 716 V( 4); V( 5); V( 6); V( 7) !! 356 fitos_emul_continue: 717 V( 8); V( 9); V(10); V(11) !! 357 sethi %hi(fitos_table_2), %g1 718 V(12); V(13); V(14); V(15) !! 358 srl %g3, 25, %g2 719 V(16); V(17); V(18); V(19) !! 359 or %g1, %lo(fitos_table_2), %g1 720 V(20); V(21); V(22); V(23) !! 360 and %g2, 0x1f, %g2 721 V(24); V(25); V(26); V(27) !! 361 sllx %g2, 2, %g2 722 V(28); V(29); V(30) !! 362 jmpl %g1 + %g2, %g0 723 1: stl $2, TI_STATUS($8) !! 363 ba,pt %xcc, fitos_emul_fini 724 br restore_other !! 364 725 #undef V !! 365 fitos_table_2: >> 366 fdtos %f62, %f0 >> 367 fdtos %f62, %f1 >> 368 fdtos %f62, %f2 >> 369 fdtos %f62, %f3 >> 370 fdtos %f62, %f4 >> 371 fdtos %f62, %f5 >> 372 fdtos %f62, %f6 >> 373 fdtos %f62, %f7 >> 374 fdtos %f62, %f8 >> 375 fdtos %f62, %f9 >> 376 fdtos %f62, %f10 >> 377 fdtos %f62, %f11 >> 378 fdtos %f62, %f12 >> 379 fdtos %f62, %f13 >> 380 fdtos %f62, %f14 >> 381 fdtos %f62, %f15 >> 382 fdtos %f62, %f16 >> 383 fdtos %f62, %f17 >> 384 fdtos %f62, %f18 >> 385 fdtos %f62, %f19 >> 386 fdtos %f62, %f20 >> 387 fdtos %f62, %f21 >> 388 fdtos %f62, %f22 >> 389 fdtos %f62, %f23 >> 390 fdtos %f62, %f24 >> 391 fdtos %f62, %f25 >> 392 fdtos %f62, %f26 >> 393 fdtos %f62, %f27 >> 394 fdtos %f62, %f28 >> 395 fdtos %f62, %f29 >> 396 fdtos %f62, %f30 >> 397 fdtos %f62, %f31 >> 398 >> 399 fitos_emul_fini: >> 400 ldd [%g6 + TI_FPREGS + (62 * 4)], %f62 >> 401 done >> 402 >> 403 .globl do_fptrap >> 404 .align 32 >> 405 do_fptrap: >> 406 stx %fsr, [%g6 + TI_XFSR] >> 407 do_fptrap_after_fsr: >> 408 ldub [%g6 + TI_FPSAVED], %g3 >> 409 rd %fprs, %g1 >> 410 or %g3, %g1, %g3 >> 411 stb %g3, [%g6 + TI_FPSAVED] >> 412 rd %gsr, %g3 >> 413 stx %g3, [%g6 + TI_GSR] >> 414 mov SECONDARY_CONTEXT, %g3 >> 415 add %g6, TI_FPREGS, %g2 >> 416 ldxa [%g3] ASI_DMMU, %g5 >> 417 stxa %g0, [%g3] ASI_DMMU >> 418 membar #Sync >> 419 andcc %g1, FPRS_DL, %g0 >> 420 be,pn %icc, 4f >> 421 mov 0x40, %g3 >> 422 stda %f0, [%g2] ASI_BLK_S >> 423 stda %f16, [%g2 + %g3] ASI_BLK_S >> 424 andcc %g1, FPRS_DU, %g0 >> 425 be,pn %icc, 5f >> 426 4: add %g2, 128, %g2 >> 427 stda %f32, [%g2] ASI_BLK_S >> 428 stda %f48, [%g2 + %g3] ASI_BLK_S >> 429 5: mov SECONDARY_CONTEXT, %g1 >> 430 membar #Sync >> 431 stxa %g5, [%g1] ASI_DMMU >> 432 membar #Sync >> 433 ba,pt %xcc, etrap >> 434 wr %g0, 0, %fprs >> 435 >> 436 /* The registers for cross calls will be: >> 437 * >> 438 * DATA 0: [low 32-bits] Address of function to call, jmp to this >> 439 * [high 32-bits] MMU Context Argument 0, place in %g5 >> 440 * DATA 1: Address Argument 1, place in %g6 >> 441 * DATA 2: Address Argument 2, place in %g7 >> 442 * >> 443 * With this method we can do most of the cross-call tlb/cache >> 444 * flushing very quickly. >> 445 * >> 446 * Current CPU's IRQ worklist table is locked into %g1, >> 447 * don't touch. 
>> 448 */ >> 449 .text >> 450 .align 32 >> 451 .globl do_ivec >> 452 do_ivec: >> 453 mov 0x40, %g3 >> 454 ldxa [%g3 + %g0] ASI_INTR_R, %g3 >> 455 sethi %hi(KERNBASE), %g4 >> 456 cmp %g3, %g4 >> 457 bgeu,pn %xcc, do_ivec_xcall >> 458 srlx %g3, 32, %g5 >> 459 stxa %g0, [%g0] ASI_INTR_RECEIVE >> 460 membar #Sync >> 461 >> 462 sethi %hi(ivector_table), %g2 >> 463 sllx %g3, 5, %g3 >> 464 or %g2, %lo(ivector_table), %g2 >> 465 add %g2, %g3, %g3 >> 466 ldx [%g3 + 0x08], %g2 /* irq_info */ >> 467 ldub [%g3 + 0x04], %g4 /* pil */ >> 468 brz,pn %g2, do_ivec_spurious >> 469 mov 1, %g2 >> 470 >> 471 sllx %g2, %g4, %g2 >> 472 sllx %g4, 2, %g4 >> 473 lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */ >> 474 stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */ >> 475 stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */ >> 476 wr %g2, 0x0, %set_softint >> 477 retry >> 478 do_ivec_xcall: >> 479 mov 0x50, %g1 >> 480 >> 481 ldxa [%g1 + %g0] ASI_INTR_R, %g1 >> 482 srl %g3, 0, %g3 >> 483 mov 0x60, %g7 >> 484 ldxa [%g7 + %g0] ASI_INTR_R, %g7 >> 485 stxa %g0, [%g0] ASI_INTR_RECEIVE >> 486 membar #Sync >> 487 ba,pt %xcc, 1f >> 488 nop >> 489 >> 490 .align 32 >> 491 1: jmpl %g3, %g0 >> 492 nop >> 493 >> 494 do_ivec_spurious: >> 495 stw %g3, [%g6 + 0x00] /* irq_work(cpu, 0) = bucket */ >> 496 rdpr %pstate, %g5 >> 497 >> 498 wrpr %g5, PSTATE_IG | PSTATE_AG, %pstate >> 499 sethi %hi(109f), %g7 >> 500 ba,pt %xcc, etrap >> 501 109: or %g7, %lo(109b), %g7 >> 502 call catch_disabled_ivec >> 503 add %sp, PTREGS_OFF, %o0 >> 504 ba,pt %xcc, rtrap >> 505 clr %l6 >> 506 >> 507 .globl save_alternate_globals >> 508 save_alternate_globals: /* %o0 = save_area */ >> 509 rdpr %pstate, %o5 >> 510 andn %o5, PSTATE_IE, %o1 >> 511 wrpr %o1, PSTATE_AG, %pstate >> 512 stx %g0, [%o0 + 0x00] >> 513 stx %g1, [%o0 + 0x08] >> 514 stx %g2, [%o0 + 0x10] >> 515 stx %g3, [%o0 + 0x18] >> 516 stx %g4, [%o0 + 0x20] >> 517 stx %g5, [%o0 + 0x28] >> 518 stx %g6, [%o0 + 0x30] >> 519 stx %g7, [%o0 + 0x38] >> 520 wrpr %o1, PSTATE_IG, %pstate >> 521 stx %g0, [%o0 + 0x40] >> 522 stx %g1, [%o0 + 0x48] >> 523 stx %g2, [%o0 + 0x50] >> 524 stx %g3, [%o0 + 0x58] >> 525 stx %g4, [%o0 + 0x60] >> 526 stx %g5, [%o0 + 0x68] >> 527 stx %g6, [%o0 + 0x70] >> 528 stx %g7, [%o0 + 0x78] >> 529 wrpr %o1, PSTATE_MG, %pstate >> 530 stx %g0, [%o0 + 0x80] >> 531 stx %g1, [%o0 + 0x88] >> 532 stx %g2, [%o0 + 0x90] >> 533 stx %g3, [%o0 + 0x98] >> 534 stx %g4, [%o0 + 0xa0] >> 535 stx %g5, [%o0 + 0xa8] >> 536 stx %g6, [%o0 + 0xb0] >> 537 stx %g7, [%o0 + 0xb8] >> 538 wrpr %o5, 0x0, %pstate >> 539 retl >> 540 nop >> 541 >> 542 .globl restore_alternate_globals >> 543 restore_alternate_globals: /* %o0 = save_area */ >> 544 rdpr %pstate, %o5 >> 545 andn %o5, PSTATE_IE, %o1 >> 546 wrpr %o1, PSTATE_AG, %pstate >> 547 ldx [%o0 + 0x00], %g0 >> 548 ldx [%o0 + 0x08], %g1 >> 549 ldx [%o0 + 0x10], %g2 >> 550 ldx [%o0 + 0x18], %g3 >> 551 ldx [%o0 + 0x20], %g4 >> 552 ldx [%o0 + 0x28], %g5 >> 553 ldx [%o0 + 0x30], %g6 >> 554 ldx [%o0 + 0x38], %g7 >> 555 wrpr %o1, PSTATE_IG, %pstate >> 556 ldx [%o0 + 0x40], %g0 >> 557 ldx [%o0 + 0x48], %g1 >> 558 ldx [%o0 + 0x50], %g2 >> 559 ldx [%o0 + 0x58], %g3 >> 560 ldx [%o0 + 0x60], %g4 >> 561 ldx [%o0 + 0x68], %g5 >> 562 ldx [%o0 + 0x70], %g6 >> 563 ldx [%o0 + 0x78], %g7 >> 564 wrpr %o1, PSTATE_MG, %pstate >> 565 ldx [%o0 + 0x80], %g0 >> 566 ldx [%o0 + 0x88], %g1 >> 567 ldx [%o0 + 0x90], %g2 >> 568 ldx [%o0 + 0x98], %g3 >> 569 ldx [%o0 + 0xa0], %g4 >> 570 ldx [%o0 + 0xa8], %g5 >> 571 ldx [%o0 + 0xb0], %g6 >> 572 ldx [%o0 + 0xb8], %g7 >> 573 wrpr %o5, 0x0, 
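/*
 * do_ivec, for the normal (non cross-call) case: the incoming vector number
 * selects a 32-byte bucket in ivector_table, the bucket is pushed on the
 * front of this CPU's per-PIL work list, and a software interrupt is posted
 * at that PIL so the handler proper runs from the softint.  Schematic C;
 * set_softint() stands in for the "wr ..., %set_softint" above, the
 * 0x00/0x04/0x08 offsets are the bucket's chain word, PIL and irq_info, and
 * the chain entries really are 32-bit bucket addresses (lduw/stw above),
 * which the cast below mirrors.
 */

extern void set_softint(unsigned long mask);

static void ivec_post(unsigned long ivector_table, unsigned long ino,
		      unsigned int irq_work[/* indexed by PIL */])
{
	unsigned long bucket = ivector_table + (ino << 5);
	unsigned char pil = *(unsigned char *)(bucket + 0x04);

	if (*(unsigned long *)(bucket + 0x08) == 0)	/* no irq_info */
		return;					/* -> do_ivec_spurious */

	*(unsigned int *)(bucket + 0x00) = irq_work[pil];	/* bucket->irq_chain */
	irq_work[pil] = (unsigned int)bucket;			/* new list head     */
	set_softint(1UL << pil);
}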
%pstate >> 574 retl >> 575 nop >> 576 >> 577 .globl getcc, setcc >> 578 getcc: >> 579 ldx [%o0 + PT_V9_TSTATE], %o1 >> 580 srlx %o1, 32, %o1 >> 581 and %o1, 0xf, %o1 >> 582 retl >> 583 stx %o1, [%o0 + PT_V9_G1] >> 584 setcc: >> 585 ldx [%o0 + PT_V9_TSTATE], %o1 >> 586 ldx [%o0 + PT_V9_G1], %o2 >> 587 or %g0, %ulo(TSTATE_ICC), %o3 >> 588 sllx %o3, 32, %o3 >> 589 andn %o1, %o3, %o1 >> 590 sllx %o2, 32, %o2 >> 591 and %o2, %o3, %o2 >> 592 or %o1, %o2, %o1 >> 593 retl >> 594 stx %o1, [%o0 + PT_V9_TSTATE] >> 595 >> 596 .globl utrap, utrap_ill >> 597 utrap: brz,pn %g1, etrap >> 598 nop >> 599 save %sp, -128, %sp >> 600 rdpr %tstate, %l6 >> 601 rdpr %cwp, %l7 >> 602 andn %l6, TSTATE_CWP, %l6 >> 603 wrpr %l6, %l7, %tstate >> 604 rdpr %tpc, %l6 >> 605 rdpr %tnpc, %l7 >> 606 wrpr %g1, 0, %tnpc >> 607 done >> 608 utrap_ill: >> 609 call bad_trap >> 610 add %sp, PTREGS_OFF, %o0 >> 611 ba,pt %xcc, rtrap >> 612 clr %l6 >> 613 >> 614 #ifdef CONFIG_BLK_DEV_FD >> 615 .globl floppy_hardint >> 616 floppy_hardint: >> 617 wr %g0, (1 << 11), %clear_softint >> 618 sethi %hi(doing_pdma), %g1 >> 619 ld [%g1 + %lo(doing_pdma)], %g2 >> 620 brz,pn %g2, floppy_dosoftint >> 621 sethi %hi(fdc_status), %g3 >> 622 ldx [%g3 + %lo(fdc_status)], %g3 >> 623 sethi %hi(pdma_vaddr), %g5 >> 624 ldx [%g5 + %lo(pdma_vaddr)], %g4 >> 625 sethi %hi(pdma_size), %g5 >> 626 ldx [%g5 + %lo(pdma_size)], %g5 >> 627 >> 628 next_byte: >> 629 lduba [%g3] ASI_PHYS_BYPASS_EC_E, %g7 >> 630 andcc %g7, 0x80, %g0 >> 631 be,pn %icc, floppy_fifo_emptied >> 632 andcc %g7, 0x20, %g0 >> 633 be,pn %icc, floppy_overrun >> 634 andcc %g7, 0x40, %g0 >> 635 be,pn %icc, floppy_write >> 636 sub %g5, 1, %g5 >> 637 >> 638 inc %g3 >> 639 lduba [%g3] ASI_PHYS_BYPASS_EC_E, %g7 >> 640 dec %g3 >> 641 orcc %g0, %g5, %g0 >> 642 stb %g7, [%g4] >> 643 bne,pn %xcc, next_byte >> 644 add %g4, 1, %g4 >> 645 >> 646 b,pt %xcc, floppy_tdone >> 647 nop >> 648 >> 649 floppy_write: >> 650 ldub [%g4], %g7 >> 651 orcc %g0, %g5, %g0 >> 652 inc %g3 >> 653 stba %g7, [%g3] ASI_PHYS_BYPASS_EC_E >> 654 dec %g3 >> 655 bne,pn %xcc, next_byte >> 656 add %g4, 1, %g4 >> 657 >> 658 floppy_tdone: >> 659 sethi %hi(pdma_vaddr), %g1 >> 660 stx %g4, [%g1 + %lo(pdma_vaddr)] >> 661 sethi %hi(pdma_size), %g1 >> 662 stx %g5, [%g1 + %lo(pdma_size)] >> 663 sethi %hi(auxio_register), %g1 >> 664 ldx [%g1 + %lo(auxio_register)], %g7 >> 665 lduba [%g7] ASI_PHYS_BYPASS_EC_E, %g5 >> 666 or %g5, AUXIO_AUX1_FTCNT, %g5 >> 667 /* andn %g5, AUXIO_AUX1_MASK, %g5 */ >> 668 stba %g5, [%g7] ASI_PHYS_BYPASS_EC_E >> 669 andn %g5, AUXIO_AUX1_FTCNT, %g5 >> 670 /* andn %g5, AUXIO_AUX1_MASK, %g5 */ >> 671 >> 672 nop; nop; nop; nop; nop; nop; >> 673 nop; nop; nop; nop; nop; nop; >> 674 >> 675 stba %g5, [%g7] ASI_PHYS_BYPASS_EC_E >> 676 sethi %hi(doing_pdma), %g1 >> 677 b,pt %xcc, floppy_dosoftint >> 678 st %g0, [%g1 + %lo(doing_pdma)] >> 679 >> 680 floppy_fifo_emptied: >> 681 sethi %hi(pdma_vaddr), %g1 >> 682 stx %g4, [%g1 + %lo(pdma_vaddr)] >> 683 sethi %hi(pdma_size), %g1 >> 684 stx %g5, [%g1 + %lo(pdma_size)] >> 685 sethi %hi(irq_action), %g1 >> 686 or %g1, %lo(irq_action), %g1 >> 687 ldx [%g1 + (11 << 3)], %g3 ! irqaction[floppy_irq] >> 688 ldx [%g3 + 0x08], %g4 ! action->flags>>48==ino >> 689 sethi %hi(ivector_table), %g3 >> 690 srlx %g4, 48, %g4 >> 691 or %g3, %lo(ivector_table), %g3 >> 692 sllx %g4, 5, %g4 >> 693 ldx [%g3 + %g4], %g4 ! &ivector_table[ino] >> 694 ldx [%g4 + 0x10], %g4 ! bucket->iclr >> 695 stwa %g0, [%g4] ASI_PHYS_BYPASS_EC_E ! ICLR_IDLE >> 696 membar #Sync ! probably not needed... 
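/*
 * getcc/setcc above shuffle the integer condition codes between the saved
 * %tstate and the G1 slot of pt_regs: icc occupies TSTATE bits 35:32
 * (TSTATE_ICC).  In C:
 */

static unsigned long tstate_get_icc(unsigned long tstate)
{
	return (tstate >> 32) & 0xf;
}

static unsigned long tstate_set_icc(unsigned long tstate, unsigned long icc)
{
	const unsigned long icc_mask = 0xfUL << 32;	/* TSTATE_ICC */

	return (tstate & ~icc_mask) | ((icc << 32) & icc_mask);
}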
>> 697 retry >> 698 >> 699 floppy_overrun: >> 700 sethi %hi(pdma_vaddr), %g1 >> 701 stx %g4, [%g1 + %lo(pdma_vaddr)] >> 702 sethi %hi(pdma_size), %g1 >> 703 stx %g5, [%g1 + %lo(pdma_size)] >> 704 sethi %hi(doing_pdma), %g1 >> 705 st %g0, [%g1 + %lo(doing_pdma)] >> 706 >> 707 floppy_dosoftint: >> 708 rdpr %pil, %g2 >> 709 wrpr %g0, 15, %pil >> 710 sethi %hi(109f), %g7 >> 711 b,pt %xcc, etrap_irq >> 712 109: or %g7, %lo(109b), %g7 >> 713 >> 714 mov 11, %o0 >> 715 mov 0, %o1 >> 716 call sparc_floppy_irq >> 717 add %sp, PTREGS_OFF, %o2 >> 718 >> 719 b,pt %xcc, rtrap_irq >> 720 nop >> 721 >> 722 #endif /* CONFIG_BLK_DEV_FD */ >> 723 >> 724 /* XXX Here is stuff we still need to write... -DaveM XXX */ >> 725 .globl netbsd_syscall >> 726 netbsd_syscall: >> 727 retl >> 728 nop >> 729 >> 730 /* These next few routines must be sure to clear the >> 731 * SFSR FaultValid bit so that the fast tlb data protection >> 732 * handler does not flush the wrong context and lock up the >> 733 * box. >> 734 */ >> 735 .globl __do_data_access_exception >> 736 .globl __do_data_access_exception_tl1 >> 737 __do_data_access_exception_tl1: >> 738 rdpr %pstate, %g4 >> 739 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate >> 740 mov TLB_SFSR, %g3 >> 741 mov DMMU_SFAR, %g5 >> 742 ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR >> 743 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR >> 744 stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit >> 745 membar #Sync >> 746 ba,pt %xcc, winfix_dax >> 747 rdpr %tpc, %g3 >> 748 __do_data_access_exception: >> 749 rdpr %pstate, %g4 >> 750 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate >> 751 mov TLB_SFSR, %g3 >> 752 mov DMMU_SFAR, %g5 >> 753 ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR >> 754 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR >> 755 stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit >> 756 membar #Sync >> 757 sethi %hi(109f), %g7 >> 758 ba,pt %xcc, etrap >> 759 109: or %g7, %lo(109b), %g7 >> 760 mov %l4, %o1 >> 761 mov %l5, %o2 >> 762 call data_access_exception >> 763 add %sp, PTREGS_OFF, %o0 >> 764 ba,pt %xcc, rtrap >> 765 clr %l6 >> 766 >> 767 .globl __do_instruction_access_exception >> 768 .globl __do_instruction_access_exception_tl1 >> 769 __do_instruction_access_exception_tl1: >> 770 rdpr %pstate, %g4 >> 771 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate >> 772 mov TLB_SFSR, %g3 >> 773 mov DMMU_SFAR, %g5 >> 774 ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR >> 775 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR >> 776 stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit >> 777 membar #Sync >> 778 sethi %hi(109f), %g7 >> 779 ba,pt %xcc, etraptl1 >> 780 109: or %g7, %lo(109b), %g7 >> 781 mov %l4, %o1 >> 782 mov %l5, %o2 >> 783 call instruction_access_exception_tl1 >> 784 add %sp, PTREGS_OFF, %o0 >> 785 ba,pt %xcc, rtrap >> 786 clr %l6 >> 787 >> 788 __do_instruction_access_exception: >> 789 rdpr %pstate, %g4 >> 790 wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate >> 791 mov TLB_SFSR, %g3 >> 792 mov DMMU_SFAR, %g5 >> 793 ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR >> 794 ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR >> 795 stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit >> 796 membar #Sync >> 797 sethi %hi(109f), %g7 >> 798 ba,pt %xcc, etrap >> 799 109: or %g7, %lo(109b), %g7 >> 800 mov %l4, %o1 >> 801 mov %l5, %o2 >> 802 call instruction_access_exception >> 803 add %sp, PTREGS_OFF, %o0 >> 804 ba,pt %xcc, rtrap >> 805 clr %l6 >> 806 >> 807 /* This is the trap handler entry point for ECC correctable >> 808 * errors. They are corrected, but we listen for the trap >> 809 * so that the event can be logged. 
>> 810 * >> 811 * Disrupting errors are either: >> 812 * 1) single-bit ECC errors during UDB reads to system >> 813 * memory >> 814 * 2) data parity errors during write-back events >> 815 * >> 816 * As far as I can make out from the manual, the CEE trap >> 817 * is only for correctable errors during memory read >> 818 * accesses by the front-end of the processor. >> 819 * >> 820 * The code below is only for trap level 1 CEE events, >> 821 * as it is the only situation where we can safely record >> 822 * and log. For trap level >1 we just clear the CE bit >> 823 * in the AFSR and return. >> 824 */ 726 825 727 !! 826 /* Our trap handling infrastructure allows us to preserve 728 /* !! 827 * two 64-bit values during etrap for arguments to 729 * The meat of the context switch code. !! 828 * subsequent C code. Therefore we encode the information 730 */ !! 829 * as follows: 731 .align 4 !! 830 * 732 .globl alpha_switch_to !! 831 * value 1) Full 64-bits of AFAR 733 .type alpha_switch_to, @function !! 832 * value 2) Low 33-bits of AFSR, then bits 33-->42 734 .cfi_startproc !! 833 * are UDBL error status and bits 43-->52 735 alpha_switch_to: !! 834 * are UDBH error status 736 DO_SWITCH_STACK !! 835 */ 737 ldl $1, TI_STATUS($8) !! 836 .align 64 738 and $1, TS_RESTORE_FP, $3 !! 837 .globl cee_trap 739 bne $3, 1f !! 838 cee_trap: 740 or $1, TS_RESTORE_FP | TS_SAVED_F !! 839 ldxa [%g0] ASI_AFSR, %g1 ! Read AFSR 741 and $1, TS_SAVED_FP, $3 !! 840 ldxa [%g0] ASI_AFAR, %g2 ! Read AFAR 742 stl $2, TI_STATUS($8) !! 841 sllx %g1, 31, %g1 ! Clear reserved bits 743 bne $3, 1f !! 842 srlx %g1, 31, %g1 ! in AFSR 744 bsr $26, __save_fpu !! 843 745 1: !! 844 /* NOTE: UltraSparc-I/II have high and low UDB error 746 call_pal PAL_swpctx !! 845 * registers, corresponding to the two UDB units 747 lda $8, 0x3fff !! 846 * present on those chips. UltraSparc-IIi only 748 UNDO_SWITCH_STACK !! 847 * has a single UDB, called "SDB" in the manual. 749 bic $sp, $8, $8 !! 848 * For IIi the upper UDB register always reads 750 mov $17, $0 !! 849 * as zero so for our purposes things will just 751 ret !! 850 * work with the checks below. 752 .cfi_endproc !! 851 */ 753 .size alpha_switch_to, .-alpha_switc !! 852 ldxa [%g0] ASI_UDBL_ERROR_R, %g3 ! Read UDB-Low error status >> 853 andcc %g3, (1 << 8), %g4 ! Check CE bit >> 854 sllx %g3, (64 - 10), %g3 ! Clear reserved bits >> 855 srlx %g3, (64 - 10), %g3 ! in UDB-Low error status >> 856 >> 857 sllx %g3, (33 + 0), %g3 ! Shift up to encoding area >> 858 or %g1, %g3, %g1 ! Or it in >> 859 be,pn %xcc, 1f ! Branch if CE bit was clear >> 860 nop >> 861 stxa %g4, [%g0] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBL >> 862 membar #Sync ! Synchronize ASI stores >> 863 1: mov 0x18, %g5 ! Addr of UDB-High error status >> 864 ldxa [%g5] ASI_UDBH_ERROR_R, %g3 ! Read it >> 865 >> 866 andcc %g3, (1 << 8), %g4 ! Check CE bit >> 867 sllx %g3, (64 - 10), %g3 ! Clear reserved bits >> 868 srlx %g3, (64 - 10), %g3 ! in UDB-High error status >> 869 sllx %g3, (33 + 10), %g3 ! Shift up to encoding area >> 870 or %g1, %g3, %g1 ! Or it in >> 871 be,pn %xcc, 1f ! Branch if CE bit was clear >> 872 nop >> 873 nop >> 874 >> 875 stxa %g4, [%g5] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBH >> 876 membar #Sync ! Synchronize ASI stores >> 877 1: mov 1, %g5 ! AFSR CE bit is >> 878 sllx %g5, 20, %g5 ! bit 20 >> 879 stxa %g5, [%g0] ASI_AFSR ! Clear CE sticky bit in AFSR >> 880 membar #Sync ! Synchronize ASI stores >> 881 sllx %g2, (64 - 41), %g2 ! Clear reserved bits >> 882 srlx %g2, (64 - 41), %g2 ! 
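/*
 * cee_trap hands the C code two 64-bit values, as described in the comment
 * above: value 1 is the raw AFAR, value 2 packs the AFSR together with the
 * two UDB error-status registers.  The packing is, in C (parameter names
 * are just the raw register reads):
 */

#include <stdint.h>

static uint64_t cee_pack_status(uint64_t afsr, uint64_t udbl, uint64_t udbh)
{
	afsr &= (1ULL << 33) - 1;	/* low 33 bits of AFSR          */
	udbl &= (1ULL << 10) - 1;	/* 10-bit UDB-Low error status  */
	udbh &= (1ULL << 10) - 1;	/* 10-bit UDB-High error status */

	return afsr | (udbl << 33) | (udbh << 43);
}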
in latched AFAR >> 883 >> 884 andn %g2, 0x0f, %g2 ! Finish resv bit clearing >> 885 mov %g1, %g4 ! Move AFSR+UDB* into save reg >> 886 mov %g2, %g5 ! Move AFAR into save reg >> 887 rdpr %pil, %g2 >> 888 wrpr %g0, 15, %pil >> 889 ba,pt %xcc, etrap_irq >> 890 rd %pc, %g7 >> 891 mov %l4, %o0 >> 892 >> 893 mov %l5, %o1 >> 894 call cee_log >> 895 add %sp, PTREGS_OFF, %o2 >> 896 ba,a,pt %xcc, rtrap_irq >> 897 >> 898 /* Capture I/D/E-cache state into per-cpu error scoreboard. >> 899 * >> 900 * %g1: (TL>=0) ? 1 : 0 >> 901 * %g2: scratch >> 902 * %g3: scratch >> 903 * %g4: AFSR >> 904 * %g5: AFAR >> 905 * %g6: current thread ptr >> 906 * %g7: scratch >> 907 */ >> 908 #define CHEETAH_LOG_ERROR \ >> 909 /* Put "TL1" software bit into AFSR. */ \ >> 910 and %g1, 0x1, %g1; \ >> 911 sllx %g1, 63, %g2; \ >> 912 or %g4, %g2, %g4; \ >> 913 /* Get log entry pointer for this cpu at this trap level. */ \ >> 914 BRANCH_IF_JALAPENO(g2,g3,50f) \ >> 915 ldxa [%g0] ASI_SAFARI_CONFIG, %g2; \ >> 916 srlx %g2, 17, %g2; \ >> 917 ba,pt %xcc, 60f; \ >> 918 and %g2, 0x3ff, %g2; \ >> 919 50: ldxa [%g0] ASI_JBUS_CONFIG, %g2; \ >> 920 srlx %g2, 17, %g2; \ >> 921 and %g2, 0x1f, %g2; \ >> 922 60: sllx %g2, 9, %g2; \ >> 923 sethi %hi(cheetah_error_log), %g3; \ >> 924 ldx [%g3 + %lo(cheetah_error_log)], %g3; \ >> 925 brz,pn %g3, 80f; \ >> 926 nop; \ >> 927 add %g3, %g2, %g3; \ >> 928 sllx %g1, 8, %g1; \ >> 929 add %g3, %g1, %g1; \ >> 930 /* %g1 holds pointer to the top of the logging scoreboard */ \ >> 931 ldx [%g1 + 0x0], %g7; \ >> 932 cmp %g7, -1; \ >> 933 bne,pn %xcc, 80f; \ >> 934 nop; \ >> 935 stx %g4, [%g1 + 0x0]; \ >> 936 stx %g5, [%g1 + 0x8]; \ >> 937 add %g1, 0x10, %g1; \ >> 938 /* %g1 now points to D-cache logging area */ \ >> 939 set 0x3ff8, %g2; /* DC_addr mask */ \ >> 940 and %g5, %g2, %g2; /* DC_addr bits of AFAR */ \ >> 941 srlx %g5, 12, %g3; \ >> 942 or %g3, 1, %g3; /* PHYS tag + valid */ \ >> 943 10: ldxa [%g2] ASI_DCACHE_TAG, %g7; \ >> 944 cmp %g3, %g7; /* TAG match? */ \ >> 945 bne,pt %xcc, 13f; \ >> 946 nop; \ >> 947 /* Yep, what we want, capture state. */ \ >> 948 stx %g2, [%g1 + 0x20]; \ >> 949 stx %g7, [%g1 + 0x28]; \ >> 950 /* A membar Sync is required before and after utag access. */ \ >> 951 membar #Sync; \ >> 952 ldxa [%g2] ASI_DCACHE_UTAG, %g7; \ >> 953 membar #Sync; \ >> 954 stx %g7, [%g1 + 0x30]; \ >> 955 ldxa [%g2] ASI_DCACHE_SNOOP_TAG, %g7; \ >> 956 stx %g7, [%g1 + 0x38]; \ >> 957 clr %g3; \ >> 958 12: ldxa [%g2 + %g3] ASI_DCACHE_DATA, %g7; \ >> 959 stx %g7, [%g1]; \ >> 960 add %g3, (1 << 5), %g3; \ >> 961 cmp %g3, (4 << 5); \ >> 962 bl,pt %xcc, 12b; \ >> 963 add %g1, 0x8, %g1; \ >> 964 ba,pt %xcc, 20f; \ >> 965 add %g1, 0x20, %g1; \ >> 966 13: sethi %hi(1 << 14), %g7; \ >> 967 add %g2, %g7, %g2; \ >> 968 srlx %g2, 14, %g7; \ >> 969 cmp %g7, 4; \ >> 970 bl,pt %xcc, 10b; \ >> 971 nop; \ >> 972 add %g1, 0x40, %g1; \ >> 973 20: /* %g1 now points to I-cache logging area */ \ >> 974 set 0x1fe0, %g2; /* IC_addr mask */ \ >> 975 and %g5, %g2, %g2; /* IC_addr bits of AFAR */ \ >> 976 sllx %g2, 1, %g2; /* IC_addr[13:6]==VA[12:5] */ \ >> 977 srlx %g5, (13 - 8), %g3; /* Make PTAG */ \ >> 978 andn %g3, 0xff, %g3; /* Mask off undefined bits */ \ >> 979 21: ldxa [%g2] ASI_IC_TAG, %g7; \ >> 980 andn %g7, 0xff, %g7; \ >> 981 cmp %g3, %g7; \ >> 982 bne,pt %xcc, 23f; \ >> 983 nop; \ >> 984 /* Yep, what we want, capture state. 
*/ \ >> 985 stx %g2, [%g1 + 0x40]; \ >> 986 stx %g7, [%g1 + 0x48]; \ >> 987 add %g2, (1 << 3), %g2; \ >> 988 ldxa [%g2] ASI_IC_TAG, %g7; \ >> 989 add %g2, (1 << 3), %g2; \ >> 990 stx %g7, [%g1 + 0x50]; \ >> 991 ldxa [%g2] ASI_IC_TAG, %g7; \ >> 992 add %g2, (1 << 3), %g2; \ >> 993 stx %g7, [%g1 + 0x60]; \ >> 994 ldxa [%g2] ASI_IC_TAG, %g7; \ >> 995 stx %g7, [%g1 + 0x68]; \ >> 996 sub %g2, (3 << 3), %g2; \ >> 997 ldxa [%g2] ASI_IC_STAG, %g7; \ >> 998 stx %g7, [%g1 + 0x58]; \ >> 999 clr %g3; \ >> 1000 srlx %g2, 2, %g2; \ >> 1001 22: ldxa [%g2 + %g3] ASI_IC_INSTR, %g7; \ >> 1002 stx %g7, [%g1]; \ >> 1003 add %g3, (1 << 3), %g3; \ >> 1004 cmp %g3, (8 << 3); \ >> 1005 bl,pt %xcc, 22b; \ >> 1006 add %g1, 0x8, %g1; \ >> 1007 ba,pt %xcc, 30f; \ >> 1008 add %g1, 0x30, %g1; \ >> 1009 23: sethi %hi(1 << 14), %g7; \ >> 1010 add %g2, %g7, %g2; \ >> 1011 srlx %g2, 14, %g7; \ >> 1012 cmp %g7, 4; \ >> 1013 bl,pt %xcc, 21b; \ >> 1014 nop; \ >> 1015 add %g1, 0x70, %g1; \ >> 1016 30: /* %g1 now points to E-cache logging area */ \ >> 1017 andn %g5, (32 - 1), %g2; /* E-cache subblock */ \ >> 1018 stx %g2, [%g1 + 0x20]; \ >> 1019 ldxa [%g2] ASI_EC_TAG_DATA, %g7; \ >> 1020 stx %g7, [%g1 + 0x28]; \ >> 1021 ldxa [%g2] ASI_EC_R, %g0; \ >> 1022 clr %g3; \ >> 1023 31: ldxa [%g3] ASI_EC_DATA, %g7; \ >> 1024 stx %g7, [%g1 + %g3]; \ >> 1025 add %g3, 0x8, %g3; \ >> 1026 cmp %g3, 0x20; \ >> 1027 bl,pt %xcc, 31b; \ >> 1028 nop; \ >> 1029 80: /* DONE */ 754 1030 755 /* !! 1031 /* These get patched into the trap table at boot time 756 * New processes begin life here. !! 1032 * once we know we have a cheetah processor. 757 */ !! 1033 */ >> 1034 .globl cheetah_fecc_trap_vector, cheetah_fecc_trap_vector_tl1 >> 1035 cheetah_fecc_trap_vector: >> 1036 membar #Sync >> 1037 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 >> 1038 andn %g1, DCU_DC | DCU_IC, %g1 >> 1039 stxa %g1, [%g0] ASI_DCU_CONTROL_REG >> 1040 membar #Sync >> 1041 sethi %hi(cheetah_fast_ecc), %g2 >> 1042 jmpl %g2 + %lo(cheetah_fast_ecc), %g0 >> 1043 mov 0, %g1 >> 1044 cheetah_fecc_trap_vector_tl1: >> 1045 membar #Sync >> 1046 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 >> 1047 andn %g1, DCU_DC | DCU_IC, %g1 >> 1048 stxa %g1, [%g0] ASI_DCU_CONTROL_REG >> 1049 membar #Sync >> 1050 sethi %hi(cheetah_fast_ecc), %g2 >> 1051 jmpl %g2 + %lo(cheetah_fast_ecc), %g0 >> 1052 mov 1, %g1 >> 1053 .globl cheetah_cee_trap_vector, cheetah_cee_trap_vector_tl1 >> 1054 cheetah_cee_trap_vector: >> 1055 membar #Sync >> 1056 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 >> 1057 andn %g1, DCU_IC, %g1 >> 1058 stxa %g1, [%g0] ASI_DCU_CONTROL_REG >> 1059 membar #Sync >> 1060 sethi %hi(cheetah_cee), %g2 >> 1061 jmpl %g2 + %lo(cheetah_cee), %g0 >> 1062 mov 0, %g1 >> 1063 cheetah_cee_trap_vector_tl1: >> 1064 membar #Sync >> 1065 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 >> 1066 andn %g1, DCU_IC, %g1 >> 1067 stxa %g1, [%g0] ASI_DCU_CONTROL_REG >> 1068 membar #Sync >> 1069 sethi %hi(cheetah_cee), %g2 >> 1070 jmpl %g2 + %lo(cheetah_cee), %g0 >> 1071 mov 1, %g1 >> 1072 .globl cheetah_deferred_trap_vector, cheetah_deferred_trap_vector_tl1 >> 1073 cheetah_deferred_trap_vector: >> 1074 membar #Sync >> 1075 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1; >> 1076 andn %g1, DCU_DC | DCU_IC, %g1; >> 1077 stxa %g1, [%g0] ASI_DCU_CONTROL_REG; >> 1078 membar #Sync; >> 1079 sethi %hi(cheetah_deferred_trap), %g2 >> 1080 jmpl %g2 + %lo(cheetah_deferred_trap), %g0 >> 1081 mov 0, %g1 >> 1082 cheetah_deferred_trap_vector_tl1: >> 1083 membar #Sync; >> 1084 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1; >> 1085 andn %g1, DCU_DC | DCU_IC, %g1; >> 1086 stxa %g1, [%g0] 
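/*
 * How CHEETAH_LOG_ERROR picks its slot in the per-cpu error scoreboard:
 * each CPU owns 512 bytes of cheetah_error_log, split into a 256-byte
 * entry for TL0 traps and one for TL>0 traps, and a slot is only (re)used
 * while its first word still reads -1.  The cpu number comes from the
 * Safari config register (>> 17, & 0x3ff), or the JBUS config (& 0x1f) on
 * Jalapeno.  Schematically, in C (illustrative function name):
 */

static void *cheetah_log_slot(char *error_log_base, unsigned long cpuid,
			      int trapped_above_tl0)
{
	char *slot = error_log_base + cpuid * 512 + (trapped_above_tl0 ? 256 : 0);

	if (*(long *)slot != -1)	/* previous error not yet consumed */
		return (void *)0;	/* skip logging (the "80:" exit)   */
	return slot;
}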
ASI_DCU_CONTROL_REG; >> 1087 membar #Sync; >> 1088 sethi %hi(cheetah_deferred_trap), %g2 >> 1089 jmpl %g2 + %lo(cheetah_deferred_trap), %g0 >> 1090 mov 1, %g1 >> 1091 >> 1092 /* Cheetah+ specific traps. These are for the new I/D cache parity >> 1093 * error traps. The first argument to cheetah_plus_parity_handler >> 1094 * is encoded as follows: >> 1095 * >> 1096 * Bit0: 0=dcache,1=icache >> 1097 * Bit1: 0=recoverable,1=unrecoverable >> 1098 */ >> 1099 .globl cheetah_plus_dcpe_trap_vector, cheetah_plus_dcpe_trap_vector_tl1 >> 1100 cheetah_plus_dcpe_trap_vector: >> 1101 membar #Sync >> 1102 sethi %hi(do_cheetah_plus_data_parity), %g7 >> 1103 jmpl %g7 + %lo(do_cheetah_plus_data_parity), %g0 >> 1104 nop >> 1105 nop >> 1106 nop >> 1107 nop >> 1108 nop >> 1109 >> 1110 do_cheetah_plus_data_parity: >> 1111 ba,pt %xcc, etrap >> 1112 rd %pc, %g7 >> 1113 mov 0x0, %o0 >> 1114 call cheetah_plus_parity_error >> 1115 add %sp, PTREGS_OFF, %o1 >> 1116 ba,pt %xcc, rtrap >> 1117 clr %l6 >> 1118 >> 1119 cheetah_plus_dcpe_trap_vector_tl1: >> 1120 membar #Sync >> 1121 wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate >> 1122 sethi %hi(do_dcpe_tl1), %g3 >> 1123 jmpl %g3 + %lo(do_dcpe_tl1), %g0 >> 1124 nop >> 1125 nop >> 1126 nop >> 1127 nop >> 1128 >> 1129 .globl cheetah_plus_icpe_trap_vector, cheetah_plus_icpe_trap_vector_tl1 >> 1130 cheetah_plus_icpe_trap_vector: >> 1131 membar #Sync >> 1132 sethi %hi(do_cheetah_plus_insn_parity), %g7 >> 1133 jmpl %g7 + %lo(do_cheetah_plus_insn_parity), %g0 >> 1134 nop >> 1135 nop >> 1136 nop >> 1137 nop >> 1138 nop >> 1139 >> 1140 do_cheetah_plus_insn_parity: >> 1141 ba,pt %xcc, etrap >> 1142 rd %pc, %g7 >> 1143 mov 0x1, %o0 >> 1144 call cheetah_plus_parity_error >> 1145 add %sp, PTREGS_OFF, %o1 >> 1146 ba,pt %xcc, rtrap >> 1147 clr %l6 >> 1148 >> 1149 cheetah_plus_icpe_trap_vector_tl1: >> 1150 membar #Sync >> 1151 wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate >> 1152 sethi %hi(do_icpe_tl1), %g3 >> 1153 jmpl %g3 + %lo(do_icpe_tl1), %g0 >> 1154 nop >> 1155 nop >> 1156 nop >> 1157 nop >> 1158 >> 1159 /* If we take one of these traps when tl >= 1, then we >> 1160 * jump to interrupt globals. If some trap level above us >> 1161 * was also using interrupt globals, we cannot recover. >> 1162 * We may use all interrupt global registers except %g6. >> 1163 */ >> 1164 .globl do_dcpe_tl1, do_icpe_tl1 >> 1165 do_dcpe_tl1: >> 1166 rdpr %tl, %g1 ! Save original trap level >> 1167 mov 1, %g2 ! Setup TSTATE checking loop >> 1168 sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit >> 1169 1: wrpr %g2, %tl ! Set trap level to check >> 1170 rdpr %tstate, %g4 ! Read TSTATE for this level >> 1171 andcc %g4, %g3, %g0 ! Interrupt globals in use? >> 1172 bne,a,pn %xcc, do_dcpe_tl1_fatal ! Yep, irrecoverable >> 1173 wrpr %g1, %tl ! Restore original trap level >> 1174 add %g2, 1, %g2 ! Next trap level >> 1175 cmp %g2, %g1 ! Hit them all yet? >> 1176 ble,pt %icc, 1b ! Not yet >> 1177 nop >> 1178 wrpr %g1, %tl ! Restore original trap level >> 1179 do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ >> 1180 /* Reset D-cache parity */ >> 1181 sethi %hi(1 << 16), %g1 ! D-cache size >> 1182 mov (1 << 5), %g2 ! D-cache line size >> 1183 sub %g1, %g2, %g1 ! Move down 1 cacheline >> 1184 1: srl %g1, 14, %g3 ! Compute UTAG >> 1185 membar #Sync >> 1186 stxa %g3, [%g1] ASI_DCACHE_UTAG >> 1187 membar #Sync >> 1188 sub %g2, 8, %g3 ! 64-bit data word within line >> 1189 2: membar #Sync >> 1190 stxa %g0, [%g1 + %g3] ASI_DCACHE_DATA >> 1191 membar #Sync >> 1192 subcc %g3, 8, %g3 ! 
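/*
 * Before touching anything, do_dcpe_tl1/do_icpe_tl1 walk every active trap
 * level and check whether some outer level was already running on the
 * interrupt globals; if so the parity trap is unrecoverable, because these
 * handlers run on the interrupt globals themselves.  The wrpr/rdpr loop is,
 * in effect (the helper stands in for "wrpr %tl ; rdpr %tstate"):
 */

extern unsigned long tstate_at_tl(unsigned long tl);

static int interrupt_globals_busy(unsigned long orig_tl, unsigned long tstate_ig)
{
	unsigned long tl;

	for (tl = 1; tl <= orig_tl; tl++)
		if (tstate_at_tl(tl) & tstate_ig)
			return 1;	/* -> do_{d,i}cpe_tl1_fatal     */
	return 0;			/* safe: take the nonfatal path */
}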
Next 64-bit data word >> 1193 bge,pt %icc, 2b >> 1194 nop >> 1195 subcc %g1, %g2, %g1 ! Next cacheline >> 1196 bge,pt %icc, 1b >> 1197 nop >> 1198 ba,pt %xcc, dcpe_icpe_tl1_common >> 1199 nop >> 1200 >> 1201 do_dcpe_tl1_fatal: >> 1202 sethi %hi(1f), %g7 >> 1203 ba,pt %xcc, etraptl1 >> 1204 1: or %g7, %lo(1b), %g7 >> 1205 mov 0x2, %o0 >> 1206 call cheetah_plus_parity_error >> 1207 add %sp, PTREGS_OFF, %o1 >> 1208 ba,pt %xcc, rtrap >> 1209 clr %l6 >> 1210 >> 1211 do_icpe_tl1: >> 1212 rdpr %tl, %g1 ! Save original trap level >> 1213 mov 1, %g2 ! Setup TSTATE checking loop >> 1214 sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit >> 1215 1: wrpr %g2, %tl ! Set trap level to check >> 1216 rdpr %tstate, %g4 ! Read TSTATE for this level >> 1217 andcc %g4, %g3, %g0 ! Interrupt globals in use? >> 1218 bne,a,pn %xcc, do_icpe_tl1_fatal ! Yep, irrecoverable >> 1219 wrpr %g1, %tl ! Restore original trap level >> 1220 add %g2, 1, %g2 ! Next trap level >> 1221 cmp %g2, %g1 ! Hit them all yet? >> 1222 ble,pt %icc, 1b ! Not yet >> 1223 nop >> 1224 wrpr %g1, %tl ! Restore original trap level >> 1225 do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */ >> 1226 /* Flush I-cache */ >> 1227 sethi %hi(1 << 15), %g1 ! I-cache size >> 1228 mov (1 << 5), %g2 ! I-cache line size >> 1229 sub %g1, %g2, %g1 >> 1230 1: or %g1, (2 << 3), %g3 >> 1231 stxa %g0, [%g3] ASI_IC_TAG >> 1232 membar #Sync >> 1233 subcc %g1, %g2, %g1 >> 1234 bge,pt %icc, 1b >> 1235 nop >> 1236 ba,pt %xcc, dcpe_icpe_tl1_common >> 1237 nop >> 1238 >> 1239 do_icpe_tl1_fatal: >> 1240 sethi %hi(1f), %g7 >> 1241 ba,pt %xcc, etraptl1 >> 1242 1: or %g7, %lo(1b), %g7 >> 1243 mov 0x3, %o0 >> 1244 call cheetah_plus_parity_error >> 1245 add %sp, PTREGS_OFF, %o1 >> 1246 ba,pt %xcc, rtrap >> 1247 clr %l6 >> 1248 >> 1249 dcpe_icpe_tl1_common: >> 1250 /* Flush D-cache, re-enable D/I caches in DCU and finally >> 1251 * retry the trapping instruction. >> 1252 */ >> 1253 sethi %hi(1 << 16), %g1 ! D-cache size >> 1254 mov (1 << 5), %g2 ! D-cache line size >> 1255 sub %g1, %g2, %g1 >> 1256 1: stxa %g0, [%g1] ASI_DCACHE_TAG >> 1257 membar #Sync >> 1258 subcc %g1, %g2, %g1 >> 1259 bge,pt %icc, 1b >> 1260 nop >> 1261 ldxa [%g0] ASI_DCU_CONTROL_REG, %g1 >> 1262 or %g1, (DCU_DC | DCU_IC), %g1 >> 1263 stxa %g1, [%g0] ASI_DCU_CONTROL_REG >> 1264 membar #Sync >> 1265 retry >> 1266 >> 1267 /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc >> 1268 * in the trap table. That code has done a memory barrier >> 1269 * and has disabled both the I-cache and D-cache in the DCU >> 1270 * control register. The I-cache is disabled so that we may >> 1271 * capture the corrupted cache line, and the D-cache is disabled >> 1272 * because corrupt data may have been placed there and we don't >> 1273 * want to reference it. >> 1274 * >> 1275 * %g1 is one if this trap occurred at %tl >= 1. >> 1276 * >> 1277 * Next, we turn off error reporting so that we don't recurse. 
>> 1278 */ >> 1279 .globl cheetah_fast_ecc >> 1280 cheetah_fast_ecc: >> 1281 ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2 >> 1282 andn %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2 >> 1283 stxa %g2, [%g0] ASI_ESTATE_ERROR_EN >> 1284 membar #Sync >> 1285 >> 1286 /* Fetch and clear AFSR/AFAR */ >> 1287 ldxa [%g0] ASI_AFSR, %g4 >> 1288 ldxa [%g0] ASI_AFAR, %g5 >> 1289 stxa %g4, [%g0] ASI_AFSR >> 1290 membar #Sync >> 1291 >> 1292 CHEETAH_LOG_ERROR >> 1293 >> 1294 rdpr %pil, %g2 >> 1295 wrpr %g0, 15, %pil >> 1296 ba,pt %xcc, etrap_irq >> 1297 rd %pc, %g7 >> 1298 mov %l4, %o1 >> 1299 mov %l5, %o2 >> 1300 call cheetah_fecc_handler >> 1301 add %sp, PTREGS_OFF, %o0 >> 1302 ba,a,pt %xcc, rtrap_irq >> 1303 >> 1304 /* Our caller has disabled I-cache and performed membar Sync. */ >> 1305 .globl cheetah_cee >> 1306 cheetah_cee: >> 1307 ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2 >> 1308 andn %g2, ESTATE_ERROR_CEEN, %g2 >> 1309 stxa %g2, [%g0] ASI_ESTATE_ERROR_EN >> 1310 membar #Sync >> 1311 >> 1312 /* Fetch and clear AFSR/AFAR */ >> 1313 ldxa [%g0] ASI_AFSR, %g4 >> 1314 ldxa [%g0] ASI_AFAR, %g5 >> 1315 stxa %g4, [%g0] ASI_AFSR >> 1316 membar #Sync >> 1317 >> 1318 CHEETAH_LOG_ERROR >> 1319 >> 1320 rdpr %pil, %g2 >> 1321 wrpr %g0, 15, %pil >> 1322 ba,pt %xcc, etrap_irq >> 1323 rd %pc, %g7 >> 1324 mov %l4, %o1 >> 1325 mov %l5, %o2 >> 1326 call cheetah_cee_handler >> 1327 add %sp, PTREGS_OFF, %o0 >> 1328 ba,a,pt %xcc, rtrap_irq >> 1329 >> 1330 /* Our caller has disabled I-cache+D-cache and performed membar Sync. */ >> 1331 .globl cheetah_deferred_trap >> 1332 cheetah_deferred_trap: >> 1333 ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2 >> 1334 andn %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2 >> 1335 stxa %g2, [%g0] ASI_ESTATE_ERROR_EN >> 1336 membar #Sync >> 1337 >> 1338 /* Fetch and clear AFSR/AFAR */ >> 1339 ldxa [%g0] ASI_AFSR, %g4 >> 1340 ldxa [%g0] ASI_AFAR, %g5 >> 1341 stxa %g4, [%g0] ASI_AFSR >> 1342 membar #Sync >> 1343 >> 1344 CHEETAH_LOG_ERROR >> 1345 >> 1346 rdpr %pil, %g2 >> 1347 wrpr %g0, 15, %pil >> 1348 ba,pt %xcc, etrap_irq >> 1349 rd %pc, %g7 >> 1350 mov %l4, %o1 >> 1351 mov %l5, %o2 >> 1352 call cheetah_deferred_handler >> 1353 add %sp, PTREGS_OFF, %o0 >> 1354 ba,a,pt %xcc, rtrap_irq >> 1355 >> 1356 .globl __do_privact >> 1357 __do_privact: >> 1358 mov TLB_SFSR, %g3 >> 1359 stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit >> 1360 membar #Sync >> 1361 sethi %hi(109f), %g7 >> 1362 ba,pt %xcc, etrap >> 1363 109: or %g7, %lo(109b), %g7 >> 1364 call do_privact >> 1365 add %sp, PTREGS_OFF, %o0 >> 1366 ba,pt %xcc, rtrap >> 1367 clr %l6 >> 1368 >> 1369 .globl do_mna >> 1370 do_mna: >> 1371 rdpr %tl, %g3 >> 1372 cmp %g3, 1 758 1373 759 .globl ret_from_fork !! 1374 /* Setup %g4/%g5 now as they are used in the 760 .align 4 !! 1375 * winfixup code. 761 .ent ret_from_fork !! 1376 */ 762 ret_from_fork: !! 1377 mov TLB_SFSR, %g3 763 lda $26, ret_to_user !! 1378 mov DMMU_SFAR, %g4 764 mov $17, $16 !! 1379 ldxa [%g4] ASI_DMMU, %g4 765 jmp $31, schedule_tail !! 1380 ldxa [%g3] ASI_DMMU, %g5 766 .end ret_from_fork !! 1381 stxa %g0, [%g3] ASI_DMMU ! 
Clear FaultValid bit >> 1382 membar #Sync >> 1383 bgu,pn %icc, winfix_mna >> 1384 rdpr %tpc, %g3 >> 1385 >> 1386 1: sethi %hi(109f), %g7 >> 1387 ba,pt %xcc, etrap >> 1388 109: or %g7, %lo(109b), %g7 >> 1389 mov %l4, %o1 >> 1390 mov %l5, %o2 >> 1391 call mem_address_unaligned >> 1392 add %sp, PTREGS_OFF, %o0 >> 1393 ba,pt %xcc, rtrap >> 1394 clr %l6 >> 1395 >> 1396 .globl do_lddfmna >> 1397 do_lddfmna: >> 1398 sethi %hi(109f), %g7 >> 1399 mov TLB_SFSR, %g4 >> 1400 ldxa [%g4] ASI_DMMU, %g5 >> 1401 stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit >> 1402 membar #Sync >> 1403 mov DMMU_SFAR, %g4 >> 1404 ldxa [%g4] ASI_DMMU, %g4 >> 1405 ba,pt %xcc, etrap >> 1406 109: or %g7, %lo(109b), %g7 >> 1407 mov %l4, %o1 >> 1408 mov %l5, %o2 >> 1409 call handle_lddfmna >> 1410 add %sp, PTREGS_OFF, %o0 >> 1411 ba,pt %xcc, rtrap >> 1412 clr %l6 >> 1413 >> 1414 .globl do_stdfmna >> 1415 do_stdfmna: >> 1416 sethi %hi(109f), %g7 >> 1417 mov TLB_SFSR, %g4 >> 1418 ldxa [%g4] ASI_DMMU, %g5 >> 1419 stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit >> 1420 membar #Sync >> 1421 mov DMMU_SFAR, %g4 >> 1422 ldxa [%g4] ASI_DMMU, %g4 >> 1423 ba,pt %xcc, etrap >> 1424 109: or %g7, %lo(109b), %g7 >> 1425 mov %l4, %o1 >> 1426 mov %l5, %o2 >> 1427 call handle_stdfmna >> 1428 add %sp, PTREGS_OFF, %o0 >> 1429 ba,pt %xcc, rtrap >> 1430 clr %l6 >> 1431 >> 1432 .globl breakpoint_trap >> 1433 breakpoint_trap: >> 1434 call sparc_breakpoint >> 1435 add %sp, PTREGS_OFF, %o0 >> 1436 ba,pt %xcc, rtrap >> 1437 nop >> 1438 >> 1439 #if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \ >> 1440 defined(CONFIG_SOLARIS_EMUL_MODULE) >> 1441 /* SunOS uses syscall zero as the 'indirect syscall' it looks >> 1442 * like indir_syscall(scall_num, arg0, arg1, arg2...); etc. >> 1443 * This is complete brain damage. >> 1444 */ >> 1445 .globl sunos_indir >> 1446 sunos_indir: >> 1447 srl %o0, 0, %o0 >> 1448 mov %o7, %l4 >> 1449 cmp %o0, NR_SYSCALLS >> 1450 blu,a,pt %icc, 1f >> 1451 sll %o0, 0x2, %o0 >> 1452 sethi %hi(sunos_nosys), %l6 >> 1453 b,pt %xcc, 2f >> 1454 or %l6, %lo(sunos_nosys), %l6 >> 1455 1: sethi %hi(sunos_sys_table), %l7 >> 1456 or %l7, %lo(sunos_sys_table), %l7 >> 1457 lduw [%l7 + %o0], %l6 >> 1458 2: mov %o1, %o0 >> 1459 mov %o2, %o1 >> 1460 mov %o3, %o2 >> 1461 mov %o4, %o3 >> 1462 mov %o5, %o4 >> 1463 call %l6 >> 1464 mov %l4, %o7 >> 1465 >> 1466 .globl sunos_getpid >> 1467 sunos_getpid: >> 1468 call sys_getppid >> 1469 nop >> 1470 call sys_getpid >> 1471 stx %o0, [%sp + PTREGS_OFF + PT_V9_I1] >> 1472 b,pt %xcc, ret_sys_call >> 1473 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] >> 1474 >> 1475 /* SunOS getuid() returns uid in %o0 and euid in %o1 */ >> 1476 .globl sunos_getuid >> 1477 sunos_getuid: >> 1478 call sys32_geteuid16 >> 1479 nop >> 1480 call sys32_getuid16 >> 1481 stx %o0, [%sp + PTREGS_OFF + PT_V9_I1] >> 1482 b,pt %xcc, ret_sys_call >> 1483 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] >> 1484 >> 1485 /* SunOS getgid() returns gid in %o0 and egid in %o1 */ >> 1486 .globl sunos_getgid >> 1487 sunos_getgid: >> 1488 call sys32_getegid16 >> 1489 nop >> 1490 call sys32_getgid16 >> 1491 stx %o0, [%sp + PTREGS_OFF + PT_V9_I1] >> 1492 b,pt %xcc, ret_sys_call >> 1493 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] >> 1494 #endif 767 1495 768 /* !! 1496 /* SunOS's execv() call only specifies the argv argument, the 769 * ... and new kernel threads - here !! 1497 * environment settings are the same as the calling processes. 770 */ !! 1498 */ 771 .align 4 !! 1499 .globl sunos_execv, sys_execve, sys32_execve 772 .globl ret_from_kernel_thread !! 
1500 sys_execve: 773 .ent ret_from_kernel_thread !! 1501 sethi %hi(sparc_execve), %g1 774 ret_from_kernel_thread: !! 1502 ba,pt %xcc, execve_merge 775 mov $17, $16 !! 1503 or %g1, %lo(sparc_execve), %g1 776 jsr $26, schedule_tail !! 1504 sunos_execv: 777 mov $9, $27 !! 1505 stx %g0, [%sp + PTREGS_OFF + PT_V9_I2] 778 mov $10, $16 !! 1506 sys32_execve: 779 jsr $26, ($9) !! 1507 sethi %hi(sparc32_execve), %g1 780 br $31, ret_to_user !! 1508 or %g1, %lo(sparc32_execve), %g1 781 .end ret_from_kernel_thread !! 1509 execve_merge: >> 1510 flushw >> 1511 jmpl %g1, %g0 >> 1512 add %sp, PTREGS_OFF, %o0 >> 1513 >> 1514 .globl sys_pipe, sys_sigpause, sys_nis_syscall >> 1515 .globl sys_sigsuspend, sys_rt_sigsuspend, sys32_rt_sigsuspend >> 1516 .globl sys_rt_sigreturn >> 1517 .globl sys32_sigreturn, sys32_rt_sigreturn >> 1518 .globl sys32_execve, sys_ptrace >> 1519 .globl sys_sigaltstack, sys32_sigaltstack >> 1520 .globl sys32_sigstack >> 1521 .align 32 >> 1522 sys_pipe: ba,pt %xcc, sparc_pipe >> 1523 add %sp, PTREGS_OFF, %o0 >> 1524 sys_nis_syscall:ba,pt %xcc, c_sys_nis_syscall >> 1525 add %sp, PTREGS_OFF, %o0 >> 1526 sys_memory_ordering: >> 1527 ba,pt %xcc, sparc_memory_ordering >> 1528 add %sp, PTREGS_OFF, %o1 >> 1529 sys_sigaltstack:ba,pt %xcc, do_sigaltstack >> 1530 add %i6, STACK_BIAS, %o2 >> 1531 sys32_sigstack: ba,pt %xcc, do_sys32_sigstack >> 1532 mov %i6, %o2 >> 1533 sys32_sigaltstack: >> 1534 ba,pt %xcc, do_sys32_sigaltstack >> 1535 mov %i6, %o2 >> 1536 >> 1537 .align 32 >> 1538 sys_sigsuspend: add %sp, PTREGS_OFF, %o0 >> 1539 call do_sigsuspend >> 1540 add %o7, 1f-.-4, %o7 >> 1541 nop >> 1542 sys_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */ >> 1543 add %sp, PTREGS_OFF, %o2 >> 1544 call do_rt_sigsuspend >> 1545 add %o7, 1f-.-4, %o7 >> 1546 nop >> 1547 sys32_rt_sigsuspend: /* NOTE: %o0,%o1 have a correct value already */ >> 1548 srl %o0, 0, %o0 >> 1549 add %sp, PTREGS_OFF, %o2 >> 1550 call do_rt_sigsuspend32 >> 1551 add %o7, 1f-.-4, %o7 >> 1552 /* NOTE: %o0 has a correct value already */ >> 1553 sys_sigpause: add %sp, PTREGS_OFF, %o1 >> 1554 call do_sigpause >> 1555 add %o7, 1f-.-4, %o7 >> 1556 nop >> 1557 sys32_sigreturn: >> 1558 add %sp, PTREGS_OFF, %o0 >> 1559 call do_sigreturn32 >> 1560 add %o7, 1f-.-4, %o7 >> 1561 nop >> 1562 sys_rt_sigreturn: >> 1563 add %sp, PTREGS_OFF, %o0 >> 1564 call do_rt_sigreturn >> 1565 add %o7, 1f-.-4, %o7 >> 1566 nop >> 1567 sys32_rt_sigreturn: >> 1568 add %sp, PTREGS_OFF, %o0 >> 1569 call do_rt_sigreturn32 >> 1570 add %o7, 1f-.-4, %o7 >> 1571 nop >> 1572 sys_ptrace: add %sp, PTREGS_OFF, %o0 >> 1573 call do_ptrace >> 1574 add %o7, 1f-.-4, %o7 >> 1575 nop >> 1576 .align 32 >> 1577 1: ldx [%curptr + TI_FLAGS], %l5 >> 1578 andcc %l5, _TIF_SYSCALL_TRACE, %g0 >> 1579 be,pt %icc, rtrap >> 1580 clr %l6 >> 1581 call syscall_trace >> 1582 nop >> 1583 >> 1584 ba,pt %xcc, rtrap >> 1585 clr %l6 >> 1586 >> 1587 /* This is how fork() was meant to be done, 8 instruction entry. >> 1588 * >> 1589 * I questioned the following code briefly, let me clear things >> 1590 * up so you must not reason on it like I did. >> 1591 * >> 1592 * Know the fork_kpsr etc. we use in the sparc32 port? We don't >> 1593 * need it here because the only piece of window state we copy to >> 1594 * the child is the CWP register. Even if the parent sleeps, >> 1595 * we are safe because we stuck it into pt_regs of the parent >> 1596 * so it will not change. 
>> 1597 * >> 1598 * XXX This raises the question, whether we can do the same on >> 1599 * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim. The >> 1600 * XXX answer is yes. We stick fork_kpsr in UREG_G0 and >> 1601 * XXX fork_kwim in UREG_G1 (global registers are considered >> 1602 * XXX volatile across a system call in the sparc ABI I think >> 1603 * XXX if it isn't we can use regs->y instead, anyone who depends >> 1604 * XXX upon the Y register being preserved across a fork deserves >> 1605 * XXX to lose). >> 1606 * >> 1607 * In fact we should take advantage of that fact for other things >> 1608 * during system calls... >> 1609 */ >> 1610 .globl sys_fork, sys_vfork, sys_clone, sparc_exit >> 1611 .globl ret_from_syscall >> 1612 .align 32 >> 1613 sys_vfork: /* Under Linux, vfork and fork are just special cases of clone. */ >> 1614 sethi %hi(0x4000 | 0x0100 | SIGCHLD), %o0 >> 1615 or %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0 >> 1616 ba,pt %xcc, sys_clone >> 1617 sys_fork: clr %o1 >> 1618 mov SIGCHLD, %o0 >> 1619 sys_clone: flushw >> 1620 movrz %o1, %fp, %o1 >> 1621 mov 0, %o3 >> 1622 ba,pt %xcc, sparc_do_fork >> 1623 add %sp, PTREGS_OFF, %o2 >> 1624 ret_from_syscall: >> 1625 /* Clear SPARC_FLAG_NEWCHILD, switch_to leaves thread.flags in >> 1626 * %o7 for us. Check performance counter stuff too. >> 1627 */ >> 1628 andn %o7, _TIF_NEWCHILD, %l0 >> 1629 stx %l0, [%g6 + TI_FLAGS] >> 1630 call schedule_tail >> 1631 mov %g5, %o0 >> 1632 andcc %l0, _TIF_PERFCTR, %g0 >> 1633 be,pt %icc, 1f >> 1634 nop >> 1635 ldx [%g6 + TI_PCR], %o7 >> 1636 wr %g0, %o7, %pcr >> 1637 >> 1638 /* Blackbird errata workaround. See commentary in >> 1639 * smp.c:smp_percpu_timer_interrupt() for more >> 1640 * information. >> 1641 */ >> 1642 ba,pt %xcc, 99f >> 1643 nop >> 1644 .align 64 >> 1645 99: wr %g0, %g0, %pic >> 1646 rd %pic, %g0 >> 1647 >> 1648 1: b,pt %xcc, ret_sys_call >> 1649 ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 >> 1650 sparc_exit: wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV), %pstate >> 1651 rdpr %otherwin, %g1 >> 1652 rdpr %cansave, %g3 >> 1653 add %g3, %g1, %g3 >> 1654 wrpr %g3, 0x0, %cansave >> 1655 wrpr %g0, 0x0, %otherwin >> 1656 wrpr %g0, (PSTATE_RMO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE), %pstate >> 1657 ba,pt %xcc, sys_exit >> 1658 stb %g0, [%g6 + TI_WSAVED] >> 1659 >> 1660 linux_sparc_ni_syscall: >> 1661 sethi %hi(sys_ni_syscall), %l7 >> 1662 b,pt %xcc, 4f >> 1663 or %l7, %lo(sys_ni_syscall), %l7 >> 1664 >> 1665 linux_syscall_trace32: >> 1666 call syscall_trace >> 1667 nop >> 1668 srl %i0, 0, %o0 >> 1669 mov %i4, %o4 >> 1670 srl %i1, 0, %o1 >> 1671 srl %i2, 0, %o2 >> 1672 b,pt %xcc, 2f >> 1673 srl %i3, 0, %o3 >> 1674 >> 1675 linux_syscall_trace: >> 1676 call syscall_trace >> 1677 nop >> 1678 mov %i0, %o0 >> 1679 mov %i1, %o1 >> 1680 mov %i2, %o2 >> 1681 mov %i3, %o3 >> 1682 b,pt %xcc, 2f >> 1683 mov %i4, %o4 >> 1684 >> 1685 >> 1686 /* Linux 32-bit and SunOS system calls enter here... */ >> 1687 .align 32 >> 1688 .globl linux_sparc_syscall32 >> 1689 linux_sparc_syscall32: >> 1690 /* Direct access to user regs, much faster. */ >> 1691 cmp %g1, NR_SYSCALLS ! IEU1 Group >> 1692 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI >> 1693 srl %i0, 0, %o0 ! IEU0 >> 1694 sll %g1, 2, %l4 ! IEU0 Group >> 1695 #ifdef SYSCALL_TRACING >> 1696 call syscall_trace_entry >> 1697 add %sp, PTREGS_OFF, %o0 >> 1698 srl %i0, 0, %o0 >> 1699 #endif >> 1700 mov %i4, %o4 ! IEU1 >> 1701 lduw [%l7 + %l4], %l7 ! Load >> 1702 srl %i1, 0, %o1 ! IEU0 Group >> 1703 ldx [%curptr + TI_FLAGS], %l0 ! Load >> 1704 >> 1705 mov %i5, %o5 ! 
IEU1 >> 1706 srl %i2, 0, %o2 ! IEU0 Group >> 1707 andcc %l0, _TIF_SYSCALL_TRACE, %g0 ! IEU0 Group >> 1708 bne,pn %icc, linux_syscall_trace32 ! CTI >> 1709 mov %i0, %l5 ! IEU1 >> 1710 call %l7 ! CTI Group brk forced >> 1711 srl %i3, 0, %o3 ! IEU0 >> 1712 ba,a,pt %xcc, 3f >> 1713 >> 1714 /* Linux native and SunOS system calls enter here... */ >> 1715 .align 32 >> 1716 .globl linux_sparc_syscall, ret_sys_call >> 1717 linux_sparc_syscall: >> 1718 /* Direct access to user regs, much faster. */ >> 1719 cmp %g1, NR_SYSCALLS ! IEU1 Group >> 1720 bgeu,pn %xcc, linux_sparc_ni_syscall ! CTI >> 1721 mov %i0, %o0 ! IEU0 >> 1722 sll %g1, 2, %l4 ! IEU0 Group >> 1723 #ifdef SYSCALL_TRACING >> 1724 call syscall_trace_entry >> 1725 add %sp, PTREGS_OFF, %o0 >> 1726 mov %i0, %o0 >> 1727 #endif >> 1728 mov %i1, %o1 ! IEU1 >> 1729 lduw [%l7 + %l4], %l7 ! Load >> 1730 4: mov %i2, %o2 ! IEU0 Group >> 1731 ldx [%curptr + TI_FLAGS], %l0 ! Load >> 1732 >> 1733 mov %i3, %o3 ! IEU1 >> 1734 mov %i4, %o4 ! IEU0 Group >> 1735 andcc %l0, _TIF_SYSCALL_TRACE, %g0 ! IEU1 Group+1 bubble >> 1736 bne,pn %icc, linux_syscall_trace ! CTI Group >> 1737 mov %i0, %l5 ! IEU0 >> 1738 2: call %l7 ! CTI Group brk forced >> 1739 mov %i5, %o5 ! IEU0 >> 1740 nop >> 1741 >> 1742 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] >> 1743 ret_sys_call: >> 1744 #ifdef SYSCALL_TRACING >> 1745 mov %o0, %o1 >> 1746 call syscall_trace_exit >> 1747 add %sp, PTREGS_OFF, %o0 >> 1748 mov %o1, %o0 >> 1749 #endif >> 1750 ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 >> 1751 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc >> 1752 sra %o0, 0, %o0 >> 1753 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 >> 1754 cmp %o0, -ENOIOCTLCMD >> 1755 sllx %g2, 32, %g2 >> 1756 bgeu,pn %xcc, 1f >> 1757 andcc %l0, _TIF_SYSCALL_TRACE, %l6 >> 1758 80: >> 1759 andn %g3, %g2, %g3 /* System call success, clear Carry condition code. */ >> 1760 stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE] >> 1761 bne,pn %icc, linux_syscall_trace2 >> 1762 add %l1, 0x4, %l2 ! npc = npc+4 >> 1763 stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC] >> 1764 ba,pt %xcc, rtrap_clr_l6 >> 1765 stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] 782 1766 783 !! 1767 1: 784 /* !! 1768 /* Really a failure? Check if force_successful_syscall_return() 785 * Special system calls. Most of these are sp !! 1769 * was invoked. 786 * have to play switch_stack games. !! 1770 */ 787 */ !! 1771 ldx [%curptr + TI_FLAGS], %l0 ! Load >> 1772 andcc %l0, _TIF_SYSCALL_SUCCESS, %g0 >> 1773 be,pt %icc, 1f >> 1774 andcc %l0, _TIF_SYSCALL_TRACE, %l6 >> 1775 andn %l0, _TIF_SYSCALL_SUCCESS, %l0 >> 1776 ba,pt %xcc, 80b >> 1777 stx %l0, [%curptr + TI_FLAGS] 788 1778 789 .macro fork_like name !! 1779 /* System call failure, set Carry condition code. 790 .align 4 !! 1780 * Also, get abs(errno) to return to the process. 791 .globl alpha_\name !! 1781 */ 792 .ent alpha_\name << 793 alpha_\name: << 794 .prologue 0 << 795 bsr $1, do_switch_stack << 796 // NB: if anyone adds preemption, this << 797 ldl $1, TI_STATUS($8) << 798 and $1, TS_SAVED_FP, $3 << 799 or $1, TS_SAVED_FP, $2 << 800 bne $3, 1f << 801 stl $2, TI_STATUS($8) << 802 bsr $26, __save_fpu << 803 1: 1782 1: 804 jsr $26, sys_\name !! 1783 sub %g0, %o0, %o0 805 ldq $26, 56($sp) !! 1784 or %g3, %g2, %g3 806 lda $sp, SWITCH_STACK_SIZE($sp) !! 1785 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] 807 ret !! 1786 mov 1, %l6 808 .end alpha_\name !! 1787 stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE] 809 .endm !! 1788 bne,pn %icc, linux_syscall_trace2 810 !! 1789 add %l1, 0x4, %l2 !npc = npc+4 811 fork_like fork !! 
1790 stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC] 812 fork_like vfork !! 1791 813 fork_like clone !! 1792 b,pt %xcc, rtrap 814 fork_like clone3 !! 1793 stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] 815 !! 1794 linux_syscall_trace2: 816 .macro sigreturn_like name !! 1795 call syscall_trace 817 .align 4 !! 1796 nop 818 .globl sys_\name !! 1797 stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC] 819 .ent sys_\name !! 1798 ba,pt %xcc, rtrap 820 sys_\name: !! 1799 stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] 821 .prologue 0 !! 1800 822 lda $9, ret_from_straced !! 1801 .align 32 823 cmpult $26, $9, $9 !! 1802 .globl __flushw_user 824 lda $sp, -SWITCH_STACK_SIZE($sp) !! 1803 __flushw_user: 825 jsr $26, do_\name !! 1804 rdpr %otherwin, %g1 826 bne $9, 1f !! 1805 brz,pn %g1, 2f 827 jsr $26, syscall_trace_leave !! 1806 clr %g2 828 1: br $1, undo_switch_stack !! 1807 1: save %sp, -128, %sp 829 br ret_from_sys_call !! 1808 rdpr %otherwin, %g1 830 .end sys_\name !! 1809 brnz,pt %g1, 1b 831 .endm !! 1810 add %g2, 1, %g2 832 !! 1811 1: sub %g2, 1, %g2 833 sigreturn_like sigreturn !! 1812 brnz,pt %g2, 1b 834 sigreturn_like rt_sigreturn !! 1813 restore %g0, %g0, %g0 835 !! 1814 2: retl 836 .align 4 !! 1815 nop 837 .globl alpha_syscall_zero << 838 .ent alpha_syscall_zero << 839 alpha_syscall_zero: << 840 .prologue 0 << 841 /* Special because it needs to do some << 842 force_successful_syscall_return(). << 843 syscall number for that, zero meani << 844 That works nicely, but for real sys << 845 make sure that this logics doesn't << 846 Store a non-zero there - -ENOSYS we << 847 for our return value will do just f << 848 */ << 849 lda $0, -ENOSYS << 850 unop << 851 stq $0, 0($sp) << 852 ret << 853 .end alpha_syscall_zero <<
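Editor's note on the syscall return path above: ret_sys_call reports success or failure to userspace through the carry condition codes in %tstate rather than through a sentinel return value. A C syscall body that returns a value unsigned-greater-or-equal to -ENOIOCTLCMD is treated as an error (unless force_successful_syscall_return() has set _TIF_SYSCALL_SUCCESS), in which case the assembly negates it to a positive errno in %o0 ("sub %g0, %o0, %o0") and sets carry; everything else is passed through with carry cleared. The C fragment below is only an illustrative sketch of that mapping, not kernel source: the struct, the function name map_syscall_return, and the forced_success parameter are invented for the example, and ENOIOCTLCMD is assumed to be the kernel-internal value 515 of this era. The sra/sign-extension step of the real code is omitted for brevity.

        #include <stdbool.h>

        #define ENOIOCTLCMD 515         /* kernel-internal errno; assumed value */

        struct sparc64_sysret {
                unsigned long o0;       /* what userspace sees in %o0 */
                bool carry_set;         /* TSTATE_XCARRY/TSTATE_ICARRY: failure flag */
        };

        /* Map the raw return value of a C syscall implementation onto the
         * user-visible convention implemented by ret_sys_call above. */
        struct sparc64_sysret map_syscall_return(long ret, bool forced_success)
        {
                struct sparc64_sysret r;

                /* Mirrors "cmp %o0, -ENOIOCTLCMD; bgeu,pn %xcc, ...": only the
                 * range [-ENOIOCTLCMD, -1], viewed unsigned, counts as an error. */
                if ((unsigned long)ret >= (unsigned long)-ENOIOCTLCMD && !forced_success) {
                        r.o0 = (unsigned long)-ret;     /* abs(errno) for the process */
                        r.carry_set = true;             /* failure: carry set */
                } else {
                        r.o0 = (unsigned long)ret;      /* success: value passed through */
                        r.carry_set = false;            /* carry cleared */
                }
                return r;
        }

A userspace syscall stub would then typically branch on the carry bit (e.g. bcs) and, on failure, copy %o0 into errno, which is why the kernel hands back abs(errno) here instead of the negative value it uses internally.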