/* SPDX-License-Identifier: GPL-2.0 */
/*
 * rtrap.S: Return from Sparc trap low-level code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *
 * NOTE(review): SPARC branches have a delay slot; throughout this file
 * the instruction indented one extra space after a branch is that
 * branch's delay-slot instruction and executes regardless of whether
 * the branch is taken (annulled forms like "bz,a"/"b,a" excepted).
 */

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/asi.h>
#include <asm/smp.h>
#include <asm/contregs.h>
#include <asm/winmacro.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>

/* Register aliases used by this file:
 *   %t_psr/%t_pc/%t_npc/%t_wim - trap-time %psr, %pc, %npc, %wim
 *   %twin_tmp1, %glob_tmp      - scratch
 *   %curptr                    - current thread_info pointer (%g6)
 */
#define t_psr     l0
#define t_pc      l1
#define t_npc     l2
#define t_wim     l3
#define twin_tmp1 l4
#define glob_tmp  g4
#define curptr    g6

	/* 7 WINDOW SPARC PATCH INSTRUCTIONS */
	/* The in-line code below assumes 8 register windows (shift by 8,
	 * mask 0xff).  These five instructions are the 7-window variants
	 * (shift by 7, mask 0x7f) — presumably patched over the
	 * rtrap_patch1..5 sites at boot on 7-window CPUs; the patching
	 * itself happens outside this file.
	 */
	.globl	rtrap_7win_patch1, rtrap_7win_patch2, rtrap_7win_patch3
	.globl	rtrap_7win_patch4, rtrap_7win_patch5
rtrap_7win_patch1:	srl	%t_wim, 0x6, %glob_tmp
rtrap_7win_patch2:	and	%glob_tmp, 0x7f, %glob_tmp
rtrap_7win_patch3:	srl	%g1, 7, %g2
rtrap_7win_patch4:	srl	%g2, 6, %g2
rtrap_7win_patch5:	and	%g1, 0x7f, %g1
	/* END OF PATCH INSTRUCTIONS */

	/* We need to check for a few things which are:
	 * 1) The need to call schedule() because this
	 *    processes quantum is up.
	 * 2) Pending signals for this process, if any
	 *    exist we need to call do_signal() to do
	 *    the needy.
	 *
	 * Else we just check if the rett would land us
	 * in an invalid window, if so we need to grab
	 * it off the user/kernel stack first.
	 */

	.globl	ret_trap_entry, rtrap_patch1, rtrap_patch2
	.globl	rtrap_patch3, rtrap_patch4, rtrap_patch5
	.globl	ret_trap_lockless_ipi
ret_trap_entry:
ret_trap_lockless_ipi:
	/* PSR_PS set means the trap came from supervisor mode:
	 * take the kernel return path, skipping signal/resched work.
	 * PSR_SYSCALL is a software flag bit cleared before return.
	 */
	andcc	%t_psr, PSR_PS, %g0
	sethi	%hi(PSR_SYSCALL), %g1
	be	1f
	 andn	%t_psr, %g1, %t_psr

	wr	%t_psr, 0x0, %psr
	b	ret_trap_kernel
	 nop

1:
	/* Returning to user space: honor need_resched first. */
	ld	[%curptr + TI_FLAGS], %g2
	andcc	%g2, (_TIF_NEED_RESCHED), %g0
	be	signal_p
	 nop

	call	schedule
	 nop

	ld	[%curptr + TI_FLAGS], %g2
signal_p:
	/* Loop here until no notify-resume work (signals etc.) remains;
	 * do_notify_resume() may set new flags, so %g2 is reloaded and
	 * we branch back after each pass.
	 */
	andcc	%g2, _TIF_DO_NOTIFY_RESUME_MASK, %g0
	bz,a	ret_trap_continue
	 ld	[%sp + STACKFRAME_SZ + PT_PSR], %t_psr

	mov	%g2, %o2
	mov	%l6, %o1
	call	do_notify_resume
	 add	%sp, STACKFRAME_SZ, %o0	! pt_regs ptr

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

ret_trap_continue:
	sethi	%hi(PSR_SYSCALL), %g1
	andn	%t_psr, %g1, %t_psr
	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	/* If user windows were saved to the thread buffer (TI_W_SAVED
	 * != 0), flush them to the user stack with traps re-enabled,
	 * then re-check for pending work before trying again.
	 */
	ld	[%curptr + TI_W_SAVED], %twin_tmp1
	orcc	%g0, %twin_tmp1, %g0
	be	ret_trap_nobufwins
	 nop

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	mov	1, %o1
	call	try_to_clear_window_buffer
	 add	%sp, STACKFRAME_SZ, %o0

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

ret_trap_nobufwins:
	/* Load up the user's out registers so we can pull
	 * a window from the stack, if necessary.
	 */
	LOAD_PT_INS(sp)

	/* If there are already live user windows in the
	 * set we can return from trap safely.
	 */
	ld	[%curptr + TI_UWINMASK], %twin_tmp1
	orcc	%g0, %twin_tmp1, %g0
	bne	ret_trap_userwins_ok
	 nop

		/* Calculate new %wim, we have to pull a register
		 * window from the users stack.
		 */
ret_trap_pull_one_window:
		/* New %wim = rotate trap-time %wim left by one window
		 * (shift-left-1 OR'd with shift-right-(nwindows-1),
		 * masked to nwindows bits — 8 windows unless patched).
		 */
		rd	%wim, %t_wim
		sll	%t_wim, 0x1, %twin_tmp1
rtrap_patch1:	srl	%t_wim, 0x7, %glob_tmp
		or	%glob_tmp, %twin_tmp1, %glob_tmp
rtrap_patch2:	and	%glob_tmp, 0xff, %glob_tmp

		wr	%glob_tmp, 0x0, %wim

	/* Here comes the architecture specific
	 * branch to the user stack checking routine
	 * for return from traps.
	 * Delay slot pre-computes the %fp alignment test
	 * (condition codes consumed by srmmu_rett_stackchk).
	 */
	b	srmmu_rett_stackchk
	 andcc	%fp, 0x7, %g0

ret_trap_userwins_ok:
	/* Reject a user %pc/%npc that is not 4-byte aligned. */
	LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc)
	or	%t_pc, %t_npc, %g2
	andcc	%g2, 0x3, %g0
	sethi	%hi(PSR_SYSCALL), %g2
	be	1f
	 andn	%t_psr, %g2, %t_psr

	b	ret_trap_unaligned_pc
	 add	%sp, STACKFRAME_SZ, %o0

1:
	LOAD_PT_YREG(sp, g1)
	LOAD_PT_GLOBALS(sp)

	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc

ret_trap_unaligned_pc:
	ld	[%sp + STACKFRAME_SZ + PT_PC], %o1
	ld	[%sp + STACKFRAME_SZ + PT_NPC], %o2
	ld	[%sp + STACKFRAME_SZ + PT_PSR], %o3

	wr	%t_wim, 0x0, %wim		! or else...

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	call	do_memaccess_unaligned
	 nop

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

ret_trap_kernel:
		/* Will the rett land us in the invalid window? */
		/* %g1 = window mask the rett would restore into:
		 * bit (CWP+1), computed via the rotate below.
		 */
		mov	2, %g1
		sll	%g1, %t_psr, %g1
rtrap_patch3:	srl	%g1, 8, %g2
		or	%g1, %g2, %g1
		rd	%wim, %g2
		andcc	%g2, %g1, %g0
		be	1f		! Nope, just return from the trap
		 sll	%g2, 0x1, %g1

		/* We have to grab a window before returning. */
		/* Rotate %wim left by one window so the target
		 * window becomes valid, then reload it below.
		 */
rtrap_patch4:	srl	%g2, 7,  %g2
		or	%g1, %g2, %g1
rtrap_patch5:	and	%g1, 0xff, %g1

	wr	%g1, 0x0, %wim

	/* Grrr, make sure we load from the right %sp... */
	LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)

	/* Step back one window, refill it from its stack frame,
	 * then step forward again before the final reload.
	 */
	restore	%g0, %g0, %g0
	LOAD_WINDOW(sp)
	b	2f
	 save	%g0, %g0, %g0

	/* Reload the entire frame in case this is from a
	 * kernel system call or whatever...
	 */
1:
	LOAD_PT_ALL(sp, t_psr, t_pc, t_npc, g1)
2:
	sethi	%hi(PSR_SYSCALL), %twin_tmp1
	andn	%t_psr, %twin_tmp1, %t_psr
	wr	%t_psr, 0x0, %psr
	WRITE_PAUSE

	jmp	%t_pc
	rett	%t_npc

ret_trap_user_stack_is_bolixed:
	/* The user's stack is unusable for the window refill;
	 * restore trap-time %wim, re-enable traps and let
	 * window_ret_fault() deal with the process.
	 */
	wr	%t_wim, 0x0, %wim

	wr	%t_psr, PSR_ET, %psr
	WRITE_PAUSE

	call	window_ret_fault
	 add	%sp, STACKFRAME_SZ, %o0

	b	signal_p
	 ld	[%curptr + TI_FLAGS], %g2

	.globl	srmmu_rett_stackchk
srmmu_rett_stackchk:
	/* Entered with condition codes from "andcc %fp, 0x7, %g0":
	 * a misaligned user %fp, or one pointing at/above
	 * PAGE_OFFSET (kernel space), is bolixed.
	 */
	bne	ret_trap_user_stack_is_bolixed
	 sethi   %hi(PAGE_OFFSET), %g1
	cmp	%g1, %fp
	bleu	ret_trap_user_stack_is_bolixed
	 mov	AC_M_SFSR, %g1
	/* Clear any stale fault status, then set bit 0x2 in the MMU
	 * control register — presumably the SRMMU no-fault bit, so the
	 * window loads below record a fault in the SFSR instead of
	 * trapping — TODO confirm against contregs.h.
	 */
	LEON_PI(lda	[%g1] ASI_LEON_MMUREGS, %g0)
	SUN_PI_(lda	[%g1] ASI_M_MMUREGS, %g0)

	LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %g1)
	SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %g1)
	or	%g1, 0x2, %g1
	LEON_PI(sta	%g1, [%g0] ASI_LEON_MMUREGS)
	SUN_PI_(sta	%g1, [%g0] ASI_M_MMUREGS)

	/* Slip into the freshly-invalidated window and pull its
	 * registers from the user stack.
	 */
	restore	%g0, %g0, %g0

	LOAD_WINDOW(sp)

	save	%g0, %g0, %g0

	/* Turn the no-fault bit back off. */
	andn	%g1, 0x2, %g1
	LEON_PI(sta	%g1, [%g0] ASI_LEON_MMUREGS)
	SUN_PI_(sta	%g1, [%g0] ASI_M_MMUREGS)

	/* Read fault address and status; SFSR bit 0x2 set means one of
	 * the window loads above faulted.
	 */
	mov	AC_M_SFAR, %g2
	LEON_PI(lda	[%g2] ASI_LEON_MMUREGS, %g2)
	SUN_PI_(lda	[%g2] ASI_M_MMUREGS, %g2)

	mov	AC_M_SFSR, %g1
	LEON_PI(lda	[%g1] ASI_LEON_MMUREGS, %g1)
	SUN_PI_(lda	[%g1] ASI_M_MMUREGS, %g1)
	andcc	%g1, 0x2, %g0
	be	ret_trap_userwins_ok
	 nop

	b,a	ret_trap_user_stack_is_bolixed
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.