/*
 * NOTE(review): This chunk is a damaged extraction of the sparc64
 * trap-return code (rtrap.S; GPL-2.0, Jakub Jelinek / David S. Miller
 * copyrights visible in the embedded header).  The original file's
 * newlines were collapsed into a few very long physical lines, its own
 * line numbers (1..378) were fused into the text, and every source line
 * was truncated at a fixed column -- operands such as "call SCHEDU",
 * "wrpr %g0, R" and "ldx [%sp +" are cut off mid-token.  No operand,
 * label target or constant can be recovered from this view, so no
 * code-level change is attempted here; this file must be restored from
 * its upstream source before it can assemble.
 *
 * Structure visible despite the truncation (TODO confirm against the
 * upstream file):
 *  - __handle_preemption / __handle_user_windows / __handle_userfpu /
 *    __handle_signal: pre-return work handlers, each with 661:-style
 *    alternative-instruction sites patched via .sun_m7_1insn_patch
 *    (the adjacent comments mention ADI version tags in pointers);
 *  - rtrap / rtrap_irq / rtrap_xcall / rtrap_nmi: trap-return entry
 *    points; the NMI path notes it must avoid running softirqs, and a
 *    CONFIG_TRACE_IRQFLAGS block guards an irq-tracing call;
 *  - to_user / user_nowork / rt_continue: user-return path reloading
 *    saved state from [%sp + ...] with .sun4v / .fast_ patch sections;
 *  - user_rtt_fill_fixup{,_dax,_mna} / user_rtt_pre_restore /
 *    user_rtt_restore: window-fill fixup and final restore/retry;
 *  - kern_rtt / kern_rtt_restore: kernel-return path;
 *  - to_kernel (under CONFIG_PREEMPTION) and kern_fpucheck: kernel
 *    preemption check and FPU state restore via ldda/membar #Sync.
 */
1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * rtrap.S: Preparing for return from trap on 4 * 5 * Copyright (C) 1997,1998 Jakub Jelinek (jj@s 6 * Copyright (C) 1997 David S. Miller (davem@c 7 */ 8 9 10 #include <asm/asi.h> 11 #include <asm/pstate.h> 12 #include <asm/ptrace.h> 13 #include <asm/spitfire.h> 14 #include <asm/head.h> 15 #include <asm/visasm.h> 16 #include <asm/processor.h> 17 18 #ifdef CONFIG_CONTEXT_TRACKING_USER 19 # define SCHEDULE_USER schedule_user 20 #else 21 # define SCHEDULE_USER schedule 22 #endif 23 24 .text 25 .align 32 26 __handle_preemption: 27 call SCHEDU 28 661: wrpr %g0, R 29 /* If userspace is using ADI, 30 * a pointer with version tag 31 * the ADI security, we must r 32 * we continue execution in th 33 */ 34 .section .sun_m7_1insn_patch, 35 .word 661b 36 wrpr %g0, R 37 .previous 38 ba,pt %xcc, 39 wrpr %g0, R 40 41 __handle_user_windows: 42 add %sp, P 43 call fault_ 44 661: wrpr %g0, R 45 /* If userspace is using ADI, 46 * a pointer with version tag 47 * the ADI security, we must r 48 * we continue execution in th 49 */ 50 .section .sun_m7_1insn_patch, 51 .word 661b 52 wrpr %g0, R 53 .previous 54 ba,pt %xcc, 55 wrpr %g0, R 56 57 __handle_userfpu: 58 rd %fprs, 59 andcc %l5, F 60 sethi %hi(TS 61 be,a,pn %icc, 62 andn %l1, % 63 ba,a,pt %xcc, 64 65 __handle_signal: 66 mov %l5, % 67 add %sp, P 68 mov %l0, % 69 call do_not 70 661: wrpr %g0, R 71 /* If userspace is using ADI, 72 * a pointer with version tag 73 * the ADI security, we must r 74 * we continue execution in th 75 */ 76 .section .sun_m7_1insn_patch, 77 .word 661b 78 wrpr %g0, R 79 .previous 80 wrpr %g0, R 81 82 /* Signal delivery can modify 83 * reload it. 
84 */ 85 ldx [%sp + 86 sethi %hi(0x 87 and %l1, % 88 andn %l1, % 89 ba,pt %xcc, 90 srl %l4, 2 91 92 /* When returning from a NMI ( 93 * avoid running softirqs, doi 94 */ 95 .globl rtrap_ 96 rtrap_nmi: ldx [%sp + 97 sethi %hi(0x 98 and %l1, % 99 andn %l1, % 100 srl %l4, 2 101 ba,pt %xcc, 102 nop 103 /* Do not actually set the %pi 104 * below after we clear PSTATE 105 * If we re-enable interrupts 106 * the hardirq stack potential 107 * stack overflow. 108 */ 109 110 .align 64 111 .globl rtrap_ 112 rtrap_irq: 113 rtrap: 114 /* mm/ultra.S:xcall_report_reg 115 ldx [%sp + 116 rtrap_xcall: 117 sethi %hi(0x 118 and %l1, % 119 andn %l1, % 120 srl %l4, 2 121 #ifdef CONFIG_TRACE_IRQFLAGS 122 brnz,pn %l4, r 123 nop 124 call trace_ 125 nop 126 /* Do not actually set the %pi 127 * below after we clear PSTATE 128 * If we re-enable interrupts 129 * the hardirq stack potential 130 * stack overflow. 131 * 132 * It is tempting to put this 133 * call at the 'rt_continue' l 134 * as that path hits unconditi 135 * execute this in NMI return 136 */ 137 #endif 138 rtrap_no_irq_enable: 139 andcc %l1, T 140 bne,pn %icc, 141 nop 142 143 /* We must hold IRQs off and a 144 * state, then hold them off a 145 * If we are returning to kern 146 * that we are disabling inter 147 * %pil. 148 * 149 * If we do not do this, there 150 * the tests, later the signal 151 * not process it since we are 152 * take until the next local I 153 * event would be handled. 154 * 155 * This also means that if we 156 * windows, we have to redo al 157 * with IRQs disabled. 
158 */ 159 to_user: wrpr %g0, R 160 wrpr 0, %pi 161 __handle_preemption_continue: 162 ldx [%g6 + 163 sethi %hi(_T 164 or %o0, % 165 andcc %l0, % 166 sethi %hi(TS 167 be,pt %xcc, 168 andcc %l1, % 169 andcc %l0, _ 170 bne,pn %xcc, 171 andcc %l0, _ 172 bne,pn %xcc, 173 ldub [%g6 + 174 brnz,pn %o2, _ 175 nop 176 sethi %hi(TS 177 andcc %l1, % 178 179 /* This fpdepth clear is neces 180 user_nowork: 181 bne,pn %xcc, 182 stb %g0, [ 183 __handle_userfpu_continue: 184 185 rt_continue: ldx [%sp + 186 ldx [%sp + 187 188 ldx [%sp + 189 ldx [%sp + 190 ldx [%sp + 191 brz,pt %l3, 1 192 mov %g6, % 193 194 /* Must do this before thread 195 LOAD_PER_CPU_BASE(%g5, %g6, %i 196 1: 197 ldx [%sp + 198 ldx [%sp + 199 200 /* Normal globals are restored 201 661: wrpr %g0, R 202 nop 203 .section .sun4v 204 .word 661b 205 wrpr %g0, R 206 SET_GL(1) 207 .previous 208 209 mov %l2, % 210 211 ldx [%sp + 212 ldx [%sp + 213 214 ldx [%sp + 215 ldx [%sp + 216 ldx [%sp + 217 ldx [%sp + 218 ldx [%sp + 219 ldx [%sp + 220 ldx [%sp + 221 ldx [%sp + 222 223 ld [%sp + 224 wr %o3, % 225 wrpr %l4, 0 226 wrpr %g0, 0 227 andn %l1, T 228 wrpr %l1, % 229 wrpr %l2, % 230 wrpr %o2, % 231 232 brnz,pn %l3, k 233 mov PRIMAR 234 235 661: ldxa [%l7 + 236 .section .sun4v 237 .word 661b 238 ldxa [%l7 + 239 .previous 240 241 sethi %hi(sp 242 ldx [%l1 + 243 or %l0, % 244 245 661: stxa %l0, [ 246 .section .sun4v 247 .word 661b 248 stxa %l0, [ 249 .previous 250 251 sethi %hi(KE 252 flush %l7 253 rdpr %wstat 254 rdpr %other 255 srl %l1, 3 256 257 661: wrpr %l2, % 258 .section .fast_ 259 .word 661b 260 .word 0x8988 261 .previous 262 263 wrpr %l1, % 264 brnz,pt %l2, u 265 661: wrpr %g0, % 266 .section .fast_ 267 .word 661b 268 nop 269 .previous 270 271 ldx [%g6 + 272 wr %g0, A 273 rdpr %cwp, 274 andcc %g3, _ 275 sub %g1, 1 276 bne,pt %xcc, 277 wrpr %g1, % 278 ba,a,pt %xcc, 279 nop 280 281 user_rtt_fill_fixup_dax: 282 ba,pt %xcc, user_rtt_fill_fi 283 mov 1, %g3 284 285 user_rtt_fill_fixup_mna: 286 ba,pt %xcc, user_rtt_fill_fi 287 
mov 2, %g3 288 289 user_rtt_fill_fixup: 290 ba,pt %xcc, user_rtt_fill_fi 291 clr %g3 292 293 user_rtt_pre_restore: 294 add %g1, 1 295 wrpr %g1, 0 296 297 user_rtt_restore: 298 restore 299 rdpr %canre 300 wrpr %g1, 0 301 retry 302 nop 303 304 kern_rtt: rdpr %canre 305 brz,pn %g1, k 306 nop 307 kern_rtt_restore: 308 stw %g0, [ 309 restore 310 retry 311 312 to_kernel: 313 #ifdef CONFIG_PREEMPTION 314 ldsw [%g6 + 315 brnz %l5, k 316 ldx [%g6 + 317 andcc %l5, _ 318 be,pt %xcc, 319 nop 320 cmp %l4, 0 321 bne,pn %xcc, 322 nop 323 call preemp 324 nop 325 ba,pt %xcc, 326 #endif 327 kern_fpucheck: ldub [%g6 + 328 brz,pt %l5, r 329 srl %l5, 1 330 add %g6, T 331 ldub [%l6 + 332 sub %l5, 2 333 334 add %g6, T 335 andcc %l2, ( 336 be,pt %icc, 337 and %l2, F 338 andcc %l2, F 339 be,pn %icc, 340 sll %o0, 3 341 rd %fprs, 342 343 wr %g1, F 344 ldx [%o1 + 345 add %g6, T 346 sll %o0, 8 347 add %g6, T 348 brz,pn %l6, 1 349 add %g6, T 350 351 membar #Sync 352 ldda [%o3 + 353 ldda [%o4 + 354 membar #Sync 355 1: andcc %l2, F 356 be,pn %icc, 357 wr %g1, 0 358 add %o2, 0 359 membar #Sync 360 ldda [%o3 + 361 ldda [%o4 + 362 1: membar #Sync 363 ldx [%o1 + 364 2: stb %l5, [ 365 ba,pt %xcc, 366 nop 367 5: wr %g0, F 368 sll %o0, 8 369 370 add %g6, T 371 add %g6, T 372 membar #Sync 373 ldda [%o3 + 374 ldda [%o4 + 375 membar #Sync 376 wr %g0, F 377 ba,pt %xcc, 378 stb %l5, [
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.