TOMOYO Linux Cross Reference
Linux/arch/arm/kernel/entry-header.S

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH    0
#define BAD_DATA        1
#define BAD_ADDREXCPTN  2
#define BAD_IRQ         3
#define BAD_UNDEFINSTR  4
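@ (These codes are handed to bad_mode() as its "reason" argument by the
@ invalid-entry stubs; see entry-armv.S and the handler table in traps.c.)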

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF           8
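@
@ A rough sketch of the resulting SVC stack layout (assuming the usual
@ vector_swi entry path; offsets are from the kernel SP):
@
@       sp + S_OFF ...  struct pt_regs (S_R0 upwards)
@       sp + 4     ...  syscall arg 6 (copied from r5)
@       sp + 0     ...  syscall arg 5 (copied from r4)
@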

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

        .macro  zero_fp
#ifdef CONFIG_FRAME_POINTER
        mov     fp, #0
#endif
        .endm
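        @ zero_fp clears the frame pointer on kernel entry so that a
        @ frame-pointer backtrace terminates here instead of wandering
        @ into stale or user-controlled state.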

#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...) x
#else
#define ATRAP(x...)
#endif

        .macro  alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
        mrc     p15, 0, \rtmp2, c1, c0, 0       @ read the current SCTLR
        ldr_va  \rtmp1, \label                  @ load the desired SCTLR value
        teq     \rtmp1, \rtmp2
        mcrne   p15, 0, \rtmp1, c1, c0, 0       @ rewrite SCTLR only if it differs
#endif
        .endm

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (32 bytes) before
 * switching to the exception stack (SP_main).
 *
 * If an exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is aligned to 64 bit automatically
 * (CCR.STKALIGN set).
 *
 * Linux assumes that interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and reenabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
        .macro  v7m_exception_entry
        @ determine the location of the registers saved by the core during
        @ exception entry. Depending on the mode the cpu was in when the
        @ exception happened, that is either on the main or the process stack.
        @ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
        @ was used.
        tst     lr, #EXC_RET_STACK_MASK
        mrsne   r12, psp
        moveq   r12, sp

        @ we cannot rely on r0-r3 and r12 matching the values saved in the
        @ exception frame because of tail-chaining. So these have to be
        @ reloaded.
        ldmia   r12!, {r0-r3}

        @ Linux expects to have irqs off. Do it here before taking stack space
        cpsid   i

        sub     sp, #PT_REGS_SIZE-S_IP
        stmdb   sp!, {r0-r11}

        @ load saved r12, lr, return address and xPSR.
        @ r0-r7 are used for signals and never touched from now on. Clobbering
        @ r8-r12 is OK.
        mov     r9, r12
        ldmia   r9!, {r8, r10-r12}

        @ calculate the original stack pointer value.
        @ r9 currently points to the memory location just above the auto saved
        @ xPSR.
        @ The cpu might have automatically 8-byte aligned the stack. Bit 9
        @ of the saved xPSR specifies whether that realignment took place, in
        @ which case an extra 32-bit padding word is included in the frame.

        tst     r12, V7M_xPSR_FRAMEPTRALIGN
        addne   r9, r9, #4

        @ store saved r12 using str to have a register to hold the base for stm
        str     r8, [sp, #S_IP]
        add     r8, sp, #S_SP
        @ store r13-r15, xPSR
        stmia   r8!, {r9-r12}
        @ store old_r0
        str     r0, [r8]
        .endm

        /*
         * PENDSV and SVCALL are configured to have the same exception
         * priorities. As a kernel thread runs at SVCALL execution priority it
         * can never be preempted and so we will never have to return to a
         * kernel thread here.
         */
        .macro  v7m_exception_slow_exit ret_r0
        cpsid   i
        ldr     lr, =exc_ret
        ldr     lr, [lr]
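        @ lr now holds the EXC_RETURN value that was saved at exception
        @ entry; it tells the core which stack and register state to
        @ restore when we bx to it below.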

        @ read original r12, sp, lr, pc and xPSR
        add     r12, sp, #S_IP
        ldmia   r12, {r1-r5}

        @ an exception frame is always 8-byte aligned. To tell the hardware
        @ whether the sp being restored is aligned or not, set bit 9 of the
        @ saved xPSR accordingly.
        tst     r2, #4
        subne   r2, r2, #4
        orrne   r5, V7M_xPSR_FRAMEPTRALIGN
        biceq   r5, V7M_xPSR_FRAMEPTRALIGN

        @ ensure bit 0 is cleared in the PC, otherwise behaviour is
        @ unpredictable
        bic     r4, #1

        @ write basic exception frame
        stmdb   r2!, {r1, r3-r5}
        ldmia   sp, {r1, r3-r5}
        .if     \ret_r0
        stmdb   r2!, {r0, r3-r5}
        .else
        stmdb   r2!, {r1, r3-r5}
        .endif

        @ restore process sp
        msr     psp, r2

        @ restore original r4-r11 (r0-r3 are popped as well, but they will
        @ be reloaded from the exception frame on return)
        ldmia   sp!, {r0-r11}

        @ restore main sp
        add     sp, sp, #PT_REGS_SIZE-S_IP

        cpsie   i
        bx      lr
        .endm
#endif  /* CONFIG_CPU_V7M */

        @
        @ Store/load the USER SP and LR registers by switching to the SYS
        @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
        @ available. Should only be called from SVC mode.
        @
        .macro  store_user_sp_lr, rd, rtemp, offset = 0
        mrs     \rtemp, cpsr
        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch to the SYS mode

        str     sp, [\rd, #\offset]             @ save sp_usr
        str     lr, [\rd, #\offset + 4]         @ save lr_usr

        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
        .endm

        .macro  load_user_sp_lr, rd, rtemp, offset = 0
        mrs     \rtemp, cpsr
        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch to the SYS mode

        ldr     sp, [\rd, #\offset]             @ load sp_usr
        ldr     lr, [\rd, #\offset + 4]         @ load lr_usr

        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
        .endm
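        @ Example use (see restore_user_regs below): sp_usr lives at S_SP
        @ in struct pt_regs and lr_usr directly after it, so
        @       load_user_sp_lr r2, r3, \offset + S_SP
        @ reloads both from a saved register frame.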


        .macro  svc_exit, rpsr, irq = 0
        .if     \irq != 0
        @ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
        @ The parent context IRQs must have been enabled to get here in
        @ the first place, so there's no point checking the PSR I bit.
        bl      trace_hardirqs_on
#endif
        .else
        @ IRQs off again before pulling preserved data off the stack
        disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
        tst     \rpsr, #PSR_I_BIT
        bleq    trace_hardirqs_on
        tst     \rpsr, #PSR_I_BIT
        blne    trace_hardirqs_off
#endif
        .endif
        uaccess_exit tsk, r0, r1

#ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode SVC restore
        msr     spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        sub     r0, sp, #4                      @ uninhabited address
        strex   r1, r2, [r0]                    @ clear the exclusive monitor
#endif
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
#else
        @ Thumb mode SVC restore
        ldr     lr, [sp, #S_SP]                 @ top of the stack
        ldrd    r0, r1, [sp, #S_LR]             @ calling lr and pc

        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r2, r1, [sp, #S_LR]             @ clear the exclusive monitor

        stmdb   lr!, {r0, r1, \rpsr}            @ calling lr and rfe context
        ldmia   sp, {r0 - r12}
        mov     sp, lr
        ldr     lr, [sp], #4
        rfeia   sp!
#endif
        .endm

        @
        @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
        @
        @ This macro acts in a similar manner to svc_exit but switches to FIQ
        @ mode to restore the final part of the register state.
        @
        @ We cannot use the normal svc_exit procedure because that would
        @ clobber spsr_svc (FIQ could be delivered during the first few
        @ instructions of vector_swi, meaning its contents have not been
        @ saved anywhere).
        @
        @ Note that, unlike svc_exit, this macro also does not allow a caller
        @ supplied rpsr. This is because the FIQ exceptions are not re-entrant
        @ and the handlers cannot call into the scheduler (meaning the value
        @ on the stack remains correct).
        @
        .macro  svc_exit_via_fiq
        uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode restore
        mov     r0, sp
        ldmib   r0, {r1 - r14}  @ abort is deadly from here onward (it will
                                @ clobber state restored below)
        msr     cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
        add     r8, r0, #S_PC
        ldr     r9, [r0, #S_PSR]
        msr     spsr_cxsf, r9
        ldr     r0, [r0, #S_R0]
        ldmia   r8, {pc}^
#else
        @ Thumb mode restore
        add     r0, sp, #S_R2
        ldr     lr, [sp, #S_LR]
        ldr     sp, [sp, #S_SP] @ abort is deadly from here onward (it will
                                @ clobber state restored below)
        ldmia   r0, {r2 - r12}
        mov     r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
        msr     cpsr_c, r1
        sub     r0, #S_R2
        add     r8, r0, #S_PC
        ldmia   r0, {r0 - r1}
        rfeia   r8
#endif
        .endm


        .macro  restore_user_regs, fast = 0, offset = 0
#if defined(CONFIG_CPU_32v6K) && \
    (!defined(CONFIG_CPU_V6) || defined(CONFIG_SMP))
#ifdef CONFIG_CPU_V6
ALT_SMP(nop)
ALT_UP_B(.L1_\@)
#endif
        @ The TLS register update is deferred until return to user space so we
        @ can use it for other things while running in the kernel
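        @ (c13, c0, 3 is TPIDRURO: with CONFIG_CURRENT_POINTER_IN_TPIDRURO
        @ the kernel reuses it for the current thread_info pointer, so the
        @ user's TLS value is only written back here on the return path)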
        mrc     p15, 0, r1, c13, c0, 3          @ get current_thread_info pointer
        ldr     r1, [r1, #TI_TP_VALUE]
        mcr     p15, 0, r1, c13, c0, 3          @ set TLS register
.L1_\@:
#endif

        uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode restore
        mov     r2, sp
        ldr     r1, [r2, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [r2, #\offset + S_PC]!      @ get pc
        tst     r1, #PSR_I_BIT | 0x0f
        bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r1, r2, [r2]                    @ clear the exclusive monitor
#endif
        .if     \fast
        ldmdb   r2, {r1 - lr}^                  @ get calling r1 - lr
        .else
        ldmdb   r2, {r0 - lr}^                  @ get calling r0 - lr
        .endif
        mov     r0, r0                          @ ARMv5T and earlier require a nop
                                                @ after ldm {}^
        add     sp, sp, #\offset + PT_REGS_SIZE
        movs    pc, lr                          @ return & move spsr_svc into cpsr
1:      bug     "Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
        @ V7M restore.
        @ Note that we don't need to do clrex here as clearing the local
        @ monitor is part of the exception entry and exit sequence.
        .if     \offset
        add     sp, #\offset
        .endif
        v7m_exception_slow_exit ret_r0 = \fast
#else
        @ Thumb mode restore
        mov     r2, sp
        load_user_sp_lr r2, r3, \offset + S_SP  @ calling sp, lr
        ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [sp, #\offset + S_PC]       @ get pc
        add     sp, sp, #\offset + S_SP
        tst     r1, #PSR_I_BIT | 0x0f
        bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc

        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r1, r2, [sp]                    @ clear the exclusive monitor

        .if     \fast
        ldmdb   sp, {r1 - r12}                  @ get calling r1 - r12
        .else
        ldmdb   sp, {r0 - r12}                  @ get calling r0 - r12
        .endif
        add     sp, sp, #PT_REGS_SIZE - S_SP
        movs    pc, lr                          @ return & move spsr_svc into cpsr
1:      bug     "Returning to usermode but unexpected PSR bits set?", \@
#endif  /* !CONFIG_THUMB2_KERNEL */
        .endm

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
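        @ Note: with save = 1 the AAPCS caller-clobbered registers
        @ (r0-r3, ip, lr) are preserved around the call into C code;
        @ pass save = 0 only when the caller no longer needs them.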
        .macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING_USER
        .if     \save
        stmdb   sp!, {r0-r3, ip, lr}
        bl      user_exit_callable
        ldmia   sp!, {r0-r3, ip, lr}
        .else
        bl      user_exit_callable
        .endif
#endif
        .endm

        .macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING_USER
        .if     \save
        stmdb   sp!, {r0-r3, ip, lr}
        bl      user_enter_callable
        ldmia   sp!, {r0-r3, ip, lr}
        .else
        bl      user_enter_callable
        .endif
#endif
        .endm

        .macro  invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
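        @ Spectre-v1 hardening: an out-of-range syscall number is clamped
        @ to 0 (movcs) and csdb acts as a speculation barrier, so a
        @ mispredicted bounds check cannot index past the end of the
        @ syscall table even speculatively.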
        mov     \tmp, \nr
        cmp     \tmp, #NR_syscalls              @ check upper syscall limit
        movcs   \tmp, #0
        csdb
        badr    lr, \ret                        @ return address
        .if     \reload
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        ldmiacc r1, {r0 - r6}                   @ reload r0-r6
        stmiacc sp, {r4, r5}                    @ update stack arguments
        .endif
        ldrcc   pc, [\table, \tmp, lsl #2]      @ call sys_* routine
#else
        cmp     \nr, #NR_syscalls               @ check upper syscall limit
        badr    lr, \ret                        @ return address
        .if     \reload
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        ldmiacc r1, {r0 - r6}                   @ reload r0-r6
        stmiacc sp, {r4, r5}                    @ update stack arguments
        .endif
        ldrcc   pc, [\table, \nr, lsl #2]       @ call sys_* routine
#endif
        .endm
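        @ Typical invocation (from the SWI entry path in entry-common.S):
        @       invoke_syscall tbl, scno, r10, __ret_fast_syscall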

/*
 * These are the registers used in the syscall handler, and they allow
 * us, in theory, to pass up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number in Thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno    .req    r7              @ syscall number
tbl     .req    r8              @ syscall table pointer
why     .req    r8              @ Linux syscall (!= 0)
tsk     .req    r9              @ current thread_info

        .macro  do_overflow_check, frame_size:req
#ifdef CONFIG_VMAP_STACK
        @
        @ Test whether the SP has overflowed. Task and IRQ stacks are aligned
        @ so that SP & BIT(THREAD_SIZE_ORDER + PAGE_SHIFT) should always be
        @ zero.
        @
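        @ For example, assuming the usual 8 KiB kernel stacks
        @ (THREAD_SIZE_ORDER = 1, PAGE_SHIFT = 12), the stacks end up
        @ 16 KiB aligned: bit 13 of SP is 0 anywhere inside the stack
        @ and flips to 1 as soon as SP underflows into the guard area.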
ARM(    tst     sp, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)      )
THUMB(  tst     r1, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)      )
THUMB(  it      ne                                              )
        bne     .Lstack_overflow_check\@

        .pushsection    .text
.Lstack_overflow_check\@:
        @
        @ The stack pointer is not pointing to a valid vmap'ed stack, but it
        @ may be pointing into the linear map instead, which may happen if we
        @ are already running from the overflow stack. We cannot detect overflow
        @ in such cases so just carry on.
        @
        str     ip, [r0, #12]                   @ Stash IP on the mode stack
        ldr_va  ip, high_memory                 @ Start of VMALLOC space
ARM(    cmp     sp, ip                  )       @ SP in vmalloc space?
THUMB(  cmp     r1, ip                  )
THUMB(  itt     lo                      )
        ldrlo   ip, [r0, #12]                   @ Restore IP
        blo     .Lout\@                         @ Carry on

THUMB(  sub     r1, sp, r1              )       @ Restore original R1
THUMB(  sub     sp, r1                  )       @ Restore original SP
        add     sp, sp, #\frame_size            @ Undo svc_entry's SP change
        b       __bad_stack                     @ Handle VMAP stack overflow
        .popsection
.Lout\@:
#endif
        .endm
