
TOMOYO Linux Cross Reference
Linux/arch/arm/kernel/entry-armv.S


  1 /* SPDX-License-Identifier: GPL-2.0-only */
  2 /*
  3  *  linux/arch/arm/kernel/entry-armv.S
  4  *
  5  *  Copyright (C) 1996,1997,1998 Russell King.
  6  *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
  7  *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
  8  *
  9  *  Low-level vector interface routines
 10  *
 11  *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 12  *  that causes it to save wrong values...  Be aware!
 13  */
 14 
 15 #include <linux/init.h>
 16 
 17 #include <asm/assembler.h>
 18 #include <asm/page.h>
 19 #include <asm/glue-df.h>
 20 #include <asm/glue-pf.h>
 21 #include <asm/vfpmacros.h>
 22 #include <asm/thread_notify.h>
 23 #include <asm/unwind.h>
 24 #include <asm/unistd.h>
 25 #include <asm/tls.h>
 26 #include <asm/system_info.h>
 27 #include <asm/uaccess-asm.h>
 28 
 29 #include "entry-header.S"
 30 #include <asm/probes.h>
 31 
 32 #ifdef CONFIG_HAVE_LD_DEAD_CODE_DATA_ELIMINATION
 33 #define RELOC_TEXT_NONE .reloc  .text, R_ARM_NONE, .
 34 #else
 35 #define RELOC_TEXT_NONE
 36 #endif
 37 
 38 /*
 39  * Interrupt handling.
 40  */
 41         .macro  irq_handler, from_user:req
 42         mov     r1, sp
 43         ldr_this_cpu r2, irq_stack_ptr, r2, r3
 44         .if     \from_user == 0
 45         @
 46         @ If we took the interrupt while running in the kernel, we may already
 47         @ be using the IRQ stack, so revert to the original value in that case.
 48         @
 49         subs    r3, r2, r1              @ SP above bottom of IRQ stack?
 50         rsbscs  r3, r3, #THREAD_SIZE    @ ... and below the top?
 51 #ifdef CONFIG_VMAP_STACK
 52         ldr_va  r3, high_memory, cc     @ End of the linear region
 53         cmpcc   r3, r1                  @ Stack pointer was below it?
 54 #endif
 55         bcc     0f                      @ If not, switch to the IRQ stack
 56         mov     r0, r1
 57         bl      generic_handle_arch_irq
 58         b       1f
 59 0:
 60         .endif
 61 
 62         mov_l   r0, generic_handle_arch_irq
 63         bl      call_with_stack
 64 1:
 65         .endm
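The from_user == 0 path above avoids switching stacks twice: when the interrupt was taken in kernel mode and SP already lies within the per-CPU IRQ stack (the VMAP_STACK build adds a further test against high_memory), the handler runs on the current stack; otherwise call_with_stack() installs the IRQ stack before invoking generic_handle_arch_irq. A minimal C sketch of the range test, assuming a hypothetical irq_stack_bottom value and an 8 KiB THREAD_SIZE:

        #include <stdbool.h>
        #include <stdint.h>

        #define THREAD_SIZE 8192UL              /* assumption: 8 KiB kernel stacks */

        /* Illustrative only: is sp already inside [bottom, bottom + THREAD_SIZE)? */
        static bool within_irq_stack(uintptr_t sp, uintptr_t irq_stack_bottom)
        {
                /* unsigned wrap makes this one compare, like the subs/rsbscs pair */
                return (sp - irq_stack_bottom) < THREAD_SIZE;
        }

If the test fails, the assembly falls through to the call_with_stack() path, which switches to the IRQ stack before calling the handler.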
 66 
 67         .macro  pabt_helper
 68         @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
 69 #ifdef MULTI_PABORT
 70         ldr_va  ip, processor, offset=PROCESSOR_PABT_FUNC
 71         bl_r    ip
 72 #else
 73         bl      CPU_PABORT_HANDLER
 74 #endif
 75         .endm
 76 
 77         .macro  dabt_helper
 78 
 79         @
 80         @ Call the processor-specific abort handler:
 81         @
 82         @  r2 - pt_regs
 83         @  r4 - aborted context pc
 84         @  r5 - aborted context psr
 85         @
 86         @ The abort handler must return the aborted address in r0, and
 87         @ the fault status register in r1.  r9 must be preserved.
 88         @
 89 #ifdef MULTI_DABORT
 90         ldr_va  ip, processor, offset=PROCESSOR_DABT_FUNC
 91         bl_r    ip
 92 #else
 93         bl      CPU_DABORT_HANDLER
 94 #endif
 95         .endm
 96 
 97         .section        .entry.text,"ax",%progbits
 98 
 99 /*
100  * Invalid mode handlers
101  */
102         .macro  inv_entry, reason
103         sub     sp, sp, #PT_REGS_SIZE
104  ARM(   stmib   sp, {r1 - lr}           )
105  THUMB( stmia   sp, {r0 - r12}          )
106  THUMB( str     sp, [sp, #S_SP]         )
107  THUMB( str     lr, [sp, #S_LR]         )
108         mov     r1, #\reason
109         .endm
110 
111 __pabt_invalid:
112         inv_entry BAD_PREFETCH
113         b       common_invalid
114 ENDPROC(__pabt_invalid)
115 
116 __dabt_invalid:
117         inv_entry BAD_DATA
118         b       common_invalid
119 ENDPROC(__dabt_invalid)
120 
121 __irq_invalid:
122         inv_entry BAD_IRQ
123         b       common_invalid
124 ENDPROC(__irq_invalid)
125 
126 __und_invalid:
127         inv_entry BAD_UNDEFINSTR
128 
129         @
130         @ XXX fall through to common_invalid
131         @
132 
133 @
134 @ common_invalid - generic code for failed exception (re-entrant version of handlers)
135 @
136 common_invalid:
137         zero_fp
138 
139         ldmia   r0, {r4 - r6}
140         add     r0, sp, #S_PC           @ here for interlock avoidance
141         mov     r7, #-1                 @  ""   ""    ""        ""
142         str     r4, [sp]                @ save preserved r0
143         stmia   r0, {r5 - r7}           @ lr_<exception>,
144                                         @ cpsr_<exception>, "old_r0"
145 
146         mov     r0, sp
147         b       bad_mode
148 ENDPROC(__und_invalid)
149 
150 /*
151  * SVC mode handlers
152  */
153 
154 #if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
155 #define SPFIX(code...) code
156 #else
157 #define SPFIX(code...)
158 #endif
159 
160         .macro  svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
161  UNWIND(.fnstart                )
162         sub     sp, sp, #(SVC_REGS_SIZE + \stack_hole)
163  THUMB( add     sp, r1          )       @ get SP in a GPR without
164  THUMB( sub     r1, sp, r1      )       @ using a temp register
165 
166         .if     \overflow_check
167  UNWIND(.save   {r0 - pc}       )
168         do_overflow_check (SVC_REGS_SIZE + \stack_hole)
169         .endif
170 
171 #ifdef CONFIG_THUMB2_KERNEL
172         tst     r1, #4                  @ test stack pointer alignment
173         sub     r1, sp, r1              @ restore original R1
174         sub     sp, r1                  @ restore original SP
175 #else
176  SPFIX( tst     sp, #4          )
177 #endif
178  SPFIX( subne   sp, sp, #4      )
179 
180  ARM(   stmib   sp, {r1 - r12}  )
181  THUMB( stmia   sp, {r0 - r12}  )       @ No STMIB in Thumb-2
182 
183         ldmia   r0, {r3 - r5}
184         add     r7, sp, #S_SP           @ here for interlock avoidance
185         mov     r6, #-1                 @  ""  ""      ""       ""
186         add     r2, sp, #(SVC_REGS_SIZE + \stack_hole)
187  SPFIX( addne   r2, r2, #4      )
188         str     r3, [sp]                @ save the "real" r0 copied
189                                         @ from the exception stack
190 
191         mov     r3, lr
192 
193         @
194         @ We are now ready to fill in the remaining blanks on the stack:
195         @
196         @  r2 - sp_svc
197         @  r3 - lr_svc
198         @  r4 - lr_<exception>, already fixed up for correct return/restart
199         @  r5 - spsr_<exception>
200         @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
201         @
202         stmia   r7, {r2 - r6}
203 
204         get_thread_info tsk
205         uaccess_entry tsk, r0, r1, r2, \uaccess
206 
207         .if \trace
208 #ifdef CONFIG_TRACE_IRQFLAGS
209         bl      trace_hardirqs_off
210 #endif
211         .endif
212         .endm
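On EABI configurations the SPFIX() lines above keep the saved register frame 64-bit aligned: if SP turns out to be only word aligned after the frame has been reserved, it is dropped by another 4 bytes, and the same offset is added back when the original sp_svc is recorded. A small C model of that adjustment, purely illustrative:

        #include <stdint.h>

        /* Model of the SPFIX() adjustment in svc_entry: open a 4-byte gap when the
         * stack pointer is only 4-byte aligned, so the pt_regs frame stays 8-byte
         * aligned as the EABI requires. */
        static uintptr_t spfix(uintptr_t sp)
        {
                if (sp & 4)
                        sp -= 4;
                return sp;
        }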
213 
214         .align  5
215 __dabt_svc:
216         svc_entry uaccess=0
217         mov     r2, sp
218         dabt_helper
219  THUMB( ldr     r5, [sp, #S_PSR]        )       @ potentially updated CPSR
220         svc_exit r5                             @ return from exception
221  UNWIND(.fnend          )
222 ENDPROC(__dabt_svc)
223 
224         .align  5
225 __irq_svc:
226         svc_entry
227         irq_handler from_user=0
228 
229 #ifdef CONFIG_PREEMPTION
230         ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
231         ldr     r0, [tsk, #TI_FLAGS]            @ get flags
232         teq     r8, #0                          @ if preempt count != 0
233         movne   r0, #0                          @ force flags to 0
234         tst     r0, #_TIF_NEED_RESCHED
235         blne    svc_preempt
236 #endif
237 
238         svc_exit r5, irq = 1                    @ return from exception
239  UNWIND(.fnend          )
240 ENDPROC(__irq_svc)
241 
242         .ltorg
243 
244 #ifdef CONFIG_PREEMPTION
245 svc_preempt:
246         mov     r8, lr
247 1:      bl      preempt_schedule_irq            @ irq en/disable is done inside
248         ldr     r0, [tsk, #TI_FLAGS]            @ get new tasks TI_FLAGS
249         tst     r0, #_TIF_NEED_RESCHED
250         reteq   r8                              @ go again
251         b       1b
252 #endif
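Taken together, the CONFIG_PREEMPTION block in __irq_svc and svc_preempt reschedule on the way out of a kernel-mode interrupt only when the preempt count is zero and TIF_NEED_RESCHED is set, and they keep calling preempt_schedule_irq until the flag is clear. A rough C rendering of that control flow; the struct and flag value are illustrative stand-ins, not the kernel definitions:

        /* Illustrative stand-ins for thread_info and TIF_NEED_RESCHED. */
        struct ti_sketch {
                int           preempt_count;
                unsigned long flags;
        };
        #define NEED_RESCHED_SKETCH (1UL << 1)

        static void schedule_irq_sketch(struct ti_sketch *ti)
        {
                /* stands in for preempt_schedule_irq(); pretend we rescheduled */
                ti->flags &= ~NEED_RESCHED_SKETCH;
        }

        static void irq_exit_preempt_sketch(struct ti_sketch *ti)
        {
                if (ti->preempt_count != 0)             /* preemption disabled */
                        return;

                while (ti->flags & NEED_RESCHED_SKETCH) /* "go again" loop above */
                        schedule_irq_sketch(ti);
        }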
253 
254 __und_fault:
255         @ Correct the PC such that it points at the instruction which
256         @ caused the fault.  If the faulting instruction was ARM, the PC
257         @ will be pointing at the next instruction, so we have to
258         @ subtract 4.  Otherwise, it is Thumb, and the PC will be
259         @ pointing at the second half of the Thumb instruction, so we
260         @ have to subtract 2.
261         ldr     r2, [r0, #S_PC]
262         sub     r2, r2, r1
263         str     r2, [r0, #S_PC]
264         b       do_undefinstr
265 ENDPROC(__und_fault)
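__und_fault simply rewinds the saved PC by the amount its caller passes in r1 (4 for ARM, 2 for Thumb, chosen from PSR_T_BIT) before handing the regs to do_undefinstr. The equivalent C one-liner, as an illustration:

        #include <stdint.h>

        #define PSR_T_BIT (1u << 5)             /* Thumb state bit in the CPSR */

        /* Illustrative: point the saved PC back at the undefined instruction. */
        static void und_fixup_pc(uint32_t *saved_pc, uint32_t saved_psr)
        {
                *saved_pc -= (saved_psr & PSR_T_BIT) ? 2u : 4u;
        }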
266 
267         .align  5
268 __und_svc:
269 #ifdef CONFIG_KPROBES
270         @ If a kprobe is about to simulate a "stmdb sp..." instruction,
271         @ it obviously needs free stack space, which will then belong to
272         @ the saved context.
273         svc_entry MAX_STACK_SIZE
274 #else
275         svc_entry
276 #endif
277 
278         mov     r1, #4                          @ PC correction to apply
279  THUMB( tst     r5, #PSR_T_BIT          )       @ exception taken in Thumb mode?
280  THUMB( movne   r1, #2                  )       @ if so, fix up PC correction
281         mov     r0, sp                          @ struct pt_regs *regs
282         bl      __und_fault
283 
284 __und_svc_finish:
285         get_thread_info tsk
286         ldr     r5, [sp, #S_PSR]                @ Get SVC cpsr
287         svc_exit r5                             @ return from exception
288  UNWIND(.fnend          )
289 ENDPROC(__und_svc)
290 
291         .align  5
292 __pabt_svc:
293         svc_entry
294         mov     r2, sp                          @ regs
295         pabt_helper
296         svc_exit r5                             @ return from exception
297  UNWIND(.fnend          )
298 ENDPROC(__pabt_svc)
299 
300         .align  5
301 __fiq_svc:
302         svc_entry trace=0
303         mov     r0, sp                          @ struct pt_regs *regs
304         bl      handle_fiq_as_nmi
305         svc_exit_via_fiq
306  UNWIND(.fnend          )
307 ENDPROC(__fiq_svc)
308 
309 /*
310  * Abort mode handlers
311  */
312 
313 @
314 @ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
315 @ and reuses the same macros. However in abort mode we must also
316 @ save/restore lr_abt and spsr_abt to make nested aborts safe.
317 @
318         .align 5
319 __fiq_abt:
320         svc_entry trace=0
321 
322  ARM(   msr     cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
323  THUMB( mov     r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
324  THUMB( msr     cpsr_c, r0 )
325         mov     r1, lr          @ Save lr_abt
326         mrs     r2, spsr        @ Save spsr_abt, abort is now safe
327  ARM(   msr     cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
328  THUMB( mov     r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
329  THUMB( msr     cpsr_c, r0 )
330         stmfd   sp!, {r1 - r2}
331 
332         add     r0, sp, #8                      @ struct pt_regs *regs
333         bl      handle_fiq_as_nmi
334 
335         ldmfd   sp!, {r1 - r2}
336  ARM(   msr     cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
337  THUMB( mov     r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
338  THUMB( msr     cpsr_c, r0 )
339         mov     lr, r1          @ Restore lr_abt, abort is unsafe
340         msr     spsr_cxsf, r2   @ Restore spsr_abt
341  ARM(   msr     cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
342  THUMB( mov     r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
343  THUMB( msr     cpsr_c, r0 )
344 
345         svc_exit_via_fiq
346  UNWIND(.fnend          )
347 ENDPROC(__fiq_abt)
348 
349 /*
350  * User mode handlers
351  *
352  * EABI note: sp_svc is always 64-bit aligned here, so PT_REGS_SIZE should be a multiple of 8 as well
353  */
354 
355 #if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
356 #error "sizeof(struct pt_regs) must be a multiple of 8"
357 #endif
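The preprocessor check above enforces the alignment rule stated in the note: pt_regs must be a whole number of 8-byte units so sp_svc stays 64-bit aligned. The same invariant expressed as a C11 compile-time assertion (the struct here is an illustrative stand-in for the real struct pt_regs):

        #include <assert.h>                     /* static_assert (C11) */

        struct pt_regs_sketch {                 /* stand-in: 18 saved words on 32-bit ARM */
                unsigned long uregs[18];
        };

        static_assert(sizeof(struct pt_regs_sketch) % 8 == 0,
                      "sizeof(struct pt_regs) must be a multiple of 8");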
358 
359         .macro  usr_entry, trace=1, uaccess=1
360  UNWIND(.fnstart        )
361  UNWIND(.cantunwind     )       @ don't unwind the user space
362         sub     sp, sp, #PT_REGS_SIZE
363  ARM(   stmib   sp, {r1 - r12}  )
364  THUMB( stmia   sp, {r0 - r12}  )
365 
366  ATRAP( mrc     p15, 0, r7, c1, c0, 0)
367  ATRAP( ldr_va  r8, cr_alignment)
368 
369         ldmia   r0, {r3 - r5}
370         add     r0, sp, #S_PC           @ here for interlock avoidance
371         mov     r6, #-1                 @  ""  ""     ""        ""
372 
373         str     r3, [sp]                @ save the "real" r0 copied
374                                         @ from the exception stack
375 
376         @
377         @ We are now ready to fill in the remaining blanks on the stack:
378         @
379         @  r4 - lr_<exception>, already fixed up for correct return/restart
380         @  r5 - spsr_<exception>
381         @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
382         @
383         @ Also, separately save sp_usr and lr_usr
384         @
385         stmia   r0, {r4 - r6}
386  ARM(   stmdb   r0, {sp, lr}^                   )
387  THUMB( store_user_sp_lr r0, r1, S_SP - S_PC    )
388 
389         .if \uaccess
390         uaccess_disable ip
391         .endif
392 
393         @ Enable the alignment trap while in kernel mode
394  ATRAP( teq     r8, r7)
395  ATRAP( mcrne   p15, 0, r8, c1, c0, 0)
396 
397         reload_current r7, r8
398 
399         @
400         @ Clear FP to mark the first stack frame
401         @
402         zero_fp
403 
404         .if     \trace
405 #ifdef CONFIG_TRACE_IRQFLAGS
406         bl      trace_hardirqs_off
407 #endif
408         ct_user_exit save = 0
409         .endif
410         .endm
411 
412         .macro  kuser_cmpxchg_check
413 #if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
414 #ifndef CONFIG_MMU
415 #warning "NPTL on non MMU needs fixing"
416 #else
417         @ Make sure our user space atomic helper is restarted
418         @ if it was interrupted in a critical region.  Here we
419         @ perform a quick test inline since it should be false
420         @ 99.9999% of the time.  The rest is done out of line.
421         ldr     r0, =TASK_SIZE
422         cmp     r4, r0
423         blhs    kuser_cmpxchg64_fixup
424 #endif
425 #endif
426         .endm
427 
428         .align  5
429 __dabt_usr:
430         usr_entry uaccess=0
431         kuser_cmpxchg_check
432         mov     r2, sp
433         dabt_helper
434         b       ret_from_exception
435  UNWIND(.fnend          )
436 ENDPROC(__dabt_usr)
437 
438         .align  5
439 __irq_usr:
440         usr_entry
441         kuser_cmpxchg_check
442         irq_handler from_user=1
443         get_thread_info tsk
444         mov     why, #0
445         b       ret_to_user_from_irq
446  UNWIND(.fnend          )
447 ENDPROC(__irq_usr)
448 
449         .ltorg
450 
451         .align  5
452 __und_usr:
453         usr_entry uaccess=0
454 
455         @ IRQs must be enabled before attempting to read the instruction from
456         @ user space since that could cause a page/translation fault if the
457         @ page table was modified by another CPU.
458         enable_irq
459 
460         tst     r5, #PSR_T_BIT                  @ Thumb mode?
461         mov     r1, #2                          @ set insn size to 2 for Thumb
462         bne     0f                              @ handle as Thumb undef exception
463 #ifdef CONFIG_FPE_NWFPE
464         adr     r9, ret_from_exception
465         bl      call_fpe                        @ returns via R9 on success
466 #endif
467         mov     r1, #4                          @ set insn size to 4 for ARM
468 0:      mov     r0, sp
469         uaccess_disable ip
470         bl      __und_fault
471         b       ret_from_exception
472  UNWIND(.fnend)
473 ENDPROC(__und_usr)
474 
475         .align  5
476 __pabt_usr:
477         usr_entry
478         mov     r2, sp                          @ regs
479         pabt_helper
480  UNWIND(.fnend          )
481         /* fall through */
482 /*
483  * This is the return code to user mode for abort handlers
484  */
485 ENTRY(ret_from_exception)
486  UNWIND(.fnstart        )
487  UNWIND(.cantunwind     )
488         get_thread_info tsk
489         mov     why, #0
490         b       ret_to_user
491  UNWIND(.fnend          )
492 ENDPROC(__pabt_usr)
493 ENDPROC(ret_from_exception)
494 
495         .align  5
496 __fiq_usr:
497         usr_entry trace=0
498         kuser_cmpxchg_check
499         mov     r0, sp                          @ struct pt_regs *regs
500         bl      handle_fiq_as_nmi
501         get_thread_info tsk
502         restore_user_regs fast = 0, offset = 0
503  UNWIND(.fnend          )
504 ENDPROC(__fiq_usr)
505 
506 /*
507  * Register switch for ARMv3 and ARMv4 processors
508  * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
509  * previous and next are guaranteed not to be the same.
510  */
511 ENTRY(__switch_to)
512  UNWIND(.fnstart        )
513  UNWIND(.cantunwind     )
514         add     ip, r1, #TI_CPU_SAVE
515  ARM(   stmia   ip!, {r4 - sl, fp, sp, lr} )    @ Store most regs on stack
516  THUMB( stmia   ip!, {r4 - sl, fp}         )    @ Store most regs on stack
517  THUMB( str     sp, [ip], #4               )
518  THUMB( str     lr, [ip], #4               )
519         ldr     r4, [r2, #TI_TP_VALUE]
520         ldr     r5, [r2, #TI_TP_VALUE + 4]
521 #ifdef CONFIG_CPU_USE_DOMAINS
522         mrc     p15, 0, r6, c3, c0, 0           @ Get domain register
523         str     r6, [r1, #TI_CPU_DOMAIN]        @ Save old domain register
524         ldr     r6, [r2, #TI_CPU_DOMAIN]
525 #endif
526         switch_tls r1, r4, r5, r3, r7
527 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
528     !defined(CONFIG_STACKPROTECTOR_PER_TASK)
529         ldr     r8, =__stack_chk_guard
530         .if (TSK_STACK_CANARY > IMM12_MASK)
531         add     r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
532         ldr     r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
533         .else
534         ldr     r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
535         .endif
536 #endif
537         mov     r7, r2                          @ Preserve 'next'
538 #ifdef CONFIG_CPU_USE_DOMAINS
539         mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
540 #endif
541         mov     r5, r0
542         add     r4, r2, #TI_CPU_SAVE
543         ldr     r0, =thread_notify_head
544         mov     r1, #THREAD_NOTIFY_SWITCH
545         bl      atomic_notifier_call_chain
546 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
547     !defined(CONFIG_STACKPROTECTOR_PER_TASK)
548         str     r9, [r8]
549 #endif
550         mov     r0, r5
551 #if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
552         set_current r7, r8
553         ldmia   r4, {r4 - sl, fp, sp, pc}       @ Load all regs saved previously
554 #else
555         mov     r1, r7
556         ldmia   r4, {r4 - sl, fp, ip, lr}       @ Load all regs saved previously
557 #ifdef CONFIG_VMAP_STACK
558         @
559         @ Do a dummy read from the new stack while running from the old one so
560         @ that we can rely on do_translation_fault() to fix up any stale PMD
561         @ entries covering the vmalloc region.
562         @
563         ldr     r2, [ip]
564 #endif
565 
566         @ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
567         @ effectuates the task switch, as that is what causes the observable
568         @ values of current and current_thread_info to change. When
569         @ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
570         @ current_thread_info) is done explicitly, and the update of SP just
571         @ switches us to another stack, with few other side effects. In order
572         @ to prevent this distinction from causing any inconsistencies, let's
573         @ keep the 'set_current' call as close as we can to the update of SP.
574         set_current r1, r2
575         mov     sp, ip
576         ret     lr
577 #endif
578  UNWIND(.fnend          )
579 ENDPROC(__switch_to)
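As the header comment notes, __switch_to takes the previous task_struct in r0 and the two thread_info pointers in r1/r2, and it returns the previous task in r0 (the assembly preserves r0 across the notifier call via r5). Seen from C, the scheduler invokes it roughly as declared below; this is a sketch of that register interface, not a copy of asm/switch_to.h:

        struct task_struct;
        struct thread_info;

        /* Sketch of the C-visible interface implied by the register usage above:
         * r0 = previous task_struct, r1 = previous thread_info,
         * r2 = next thread_info; returns the previous task_struct in r0. */
        struct task_struct *__switch_to(struct task_struct *prev,
                                        struct thread_info *prev_ti,
                                        struct thread_info *next_ti);

The returned pointer is what allows the caller to recover the task that was switched away from once the new task resumes.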
580 
581 #ifdef CONFIG_VMAP_STACK
582         .text
583         .align  2
584 __bad_stack:
585         @
586         @ We've just detected an overflow. We need to load the address of this
587         @ CPU's overflow stack into the stack pointer register. We have only one
588         @ scratch register so let's use a sequence of ADDs including one
589         @ involving the PC, and decorate them with PC-relative group
590         @ relocations. As these are ARM only, switch to ARM mode first.
591         @
592         @ We enter here with IP clobbered and its value stashed on the mode
593         @ stack.
594         @
595 THUMB(  bx      pc              )
596 THUMB(  nop                     )
597 THUMB(  .arm                    )
598         ldr_this_cpu_armv6 ip, overflow_stack_ptr
599 
600         str     sp, [ip, #-4]!                  @ Preserve original SP value
601         mov     sp, ip                          @ Switch to overflow stack
602         pop     {ip}                            @ Original SP in IP
603 
604 #if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
605         mov     ip, ip                          @ mov expected by unwinder
606         push    {fp, ip, lr, pc}                @ GCC flavor frame record
607 #else
608         str     ip, [sp, #-8]!                  @ store original SP
609         push    {fpreg, lr}                     @ Clang flavor frame record
610 #endif
611 UNWIND( ldr     ip, [r0, #4]    )               @ load exception LR
612 UNWIND( str     ip, [sp, #12]   )               @ store in the frame record
613         ldr     ip, [r0, #12]                   @ reload IP
614 
615         @ Store the original GPRs to the new stack.
616         svc_entry uaccess=0, overflow_check=0
617 
618 UNWIND( .save   {sp, pc}        )
619 UNWIND( .save   {fpreg, lr}     )
620 UNWIND( .setfp  fpreg, sp       )
621 
622         ldr     fpreg, [sp, #S_SP]              @ Add our frame record
623                                                 @ to the linked list
624 #if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
625         ldr     r1, [fp, #4]                    @ reload SP at entry
626         add     fp, fp, #12
627 #else
628         ldr     r1, [fpreg, #8]
629 #endif
630         str     r1, [sp, #S_SP]                 @ store in pt_regs
631 
632         @ Stash the regs for handle_bad_stack
633         mov     r0, sp
634 
635         @ Time to die
636         bl      handle_bad_stack
637         nop
638 UNWIND( .fnend                  )
639 ENDPROC(__bad_stack)
640 #endif
641 
642         __INIT
643 
644 /*
645  * User helpers.
646  *
647  * Each segment is 32-byte aligned and will be moved to the top of the high
648  * vector page.  New segments (if ever needed) must be added in front of
649  * existing ones.  This mechanism should be used only for things that are
650  * really small and justified, and not be abused freely.
651  *
652  * See Documentation/arch/arm/kernel_user_helpers.rst for formal definitions.
653  */
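User space reaches these helpers by calling fixed addresses at the top of the vector page, as described in Documentation/arch/arm/kernel_user_helpers.rst. A condensed usage example along the lines of that document, valid only on kernels built with CONFIG_KUSER_HELPERS:

        #include <stdio.h>

        /* Entry points and signatures follow the kernel_user_helpers documentation. */
        typedef void *(__kuser_get_tls_t)(void);
        typedef int   (__kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);

        #define __kuser_helper_version  (*(int *)0xffff0ffc)
        #define __kuser_get_tls         (*(__kuser_get_tls_t *)0xffff0fe0)
        #define __kuser_cmpxchg         (*(__kuser_cmpxchg_t *)0xffff0fc0)

        /* Returns zero when *ptr was atomically changed from oldval to newval. */
        static int set_if_equal(volatile int *ptr, int oldval, int newval)
        {
                return __kuser_cmpxchg(oldval, newval, ptr);
        }

        int main(void)
        {
                volatile int lock = 0;

                if (__kuser_helper_version < 2)         /* cmpxchg needs version >= 2 */
                        return 1;
                if (set_if_equal(&lock, 0, 1) == 0)
                        printf("lock taken, TLS = %p\n", __kuser_get_tls());
                return 0;
        }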
654  THUMB( .arm    )
655 
656         .macro  usr_ret, reg
657 #ifdef CONFIG_ARM_THUMB
658         bx      \reg
659 #else
660         ret     \reg
661 #endif
662         .endm
663 
664         .macro  kuser_pad, sym, size
665         .if     (. - \sym) & 3
666         .rept   4 - (. - \sym) & 3
667         .byte   0
668         .endr
669         .endif
670         .rept   (\size - (. - \sym)) / 4
671         .word   0xe7fddef1
672         .endr
673         .endm
674 
675 #ifdef CONFIG_KUSER_HELPERS
676         .align  5
677         .globl  __kuser_helper_start
678 __kuser_helper_start:
679 
680 /*
681  * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
682  * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
683  */
684 
685 __kuser_cmpxchg64:                              @ 0xffff0f60
686 
687 #if defined(CONFIG_CPU_32v6K)
688 
689         stmfd   sp!, {r4, r5, r6, r7}
690         ldrd    r4, r5, [r0]                    @ load old val
691         ldrd    r6, r7, [r1]                    @ load new val
692         smp_dmb arm
693 1:      ldrexd  r0, r1, [r2]                    @ load current val
694         eors    r3, r0, r4                      @ compare with oldval (1)
695         eorseq  r3, r1, r5                      @ compare with oldval (2)
696         strexdeq r3, r6, r7, [r2]               @ store newval if eq
697         teqeq   r3, #1                          @ success?
698         beq     1b                              @ if not, retry
699         smp_dmb arm
700         rsbs    r0, r3, #0                      @ set returned val and C flag
701         ldmfd   sp!, {r4, r5, r6, r7}
702         usr_ret lr
703 
704 #elif !defined(CONFIG_SMP)
705 
706 #ifdef CONFIG_MMU
707 
708         /*
709          * The only thing that can break atomicity in this cmpxchg64
710          * implementation is either an IRQ or a data abort exception
711          * causing another process/thread to be scheduled in the middle of
712          * the critical sequence.  The same strategy as for cmpxchg is used.
713          */
714         stmfd   sp!, {r4, r5, r6, lr}
715         ldmia   r0, {r4, r5}                    @ load old val
716         ldmia   r1, {r6, lr}                    @ load new val
717 1:      ldmia   r2, {r0, r1}                    @ load current val
718         eors    r3, r0, r4                      @ compare with oldval (1)
719         eorseq  r3, r1, r5                      @ compare with oldval (2)
720 2:      stmiaeq r2, {r6, lr}                    @ store newval if eq
721         rsbs    r0, r3, #0                      @ set return val and C flag
722         ldmfd   sp!, {r4, r5, r6, pc}
723 
724         .text
725 kuser_cmpxchg64_fixup:
726         @ Called from the kuser_cmpxchg_check macro.
727         @ r4 = address of interrupted insn (must be preserved).
728         @ sp = saved regs. r7 and r8 are clobbered.
729         @ 1b = first critical insn, 2b = last critical insn.
730         @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
731         mov     r7, #0xffff0fff
732         sub     r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
733         subs    r8, r4, r7
734         rsbscs  r8, r8, #(2b - 1b)
735         strcs   r7, [sp, #S_PC]
736 #if __LINUX_ARM_ARCH__ < 6
737         bcc     kuser_cmpxchg32_fixup
738 #endif
739         ret     lr
740         .previous
741 
742 #else
743 #warning "NPTL on non MMU needs fixing"
744         mov     r0, #-1
745         adds    r0, r0, #0
746         usr_ret lr
747 #endif
748 
749 #else
750 #error "incoherent kernel configuration"
751 #endif
752 
753         kuser_pad __kuser_cmpxchg64, 64
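Unlike the 32-bit helper, __kuser_cmpxchg64 takes its old and new values by pointer; per the kernel_user_helpers documentation it sits at 0xffff0f60 and is only usable when __kuser_helper_version is at least 5. An illustrative wrapper:

        #include <stdint.h>

        typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
                                          const int64_t *newval,
                                          volatile int64_t *ptr);
        #define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)

        /* Returns zero when *ptr still equalled *oldval and was replaced by *newval. */
        static int cmpxchg64_user(volatile int64_t *ptr, int64_t oldval, int64_t newval)
        {
                return __kuser_cmpxchg64(&oldval, &newval, ptr);
        }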
754 
755 __kuser_memory_barrier:                         @ 0xffff0fa0
756         smp_dmb arm
757         usr_ret lr
758 
759         kuser_pad __kuser_memory_barrier, 32
760 
761 __kuser_cmpxchg:                                @ 0xffff0fc0
762 
763 #if __LINUX_ARM_ARCH__ < 6
764 
765 #ifdef CONFIG_MMU
766 
767         /*
768          * The only thing that can break atomicity in this cmpxchg
769          * implementation is either an IRQ or a data abort exception
770          * causing another process/thread to be scheduled in the middle
771          * of the critical sequence.  To prevent this, code is added to
772          * the IRQ and data abort exception handlers to set the pc back
773          * to the beginning of the critical section if it is found to be
774          * within that critical section (see kuser_cmpxchg_fixup).
775          */
776 1:      ldr     r3, [r2]                        @ load current val
777         subs    r3, r3, r0                      @ compare with oldval
778 2:      streq   r1, [r2]                        @ store newval if eq
779         rsbs    r0, r3, #0                      @ set return val and C flag
780         usr_ret lr
781 
782         .text
783 kuser_cmpxchg32_fixup:
784         @ Called from kuser_cmpxchg_check macro.
785         @ r4 = address of interrupted insn (must be preserved).
786         @ sp = saved regs. r7 and r8 are clobbered.
787         @ 1b = first critical insn, 2b = last critical insn.
788         @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
789         mov     r7, #0xffff0fff
790         sub     r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
791         subs    r8, r4, r7
792         rsbscs  r8, r8, #(2b - 1b)
793         strcs   r7, [sp, #S_PC]
794         ret     lr
795         .previous
796 
797 #else
798 #warning "NPTL on non MMU needs fixing"
799         mov     r0, #-1
800         adds    r0, r0, #0
801         usr_ret lr
802 #endif
803 
804 #else
805 
806         smp_dmb arm
807 1:      ldrex   r3, [r2]
808         subs    r3, r3, r0
809         strexeq r3, r1, [r2]
810         teqeq   r3, #1
811         beq     1b
812         rsbs    r0, r3, #0
813         /* beware -- each __kuser slot must be 8 instructions max */
814         ALT_SMP(b       __kuser_memory_barrier)
815         ALT_UP(usr_ret  lr)
816 
817 #endif
818 
819         kuser_pad __kuser_cmpxchg, 32
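On pre-ARMv6 UP kernels the cmpxchg helpers above cannot use exclusive loads/stores, so atomicity comes from the fixup hooks: when an IRQ or data abort interrupts user space between labels 1 and 2, kuser_cmpxchg{32,64}_fixup push the saved user PC back to label 1 and the whole sequence re-runs. In rough C terms (illustrative only; the real work is the label arithmetic above):

        #include <stdint.h>

        /* Sketch of the fixup rule: a PC caught inside [start, end] is rewound to
         * start so the non-atomic load/compare/store sequence restarts from scratch. */
        static void kuser_restart_fixup(uint32_t *saved_user_pc,
                                        uint32_t critical_start, uint32_t critical_end)
        {
                if (*saved_user_pc >= critical_start && *saved_user_pc <= critical_end)
                        *saved_user_pc = critical_start;
        }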
820 
821 __kuser_get_tls:                                @ 0xffff0fe0
822         ldr     r0, [pc, #(16 - 8)]     @ read TLS, set in kuser_get_tls_init
823         usr_ret lr
824         mrc     p15, 0, r0, c13, c0, 3  @ 0xffff0fe8 hardware TLS code
825         kuser_pad __kuser_get_tls, 16
826         .rep    3
827         .word   0                       @ 0xffff0ff0 software TLS value, then
828         .endr                           @ pad up to __kuser_helper_version
829 
830 __kuser_helper_version:                         @ 0xffff0ffc
831         .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)
832 
833         .globl  __kuser_helper_end
834 __kuser_helper_end:
835 
836 #endif
837 
838  THUMB( .thumb  )
839 
840 /*
841  * Vector stubs.
842  *
843  * This code is copied to 0xffff1000 so we can use branches in the
844  * vectors, rather than ldr instructions.  Note that this code must not
845  * exceed one page in size.
846  *
847  * Common stub entry macro:
848  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
849  *
850  * SP points to a minimal amount of processor-private memory, the address
851  * of which is copied into r0 for the mode specific abort handler.
852  */
853         .macro  vector_stub, name, mode, correction=0
854         .align  5
855 #ifdef CONFIG_HARDEN_BRANCH_HISTORY
856 vector_bhb_bpiall_\name:
857         mcr     p15, 0, r0, c7, c5, 6   @ BPIALL
858         @ isb not needed due to "movs pc, lr" in the vector stub
859         @ which gives a "context synchronisation".
860 #endif
861 
862 vector_\name:
863         .if \correction
864         sub     lr, lr, #\correction
865         .endif
866 
867         @ Save r0, lr_<exception> (parent PC)
868         stmia   sp, {r0, lr}            @ save r0, lr
869 
870         @ Save spsr_<exception> (parent CPSR)
871 .Lvec_\name:
872         mrs     lr, spsr
873         str     lr, [sp, #8]            @ save spsr
874 
875         @
876         @ Prepare for SVC32 mode.  IRQs remain disabled.
877         @
878         mrs     r0, cpsr
879         eor     r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
880         msr     spsr_cxsf, r0
881 
882         @
883         @ the branch table must immediately follow this code
884         @
885         and     lr, lr, #0x0f
886  THUMB( adr     r0, 1f                  )
887  THUMB( ldr     lr, [r0, lr, lsl #2]    )
888         mov     r0, sp
889  ARM(   ldr     lr, [pc, lr, lsl #2]    )
890         movs    pc, lr                  @ branch to handler in SVC mode
891 ENDPROC(vector_\name)
892 
893 #ifdef CONFIG_HARDEN_BRANCH_HISTORY
894         .subsection 1
895         .align 5
896 vector_bhb_loop8_\name:
897         .if \correction
898         sub     lr, lr, #\correction
899         .endif
900 
901         @ Save r0, lr_<exception> (parent PC)
902         stmia   sp, {r0, lr}
903 
904         @ bhb workaround
905         mov     r0, #8
906 3:      W(b)    . + 4
907         subs    r0, r0, #1
908         bne     3b
909         dsb     nsh
910         @ isb not needed due to "movs pc, lr" in the vector stub
911         @ which gives a "context synchronisation".
912         b       .Lvec_\name
913 ENDPROC(vector_bhb_loop8_\name)
914         .previous
915 #endif
916 
917         .align  2
918         @ handler addresses follow this label
919 1:
920         .endm
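Each use of vector_stub is followed immediately by a 16-word table, and the stub masks the low four bits of the interrupted context's CPSR (its mode field) to pick an entry: index 0 is a trap from user mode, index 3 one from SVC mode, and the other modes land in the *_invalid handlers. A compact C model of that dispatch, using the IRQ dispatcher below as the example (handler bodies are placeholders):

        typedef void (*vec_handler_t)(void);

        static void irq_from_user(void)    { }          /* models __irq_usr     */
        static void irq_from_svc(void)     { }          /* models __irq_svc     */
        static void irq_invalid_mode(void) { }          /* models __irq_invalid */

        /* 16 entries indexed by CPSR[3:0]; NULL slots are treated as invalid below. */
        static vec_handler_t irq_table[16] = {
                [0x0] = irq_from_user,                  /* USR_32 */
                [0x3] = irq_from_svc,                   /* SVC_32 */
        };

        static void vector_dispatch(unsigned int spsr)
        {
                vec_handler_t h = irq_table[spsr & 0x0f];   /* "and lr, lr, #0x0f" */
                (h ? h : irq_invalid_mode)();
        }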
921 
922         .section .stubs, "ax", %progbits
923         @ These need to remain at the start of the section so that
924         @ they are in range of the 'SWI' entries in the vector tables
925         @ located 4k down.
926 .L__vector_swi:
927         .word   vector_swi
928 #ifdef CONFIG_HARDEN_BRANCH_HISTORY
929 .L__vector_bhb_loop8_swi:
930         .word   vector_bhb_loop8_swi
931 .L__vector_bhb_bpiall_swi:
932         .word   vector_bhb_bpiall_swi
933 #endif
934 
935 vector_rst:
936  ARM(   swi     SYS_ERROR0      )
937  THUMB( svc     #0              )
938  THUMB( nop                     )
939         b       vector_und
940 
941 /*
942  * Interrupt dispatcher
943  */
944         vector_stub     irq, IRQ_MODE, 4
945 
946         .long   __irq_usr                       @  0  (USR_26 / USR_32)
947         .long   __irq_invalid                   @  1  (FIQ_26 / FIQ_32)
948         .long   __irq_invalid                   @  2  (IRQ_26 / IRQ_32)
949         .long   __irq_svc                       @  3  (SVC_26 / SVC_32)
950         .long   __irq_invalid                   @  4
951         .long   __irq_invalid                   @  5
952         .long   __irq_invalid                   @  6
953         .long   __irq_invalid                   @  7
954         .long   __irq_invalid                   @  8
955         .long   __irq_invalid                   @  9
956         .long   __irq_invalid                   @  a
957         .long   __irq_invalid                   @  b
958         .long   __irq_invalid                   @  c
959         .long   __irq_invalid                   @  d
960         .long   __irq_invalid                   @  e
961         .long   __irq_invalid                   @  f
962 
963 /*
964  * Data abort dispatcher
965  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
966  */
967         vector_stub     dabt, ABT_MODE, 8
968 
969         .long   __dabt_usr                      @  0  (USR_26 / USR_32)
970         .long   __dabt_invalid                  @  1  (FIQ_26 / FIQ_32)
971         .long   __dabt_invalid                  @  2  (IRQ_26 / IRQ_32)
972         .long   __dabt_svc                      @  3  (SVC_26 / SVC_32)
973         .long   __dabt_invalid                  @  4
974         .long   __dabt_invalid                  @  5
975         .long   __dabt_invalid                  @  6
976         .long   __dabt_invalid                  @  7
977         .long   __dabt_invalid                  @  8
978         .long   __dabt_invalid                  @  9
979         .long   __dabt_invalid                  @  a
980         .long   __dabt_invalid                  @  b
981         .long   __dabt_invalid                  @  c
982         .long   __dabt_invalid                  @  d
983         .long   __dabt_invalid                  @  e
984         .long   __dabt_invalid                  @  f
985 
986 /*
987  * Prefetch abort dispatcher
988  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
989  */
990         vector_stub     pabt, ABT_MODE, 4
991 
992         .long   __pabt_usr                      @  0 (USR_26 / USR_32)
993         .long   __pabt_invalid                  @  1 (FIQ_26 / FIQ_32)
994         .long   __pabt_invalid                  @  2 (IRQ_26 / IRQ_32)
995         .long   __pabt_svc                      @  3 (SVC_26 / SVC_32)
996         .long   __pabt_invalid                  @  4
997         .long   __pabt_invalid                  @  5
998         .long   __pabt_invalid                  @  6
999         .long   __pabt_invalid                  @  7
1000         .long   __pabt_invalid                  @  8
1001         .long   __pabt_invalid                  @  9
1002         .long   __pabt_invalid                  @  a
1003         .long   __pabt_invalid                  @  b
1004         .long   __pabt_invalid                  @  c
1005         .long   __pabt_invalid                  @  d
1006         .long   __pabt_invalid                  @  e
1007         .long   __pabt_invalid                  @  f
1008 
1009 /*
1010  * Undef instr entry dispatcher
1011  * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
1012  */
1013         vector_stub     und, UND_MODE
1014 
1015         .long   __und_usr                       @  0 (USR_26 / USR_32)
1016         .long   __und_invalid                   @  1 (FIQ_26 / FIQ_32)
1017         .long   __und_invalid                   @  2 (IRQ_26 / IRQ_32)
1018         .long   __und_svc                       @  3 (SVC_26 / SVC_32)
1019         .long   __und_invalid                   @  4
1020         .long   __und_invalid                   @  5
1021         .long   __und_invalid                   @  6
1022         .long   __und_invalid                   @  7
1023         .long   __und_invalid                   @  8
1024         .long   __und_invalid                   @  9
1025         .long   __und_invalid                   @  a
1026         .long   __und_invalid                   @  b
1027         .long   __und_invalid                   @  c
1028         .long   __und_invalid                   @  d
1029         .long   __und_invalid                   @  e
1030         .long   __und_invalid                   @  f
1031 
1032         .align  5
1033 
1034 /*=============================================================================
1035  * Address exception handler
1036  *-----------------------------------------------------------------------------
1037  * These aren't too critical.
1038  * (they're not supposed to happen, and won't happen in 32-bit data mode).
1039  */
1040 
1041 vector_addrexcptn:
1042         b       vector_addrexcptn
1043 
1044 /*=============================================================================
1045  * FIQ "NMI" handler
1046  *-----------------------------------------------------------------------------
1047  * Handle a FIQ using the SVC stack, allowing the FIQ to act like an NMI on
1048  * x86 systems.  This must be the last vector stub, so let's place it in its
1049  * own subsection.
1050  */
1051         .subsection 2
1052         vector_stub     fiq, FIQ_MODE, 4
1053 
1054         .long   __fiq_usr                       @  0  (USR_26 / USR_32)
1055         .long   __fiq_svc                       @  1  (FIQ_26 / FIQ_32)
1056         .long   __fiq_svc                       @  2  (IRQ_26 / IRQ_32)
1057         .long   __fiq_svc                       @  3  (SVC_26 / SVC_32)
1058         .long   __fiq_svc                       @  4
1059         .long   __fiq_svc                       @  5
1060         .long   __fiq_svc                       @  6
1061         .long   __fiq_abt                       @  7
1062         .long   __fiq_svc                       @  8
1063         .long   __fiq_svc                       @  9
1064         .long   __fiq_svc                       @  a
1065         .long   __fiq_svc                       @  b
1066         .long   __fiq_svc                       @  c
1067         .long   __fiq_svc                       @  d
1068         .long   __fiq_svc                       @  e
1069         .long   __fiq_svc                       @  f
1070 
1071         .globl  vector_fiq
1072 
1073         .section .vectors, "ax", %progbits
1074         RELOC_TEXT_NONE
1075         W(b)    vector_rst
1076         W(b)    vector_und
1077 ARM(    .reloc  ., R_ARM_LDR_PC_G0, .L__vector_swi              )
1078 THUMB(  .reloc  ., R_ARM_THM_PC12, .L__vector_swi               )
1079         W(ldr)  pc, .
1080         W(b)    vector_pabt
1081         W(b)    vector_dabt
1082         W(b)    vector_addrexcptn
1083         W(b)    vector_irq
1084         W(b)    vector_fiq
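The eight words above are copied to the hardware exception vector base (0xffff0000 when the high vectors are in use, with the stub code one page above at 0xffff1000), one per exception in architectural order. Only the SWI slot is a PC-relative ldr, since vector_swi is assumed to be out of branch range of the vector page; its address is picked up from the .L__vector_swi literal kept at the start of the stubs section. For reference, the classic ARM vector offsets the table corresponds to:

        /* Classic ARM exception vector offsets matching the eight entries above. */
        enum arm_vector_offset {
                VEC_RESET          = 0x00,
                VEC_UNDEF          = 0x04,
                VEC_SWI            = 0x08,
                VEC_PREFETCH_ABORT = 0x0c,
                VEC_DATA_ABORT     = 0x10,
                VEC_ADDR_EXCEPTION = 0x14,  /* unused on 32-bit; spins in vector_addrexcptn */
                VEC_IRQ            = 0x18,
                VEC_FIQ            = 0x1c,
        };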
1085 
1086 #ifdef CONFIG_HARDEN_BRANCH_HISTORY
1087         .section .vectors.bhb.loop8, "ax", %progbits
1088         RELOC_TEXT_NONE
1089         W(b)    vector_rst
1090         W(b)    vector_bhb_loop8_und
1091 ARM(    .reloc  ., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi    )
1092 THUMB(  .reloc  ., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi     )
1093         W(ldr)  pc, .
1094         W(b)    vector_bhb_loop8_pabt
1095         W(b)    vector_bhb_loop8_dabt
1096         W(b)    vector_addrexcptn
1097         W(b)    vector_bhb_loop8_irq
1098         W(b)    vector_bhb_loop8_fiq
1099 
1100         .section .vectors.bhb.bpiall, "ax", %progbits
1101         RELOC_TEXT_NONE
1102         W(b)    vector_rst
1103         W(b)    vector_bhb_bpiall_und
1104 ARM(    .reloc  ., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi   )
1105 THUMB(  .reloc  ., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi    )
1106         W(ldr)  pc, .
1107         W(b)    vector_bhb_bpiall_pabt
1108         W(b)    vector_bhb_bpiall_dabt
1109         W(b)    vector_addrexcptn
1110         W(b)    vector_bhb_bpiall_irq
1111         W(b)    vector_bhb_bpiall_fiq
1112 #endif
1113 
1114         .data
1115         .align  2
1116 
1117         .globl  cr_alignment
1118 cr_alignment:
1119         .space  4
