TOMOYO Linux Cross Reference
Linux/arch/arm/kernel/entry-armv.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>
#include <asm/uaccess-asm.h>

#include "entry-header.S"
#include <asm/probes.h>

#ifdef CONFIG_HAVE_LD_DEAD_CODE_DATA_ELIMINATION
#define RELOC_TEXT_NONE .reloc  .text, R_ARM_NONE, .text
#else
#define RELOC_TEXT_NONE
#endif

/*
 * Interrupt handling.
 */
        .macro  irq_handler, from_user:req
        mov     r1, sp
        ldr_this_cpu r2, irq_stack_ptr, r2, r3
        .if     \from_user == 0
        @
        @ If we took the interrupt while running in the kernel, we may already
        @ be using the IRQ stack, so revert to the original value in that case.
        @
        subs    r3, r2, r1              @ SP above bottom of IRQ stack?
        rsbscs  r3, r3, #THREAD_SIZE    @ ... and below the top?
#ifdef CONFIG_VMAP_STACK
        ldr_va  r3, high_memory, cc     @ End of the linear region
        cmpcc   r3, r1                  @ Stack pointer was below it?
#endif
        bcc     0f                      @ If not, switch to the IRQ stack
        mov     r0, r1
        bl      generic_handle_arch_irq
        b       1f
0:
        .endif

        mov_l   r0, generic_handle_arch_irq
        bl      call_with_stack
1:
        .endm

        .macro  pabt_helper
        @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
        ldr_va  ip, processor, offset=PROCESSOR_PABT_FUNC
        bl_r    ip
#else
        bl      CPU_PABORT_HANDLER
#endif
        .endm

        .macro  dabt_helper

        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - pt_regs
        @  r4 - aborted context pc
        @  r5 - aborted context psr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.  r9 must be preserved.
        @
#ifdef MULTI_DABORT
        ldr_va  ip, processor, offset=PROCESSOR_DABT_FUNC
        bl_r    ip
#else
        bl      CPU_DABORT_HANDLER
#endif
        .endm

        .section        .entry.text,"ax",%progbits

/*
 * Invalid mode handlers
 */
        .macro  inv_entry, reason
        sub     sp, sp, #PT_REGS_SIZE
 ARM(   stmib   sp, {r1 - lr}           )
 THUMB( stmia   sp, {r0 - r12}          )
 THUMB( str     sp, [sp, #S_SP]         )
 THUMB( str     lr, [sp, #S_LR]         )
        mov     r1, #\reason
        .endm

__pabt_invalid:
        inv_entry BAD_PREFETCH
        b       common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
        inv_entry BAD_DATA
        b       common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
        inv_entry BAD_IRQ
        b       common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
        inv_entry BAD_UNDEFINSTR

        @
        @ XXX fall through to common_invalid
        @

@
@ common_invalid - generic code for failed exception (a bit confusing)
@
common_invalid:
        zero_fp

        ldmia   r0, {r4 - r6}
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r7, #-1                 @  ""   ""    ""        ""
        str     r4, [sp]                @ save preserved r0
        stmia   r0, {r5 - r7}           @ lr_<exception>,
                                        @ cpsr_<exception>, "old_r0"

        mov     r0, sp
        b       bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
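
@ Note (reading aid, not in the original source): SPFIX() keeps the SVC
@ stack 64-bit aligned across exception entry, which the EABI expects at
@ call boundaries.  If sp is only word aligned, a further 4 bytes are
@ dropped, and the saved sp_svc value is adjusted to compensate (see the
@ "addne r2, r2, #4" in svc_entry below).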

        .macro  svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
 UNWIND(.fnstart                )
        sub     sp, sp, #(SVC_REGS_SIZE + \stack_hole)
 THUMB( add     sp, r1          )       @ get SP in a GPR without
 THUMB( sub     r1, sp, r1      )       @ using a temp register

        .if     \overflow_check
 UNWIND(.save   {r0 - pc}       )
        do_overflow_check (SVC_REGS_SIZE + \stack_hole)
        .endif

#ifdef CONFIG_THUMB2_KERNEL
        tst     r1, #4                  @ test stack pointer alignment
        sub     r1, sp, r1              @ restore original R1
        sub     sp, r1                  @ restore original SP
#else
 SPFIX( tst     sp, #4          )
#endif
 SPFIX( subne   sp, sp, #4      )

 ARM(   stmib   sp, {r1 - r12}  )
 THUMB( stmia   sp, {r0 - r12}  )       @ No STMIB in Thumb-2

        ldmia   r0, {r3 - r5}
        add     r7, sp, #S_SP           @ here for interlock avoidance
        mov     r6, #-1                 @  ""  ""      ""       ""
        add     r2, sp, #(SVC_REGS_SIZE + \stack_hole)
 SPFIX( addne   r2, r2, #4      )
        str     r3, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

        mov     r3, lr

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r2 - sp_svc
        @  r3 - lr_svc
        @  r4 - lr_<exception>, already fixed up for correct return/restart
        @  r5 - spsr_<exception>
        @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        stmia   r7, {r2 - r6}

        get_thread_info tsk
        uaccess_entry tsk, r0, r1, r2, \uaccess

        .if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
        .endif
        .endm

        .align  5
__dabt_svc:
        svc_entry uaccess=0
        mov     r2, sp
        dabt_helper
 THUMB( ldr     r5, [sp, #S_PSR]        )
        svc_exit r5                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__dabt_svc)

        .align  5
__irq_svc:
        svc_entry
        irq_handler from_user=0

#ifdef CONFIG_PREEMPTION
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
        ldr     r0, [tsk, #TI_FLAGS]            @ get flags
        teq     r8, #0                          @ if preempt count != 0
        movne   r0, #0                          @ force flags to 0
        tst     r0, #_TIF_NEED_RESCHED
        blne    svc_preempt
#endif

        svc_exit r5, irq = 1                    @ return from exception
 UNWIND(.fnend          )
ENDPROC(__irq_svc)

        .ltorg

#ifdef CONFIG_PREEMPTION
svc_preempt:
        mov     r8, lr
1:      bl      preempt_schedule_irq            @ irq en/disable is done inside
        ldr     r0, [tsk, #TI_FLAGS]            @ get new tasks TI_FLAGS
        tst     r0, #_TIF_NEED_RESCHED
        reteq   r8                              @ go again
        b       1b
#endif

__und_fault:
        @ Correct the PC such that it is pointing at the instruction
        @ which caused the fault.  If the faulting instruction was ARM
        @ the PC will be pointing at the next instruction, and have to
        @ subtract 4.  Otherwise, it is Thumb, and the PC will be
        @ pointing at the second half of the Thumb instruction.  We
        @ have to subtract 2.
        ldr     r2, [r0, #S_PC]
        sub     r2, r2, r1
        str     r2, [r0, #S_PC]
        b       do_undefinstr
ENDPROC(__und_fault)
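
@ Worked example (reading aid, not in the original source): an undefined
@ 32-bit ARM instruction at 0x8000 traps with the saved PC pointing at
@ 0x8004; the callers below pass r1 = 4, so S_PC is rewound to 0x8000
@ before do_undefinstr() inspects it.  For a 16-bit Thumb instruction
@ they pass r1 = 2 instead.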

        .align  5
__und_svc:
#ifdef CONFIG_KPROBES
        @ If a kprobe is about to simulate a "stmdb sp..." instruction,
        @ it obviously needs free stack space which then will belong to
        @ the saved context.
        svc_entry MAX_STACK_SIZE
#else
        svc_entry
#endif

        mov     r1, #4                          @ PC correction to apply
 THUMB( tst     r5, #PSR_T_BIT          )       @ exception taken in Thumb mode?
 THUMB( movne   r1, #2                  )       @ if so, fix up PC correction
        mov     r0, sp                          @ struct pt_regs *regs
        bl      __und_fault

__und_svc_finish:
        get_thread_info tsk
        ldr     r5, [sp, #S_PSR]                @ Get SVC cpsr
        svc_exit r5                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__und_svc)

        .align  5
__pabt_svc:
        svc_entry
        mov     r2, sp                          @ regs
        pabt_helper
        svc_exit r5                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__pabt_svc)

        .align  5
__fiq_svc:
        svc_entry trace=0
        mov     r0, sp                          @ struct pt_regs *regs
        bl      handle_fiq_as_nmi
        svc_exit_via_fiq
 UNWIND(.fnend          )
ENDPROC(__fiq_svc)

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
        .align 5
__fiq_abt:
        svc_entry trace=0

 ARM(   msr     cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov     r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr     cpsr_c, r0 )
        mov     r1, lr          @ Save lr_abt
        mrs     r2, spsr        @ Save spsr_abt, abort is now safe
 ARM(   msr     cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov     r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr     cpsr_c, r0 )
        stmfd   sp!, {r1 - r2}

        add     r0, sp, #8                      @ struct pt_regs *regs
        bl      handle_fiq_as_nmi

        ldmfd   sp!, {r1 - r2}
 ARM(   msr     cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov     r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr     cpsr_c, r0 )
        mov     lr, r1          @ Restore lr_abt, abort is unsafe
        msr     spsr_cxsf, r2   @ Restore spsr_abt
 ARM(   msr     cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov     r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr     cpsr_c, r0 )

        svc_exit_via_fiq
 UNWIND(.fnend          )
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

        .macro  usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )       @ don't unwind the user space
        sub     sp, sp, #PT_REGS_SIZE
 ARM(   stmib   sp, {r1 - r12}  )
 THUMB( stmia   sp, {r0 - r12}  )

 ATRAP( mrc     p15, 0, r7, c1, c0, 0)
 ATRAP( ldr_va  r8, cr_alignment)

        ldmia   r0, {r3 - r5}
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r6, #-1                 @  ""  ""     ""        ""

        str     r3, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r4 - lr_<exception>, already fixed up for correct return/restart
        @  r5 - spsr_<exception>
        @  r6 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        @ Also, separately save sp_usr and lr_usr
        @
        stmia   r0, {r4 - r6}
 ARM(   stmdb   r0, {sp, lr}^                   )
 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC    )

        .if \uaccess
        uaccess_disable ip
        .endif

        @ Enable the alignment trap while in kernel mode
 ATRAP( teq     r8, r7)
 ATRAP( mcrne   p15, 0, r8, c1, c0, 0)

        reload_current r7, r8

        @
        @ Clear FP to mark the first stack frame
        @
        zero_fp

        .if     \trace
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
        ct_user_exit save = 0
        .endif
        .endm

        .macro  kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
        @ Make sure our user space atomic helper is restarted
        @ if it was interrupted in a critical region.  Here we
        @ perform a quick test inline since it should be false
        @ 99.9999% of the time.  The rest is done out of line.
        ldr     r0, =TASK_SIZE
        cmp     r4, r0
        blhs    kuser_cmpxchg64_fixup
#endif
#endif
        .endm
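
@ Note (reading aid, not in the original source): r4 holds the address of
@ the interrupted instruction at this point, and the kuser helpers live
@ in the vector page above TASK_SIZE, so the out-of-line fixup is only
@ taken when the exception hit that region.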

        .align  5
__dabt_usr:
        usr_entry uaccess=0
        kuser_cmpxchg_check
        mov     r2, sp
        dabt_helper
        b       ret_from_exception
 UNWIND(.fnend          )
ENDPROC(__dabt_usr)

        .align  5
__irq_usr:
        usr_entry
        kuser_cmpxchg_check
        irq_handler from_user=1
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user_from_irq
 UNWIND(.fnend          )
ENDPROC(__irq_usr)

        .ltorg

        .align  5
__und_usr:
        usr_entry uaccess=0

        @ IRQs must be enabled before attempting to read the instruction from
        @ user space since that could cause a page fault if the
        @ page table was modified by another CPU.
        enable_irq

        tst     r5, #PSR_T_BIT                  @ Thumb mode?
        mov     r1, #2                          @ set insn size to 2 for Thumb
        bne     0f                              @ handle as Thumb undef exception
#ifdef CONFIG_FPE_NWFPE
        adr     r9, ret_from_exception
        bl      call_fpe                        @ returns via R9 if FPE insn
#endif
        mov     r1, #4                          @ set insn size to 4 for ARM
0:      mov     r0, sp
        uaccess_disable ip
        bl      __und_fault
        b       ret_from_exception
 UNWIND(.fnend)
ENDPROC(__und_usr)

        .align  5
__pabt_usr:
        usr_entry
        mov     r2, sp                          @ regs
        pabt_helper
 UNWIND(.fnend          )
        /* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user
 UNWIND(.fnend          )
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

        .align  5
__fiq_usr:
        usr_entry trace=0
        kuser_cmpxchg_check
        mov     r0, sp                          @ struct pt_regs *regs
        bl      handle_fiq_as_nmi
        get_thread_info tsk
        restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend          )
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        add     ip, r1, #TI_CPU_SAVE
 ARM(   stmia   ip!, {r4 - sl, fp, sp, lr} )    @ Store most regs on stack
 THUMB( stmia   ip!, {r4 - sl, fp}         )    @ Store most regs on stack
 THUMB( str     sp, [ip], #4               )
 THUMB( str     lr, [ip], #4               )
        ldr     r4, [r2, #TI_TP_VALUE]
        ldr     r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
        mrc     p15, 0, r6, c3, c0, 0           @ Get domain register
        str     r6, [r1, #TI_CPU_DOMAIN]        @ Save old domain register
        ldr     r6, [r2, #TI_CPU_DOMAIN]
#endif
        switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
        ldr     r8, =__stack_chk_guard
        .if (TSK_STACK_CANARY > IMM12_MASK)
        add     r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
        ldr     r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
        .else
        ldr     r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
        .endif
#endif
        mov     r7, r2                          @ Preserve 'next'
#ifdef CONFIG_CPU_USE_DOMAINS
        mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
#endif
        mov     r5, r0
        add     r4, r2, #TI_CPU_SAVE
        ldr     r0, =thread_notify_head
        mov     r1, #THREAD_NOTIFY_SWITCH
        bl      atomic_notifier_call_chain
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
        str     r9, [r8]
#endif
        mov     r0, r5
#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
        set_current r7, r8
        ldmia   r4, {r4 - sl, fp, sp, pc}       @ Load all regs saved previously
#else
        mov     r1, r7
        ldmia   r4, {r4 - sl, fp, ip, lr}       @ Load all regs saved previously
#ifdef CONFIG_VMAP_STACK
        @
        @ Do a dummy read from the new stack while running from the old one so
        @ that we can rely on do_translation_fault() to fix up any stale PMD
        @ entries covering the vmalloc region.
        @
        ldr     r2, [ip]
#endif

        @ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
        @ effectuates the task switch, as that is what causes the observable
        @ values of current and current_thread_info to change. When
        @ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
        @ current_thread_info) is done explicitly, and the update of SP just
        @ switches us to another stack, with few other side effects. In order
        @ to prevent this distinction from causing any inconsistencies, let's
        @ keep the 'set_current' call as close as we can to the update of SP.
        set_current r1, r2
        mov     sp, ip
        ret     lr
#endif
 UNWIND(.fnend          )
ENDPROC(__switch_to)

#ifdef CONFIG_VMAP_STACK
        .text
        .align  2
__bad_stack:
        @
        @ We've just detected an overflow. We need to load the address of this
        @ CPU's overflow stack into the stack pointer register. We have only one
        @ scratch register so let's use a sequence of ADDs
        @ involving the PC, and decorate them with PC-relative group
        @ relocations. As these are ARM only, switch to ARM mode first.
        @
        @ We enter here with IP clobbered and its value stashed on the mode
        @ stack.
        @
THUMB(  bx      pc              )
THUMB(  nop                     )
THUMB(  .arm                    )
        ldr_this_cpu_armv6 ip, overflow_stack_ptr

        str     sp, [ip, #-4]!                  @ Preserve original SP value
        mov     sp, ip                          @ Switch to overflow stack
        pop     {ip}                            @ Original SP in IP

#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
        mov     ip, ip                          @ mov expected by unwinder
        push    {fp, ip, lr, pc}                @ GCC flavor frame record
#else
        str     ip, [sp, #-8]!                  @ store original SP
        push    {fpreg, lr}                     @ Clang flavor frame record
#endif
UNWIND( ldr     ip, [r0, #4]    )               @ load exception LR
UNWIND( str     ip, [sp, #12]   )               @ store in the frame record
        ldr     ip, [r0, #12]                   @ reload IP

        @ Store the original GPRs to the new stack.
        svc_entry uaccess=0, overflow_check=0

UNWIND( .save   {sp, pc}        )
UNWIND( .save   {fpreg, lr}     )
UNWIND( .setfp  fpreg, sp       )

        ldr     fpreg, [sp, #S_SP]              @ Add our frame record
                                                @ to the linked list
#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
        ldr     r1, [fp, #4]                    @ reload SP at entry time
        add     fp, fp, #12                     @ point to pc of frame record
#else
        ldr     r1, [fpreg, #8]                 @ reload SP at entry time
#endif
        str     r1, [sp, #S_SP]                 @ store in pt_regs

        @ Stash the regs for handle_bad_stack
        mov     r0, sp

        @ Time to die
        bl      handle_bad_stack
        nop
UNWIND( .fnend                  )
ENDPROC(__bad_stack)
#endif

        __INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arch/arm/kernel_user_helpers.rst for formal definitions.
 */
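
@ Usage sketch (reading aid, not in the original source): user space
@ reaches these helpers through their fixed addresses near the top of
@ the vector page, as described in the documentation file named above.
@ A minimal C example, assuming a 32-bit ARM EABI process and helper
@ version >= 2:
@
@	typedef int (__kuser_cmpxchg_t)(int oldval, int newval,
@					volatile int *ptr);
@	#define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0)
@	#define __kuser_helper_version (*(const int *)0xffff0ffc)
@
@	static void atomic_add(volatile int *ptr, int val)
@	{
@		int old;
@		do {
@			old = *ptr;	/* retry until the store wins */
@		} while (__kuser_cmpxchg(old, old + val, ptr) != 0);
@	}
@
@ __kuser_cmpxchg returns zero (with the C flag set) when *ptr was
@ updated, and non-zero otherwise.  __kuser_get_tls lives at 0xffff0fe0
@ and __kuser_memory_barrier at 0xffff0fa0.
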
 THUMB( .arm    )

        .macro  usr_ret, reg
#ifdef CONFIG_ARM_THUMB
        bx      \reg
#else
        ret     \reg
#endif
        .endm

        .macro  kuser_pad, sym, size
        .if     (. - \sym) & 3
        .rept   4 - (. - \sym) & 3
        .byte   0
        .endr
        .endif
        .rept   (\size - (. - \sym)) / 4
        .word   0xe7fddef1
        .endr
        .endm
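
@ Note (reading aid, not in the original source): kuser_pad first rounds
@ \sym up to a 4-byte boundary with zero bytes, then fills the rest of
@ the \size-byte slot with 0xe7fddef1, an architecturally undefined
@ instruction encoding, so a stray jump into unused helper space traps
@ instead of executing whatever happens to be there.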

#ifdef CONFIG_KUSER_HELPERS
        .align  5
        .globl  __kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:                              @ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

        stmfd   sp!, {r4, r5, r6, r7}
        ldrd    r4, r5, [r0]                    @ load old val
        ldrd    r6, r7, [r1]                    @ load new val
        smp_dmb arm
1:      ldrexd  r0, r1, [r2]                    @ load current val
        eors    r3, r0, r4                      @ compare with oldval (1)
        eorseq  r3, r1, r5                      @ compare with oldval (2)
        strexdeq r3, r6, r7, [r2]               @ store newval if eq
        teqeq   r3, #1                          @ success?
        beq     1b                              @ if no then retry
        smp_dmb arm
        rsbs    r0, r3, #0                      @ set returned val and C flag
        ldmfd   sp!, {r4, r5, r6, r7}
        usr_ret lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

        /*
         * The only thing that can break atomicity in this cmpxchg64
         * implementation is either an IRQ or a data abort exception
         * causing another process/thread to be scheduled in the middle of
         * the critical sequence.  The same strategy as for cmpxchg is used.
         */
        stmfd   sp!, {r4, r5, r6, lr}
        ldmia   r0, {r4, r5}                    @ load old val
        ldmia   r1, {r6, lr}                    @ load new val
1:      ldmia   r2, {r0, r1}                    @ load current val
        eors    r3, r0, r4                      @ compare with oldval (1)
        eorseq  r3, r1, r5                      @ compare with oldval (2)
2:      stmiaeq r2, {r6, lr}                    @ store newval if eq
        rsbs    r0, r3, #0                      @ set return val and C flag
        ldmfd   sp!, {r4, r5, r6, pc}

        .text
kuser_cmpxchg64_fixup:
        @ Called from kuser_cmpxchg_fixup.
        @ r4 = address of interrupted insn (must be preserved).
        @ sp = saved regs. r7 and r8 are clobbered.
        @ 1b = first critical insn, 2b = last critical insn.
        @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
        mov     r7, #0xffff0fff
        sub     r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
        subs    r8, r4, r7
        rsbscs  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
        bcc     kuser_cmpxchg32_fixup
#endif
        ret     lr
        .previous

#else
#warning "NPTL on non MMU needs fixing"
        mov     r0, #-1
        adds    r0, r0, #0
        usr_ret lr
#endif

#else
#error "incoherent kernel configuration"
#endif

        kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:                         @ 0xffff0fa0
        smp_dmb arm
        usr_ret lr

        kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:                                @ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

        /*
         * The only thing that can break atomicity in this cmpxchg
         * implementation is either an IRQ or a data abort exception
         * causing another process/thread to be scheduled in the middle
         * of the critical sequence.  To prevent this, code is added to
         * the IRQ and data abort exception handlers to set the pc back
         * to the beginning of the critical section if it is found to be
         * within that critical section (see kuser_cmpxchg_fixup).
         */
1:      ldr     r3, [r2]                        @ load current val
        subs    r3, r3, r0                      @ compare with oldval
2:      streq   r1, [r2]                        @ store newval if eq
        rsbs    r0, r3, #0                      @ set return val and C flag
        usr_ret lr

        .text
kuser_cmpxchg32_fixup:
        @ Called from kuser_cmpxchg_check macro.
        @ r4 = address of interrupted insn (must be preserved).
        @ sp = saved regs. r7 and r8 are clobbered.
        @ 1b = first critical insn, 2b = last critical insn.
        @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
        mov     r7, #0xffff0fff
        sub     r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
        subs    r8, r4, r7
        rsbscs  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
        ret     lr
        .previous

#else
#warning "NPTL on non MMU needs fixing"
        mov     r0, #-1
        adds    r0, r0, #0
        usr_ret lr
#endif

#else

        smp_dmb arm
1:      ldrex   r3, [r2]
        subs    r3, r3, r0
        strexeq r3, r1, [r2]
        teqeq   r3, #1
        beq     1b
        rsbs    r0, r3, #0
        /* beware -- each __kuser slot must be 8 instructions max */
        ALT_SMP(b       __kuser_memory_barrier)
        ALT_UP(usr_ret  lr)

#endif

        kuser_pad __kuser_cmpxchg, 32

__kuser_get_tls:                                @ 0xffff0fe0
        ldr     r0, [pc, #(16 - 8)]     @ read TLS, set in kuser_get_tls_init
        usr_ret lr
        mrc     p15, 0, r0, c13, c0, 3  @ 0xffff0fe8 hardware TLS code
        kuser_pad __kuser_get_tls, 16
        .rep    3
        .word   0                       @ 0xffff0ff0 software TLS value, then
        .endr                           @ pad up to __kuser_helper_version

__kuser_helper_version:                         @ 0xffff0ffc
        .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)

        .globl  __kuser_helper_end
__kuser_helper_end:

#endif

 THUMB( .thumb  )

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not exceed
 * a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
        .macro  vector_stub, name, mode, correction=0
        .align  5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
vector_bhb_bpiall_\name:
        mcr     p15, 0, r0, c7, c5, 6   @ BPIALL
        @ isb not needed due to "movs pc, lr" in the vector stub
        @ which gives a "context synchronisation".
#endif

vector_\name:
        .if \correction
        sub     lr, lr, #\correction
        .endif

        @ Save r0, lr_<exception> (parent PC)
        stmia   sp, {r0, lr}            @ save r0, lr

        @ Save spsr_<exception> (parent CPSR)
.Lvec_\name:
        mrs     lr, spsr
        str     lr, [sp, #8]            @ save spsr

        @
        @ Prepare for SVC32 mode.  IRQs remain disabled.
        @
        mrs     r0, cpsr
        eor     r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
        msr     spsr_cxsf, r0

        @
        @ the branch table must immediately follow this code
        @
        and     lr, lr, #0x0f
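        @ Note (reading aid, not in the original source): lr now holds the
        @ low four mode bits of the parent context's CPSR (USR=0x0, FIQ=0x1,
        @ IRQ=0x2, SVC=0x3, ABT=0x7, UND=0xb), which index the 16-entry
        @ branch table emitted after each vector_stub invocation below.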
 THUMB( adr     r0, 1f                  )
 THUMB( ldr     lr, [r0, lr, lsl #2]    )
        mov     r0, sp
 ARM(   ldr     lr, [pc, lr, lsl #2]    )
        movs    pc, lr                  @ branch to handler in SVC mode
ENDPROC(vector_\name)

#ifdef CONFIG_HARDEN_BRANCH_HISTORY
        .subsection 1
        .align 5
vector_bhb_loop8_\name:
        .if \correction
        sub     lr, lr, #\correction
        .endif

        @ Save r0, lr_<exception> (parent PC)
        stmia   sp, {r0, lr}

        @ bhb workaround
        mov     r0, #8
3:      W(b)    . + 4
        subs    r0, r0, #1
        bne     3b
        dsb     nsh
        @ isb not needed due to "movs pc, lr" in the vector stub
        @ which gives a "context synchronisation".
        b       .Lvec_\name
ENDPROC(vector_bhb_loop8_\name)
        .previous
#endif

        .align  2
        @ handler addresses follow this label
1:
        .endm

        .section .stubs, "ax", %progbits
        @ These need to remain at the start of the section so that
        @ they are in range of the 'SWI' entries in the vector tables
        @ located 4k down.
.L__vector_swi:
        .word   vector_swi
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.L__vector_bhb_loop8_swi:
        .word   vector_bhb_loop8_swi
.L__vector_bhb_bpiall_swi:
        .word   vector_bhb_bpiall_swi
#endif

vector_rst:
 ARM(   swi     SYS_ERROR0      )
 THUMB( svc     #0              )
 THUMB( nop                     )
        b       vector_und

/*
 * Interrupt dispatcher
 */
        vector_stub     irq, IRQ_MODE, 4

        .long   __irq_usr                       @  0  (USR_26 / USR_32)
        .long   __irq_invalid                   @  1  (FIQ_26 / FIQ_32)
        .long   __irq_invalid                   @  2  (IRQ_26 / IRQ_32)
        .long   __irq_svc                       @  3  (SVC_26 / SVC_32)
        .long   __irq_invalid                   @  4
        .long   __irq_invalid                   @  5
        .long   __irq_invalid                   @  6
        .long   __irq_invalid                   @  7
        .long   __irq_invalid                   @  8
        .long   __irq_invalid                   @  9
        .long   __irq_invalid                   @  a
        .long   __irq_invalid                   @  b
        .long   __irq_invalid                   @  c
        .long   __irq_invalid                   @  d
        .long   __irq_invalid                   @  e
        .long   __irq_invalid                   @  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC + 8
 */
        vector_stub     dabt, ABT_MODE, 8

        .long   __dabt_usr                      @  0  (USR_26 / USR_32)
        .long   __dabt_invalid                  @  1  (FIQ_26 / FIQ_32)
        .long   __dabt_invalid                  @  2  (IRQ_26 / IRQ_32)
        .long   __dabt_svc                      @  3  (SVC_26 / SVC_32)
        .long   __dabt_invalid                  @  4
        .long   __dabt_invalid                  @  5
        .long   __dabt_invalid                  @  6
        .long   __dabt_invalid                  @  7
        .long   __dabt_invalid                  @  8
        .long   __dabt_invalid                  @  9
        .long   __dabt_invalid                  @  a
        .long   __dabt_invalid                  @  b
        .long   __dabt_invalid                  @  c
        .long   __dabt_invalid                  @  d
        .long   __dabt_invalid                  @  e
        .long   __dabt_invalid                  @  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC + 4
 */
        vector_stub     pabt, ABT_MODE, 4

        .long   __pabt_usr                      @  0  (USR_26 / USR_32)
        .long   __pabt_invalid                  @  1  (FIQ_26 / FIQ_32)
        .long   __pabt_invalid                  @  2  (IRQ_26 / IRQ_32)
        .long   __pabt_svc                      @  3  (SVC_26 / SVC_32)
        .long   __pabt_invalid                  @  4
        .long   __pabt_invalid                  @  5
        .long   __pabt_invalid                  @  6
        .long   __pabt_invalid                  @  7
        .long   __pabt_invalid                  @  8
        .long   __pabt_invalid                  @  9
        .long   __pabt_invalid                  @  a
        .long   __pabt_invalid                  @  b
        .long   __pabt_invalid                  @  c
        .long   __pabt_invalid                  @  d
        .long   __pabt_invalid                  @  e
        .long   __pabt_invalid                  @  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
        vector_stub     und, UND_MODE

        .long   __und_usr                       @  0  (USR_26 / USR_32)
        .long   __und_invalid                   @  1  (FIQ_26 / FIQ_32)
        .long   __und_invalid                   @  2  (IRQ_26 / IRQ_32)
        .long   __und_svc                       @  3  (SVC_26 / SVC_32)
        .long   __und_invalid                   @  4
        .long   __und_invalid                   @  5
        .long   __und_invalid                   @  6
        .long   __und_invalid                   @  7
        .long   __und_invalid                   @  8
        .long   __und_invalid                   @  9
        .long   __und_invalid                   @  a
        .long   __und_invalid                   @  b
        .long   __und_invalid                   @  c
        .long   __und_invalid                   @  d
        .long   __und_invalid                   @  e
        .long   __und_invalid                   @  f

        .align  5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit mode).
 */

vector_addrexcptn:
        b       vector_addrexcptn

/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
 * systems. This must be the last vector stub, so lets place it in its own
 * subsection.
 */
        .subsection 2
        vector_stub     fiq, FIQ_MODE, 4

        .long   __fiq_usr                       @  0  (USR_26 / USR_32)
        .long   __fiq_svc                       @  1  (FIQ_26 / FIQ_32)
        .long   __fiq_svc                       @  2  (IRQ_26 / IRQ_32)
        .long   __fiq_svc                       @  3  (SVC_26 / SVC_32)
        .long   __fiq_svc                       @  4
        .long   __fiq_svc                       @  5
        .long   __fiq_svc                       @  6
        .long   __fiq_abt                       @  7
        .long   __fiq_svc                       @  8
        .long   __fiq_svc                       @  9
        .long   __fiq_svc                       @  a
        .long   __fiq_svc                       @  b
        .long   __fiq_svc                       @  c
        .long   __fiq_svc                       @  d
        .long   __fiq_svc                       @  e
        .long   __fiq_svc                       @  f

        .globl  vector_fiq

        .section .vectors, "ax", %progbits
        RELOC_TEXT_NONE
        W(b)    vector_rst
        W(b)    vector_und
ARM(    .reloc  ., R_ARM_LDR_PC_G0, .L__vector_swi              )
THUMB(  .reloc  ., R_ARM_THM_PC12, .L__vector_swi               )
        W(ldr)  pc, .
        W(b)    vector_pabt
        W(b)    vector_dabt
        W(b)    vector_addrexcptn
        W(b)    vector_irq
        W(b)    vector_fiq

#ifdef CONFIG_HARDEN_BRANCH_HISTORY
        .section .vectors.bhb.loop8, "ax", %progbits
        RELOC_TEXT_NONE
        W(b)    vector_rst
        W(b)    vector_bhb_loop8_und
ARM(    .reloc  ., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi    )
THUMB(  .reloc  ., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi     )
        W(ldr)  pc, .
        W(b)    vector_bhb_loop8_pabt
        W(b)    vector_bhb_loop8_dabt
        W(b)    vector_addrexcptn
        W(b)    vector_bhb_loop8_irq
        W(b)    vector_bhb_loop8_fiq

        .section .vectors.bhb.bpiall, "ax", %progbits
        RELOC_TEXT_NONE
        W(b)    vector_rst
        W(b)    vector_bhb_bpiall_und
ARM(    .reloc  ., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi   )
THUMB(  .reloc  ., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi    )
        W(ldr)  pc, .
        W(b)    vector_bhb_bpiall_pabt
        W(b)    vector_bhb_bpiall_dabt
        W(b)    vector_addrexcptn
        W(b)    vector_bhb_bpiall_irq
        W(b)    vector_bhb_bpiall_fiq
#endif

        .data
        .align  2

        .globl  cr_alignment
cr_alignment:
        .space  4
                                                      
