
TOMOYO Linux Cross Reference
Linux/arch/arm/kernel/entry-header.S

Version: linux-6.12-rc7


/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH    0
#define BAD_DATA        1
#define BAD_ADDREXCPTN  2
#define BAD_IRQ         3
#define BAD_UNDEFINSTR  4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF           8
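
@
@ Editor's note (not part of the upstream file): struct pt_regs on 32-bit
@ ARM is 18 words (r0-r15, cpsr and the original r0), i.e. 72 bytes. The
@ extra S_OFF = 8 bytes give the syscall path room for arguments 5 and 6
@ (r4/r5, see invoke_syscall below), and 72 + 8 = 80 keeps the frame a
@ multiple of 8 as the EABI requires.
@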

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

        .macro  zero_fp
#ifdef CONFIG_FRAME_POINTER
        mov     fp, #0
#endif
        .endm

#ifdef CONFIG_ALIGNMENT_TRAP
#define ATRAP(x...) x
#else
#define ATRAP(x...)
#endif

        .macro  alignment_trap, rtmp1, rtmp2, label
#ifdef CONFIG_ALIGNMENT_TRAP
        mrc     p15, 0, \rtmp2, c1, c0, 0
        ldr_va  \rtmp1, \label
        teq     \rtmp1, \rtmp2
        mcrne   p15, 0, \rtmp1, c1, c0, 0
#endif
        .endm

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (32 bytes) before
 * switching to the exception stack (SP_main).
 *
 * If exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is aligned to 64 bit automatically
 * (CCR.STKALIGN set).
 *
 * Linux assumes that the interrupts are disabled when entering the
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and reenabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
        .macro  v7m_exception_entry
        @ determine the location of the registers saved by the core during
        @ exception entry. Depending on the mode the cpu was in when the
        @ exception happened that is either on the main or the process stack.
        @ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
        @ was used.
        tst     lr, #EXC_RET_STACK_MASK
        mrsne   r12, psp
        moveq   r12, sp

        @ we cannot rely on r0-r3 and r12 matching the value saved in the
        @ exception frame because of tail-chaining. So these have to be
        @ reloaded.
        ldmia   r12!, {r0-r3}

        @ Linux expects to have irqs off. Do it here before taking stack space
        cpsid   i

        sub     sp, #PT_REGS_SIZE-S_IP
        stmdb   sp!, {r0-r11}

        @ load saved r12, lr, return address and xPSR.
        @ r0-r7 are used for signals and never touched from now on. Clobbering
        @ r8-r12 is OK.
        mov     r9, r12
        ldmia   r9!, {r8, r10-r12}

        @ calculate the original stack pointer value.
        @ r9 currently points to the memory location just above the auto saved
        @ xPSR.
        @ The cpu might automatically 8-byte align the stack. Bit 9
        @ of the saved xPSR specifies if stack aligning took place. In this case
        @ another 32-bit value is included in the stack.

        tst     r12, V7M_xPSR_FRAMEPTRALIGN
        addne   r9, r9, #4

        @ store saved r12 using str to have a register to hold the base for stm
        str     r8, [sp, #S_IP]
        add     r8, sp, #S_SP
        @ store r13-r15, xPSR
        stmia   r8!, {r9-r12}
        @ store old_r0
        str     r0, [r8]
        .endm

        /*
         * PENDSV and SVCALL are configured to have the same exception
         * priorities. As a kernel thread runs at SVCALL execution priority it
         * can never be preempted and so we will never have to return to a
         * kernel thread here.
         */
        .macro  v7m_exception_slow_exit ret_r0
        cpsid   i
        ldr     lr, =exc_ret
        ldr     lr, [lr]

        @ read original r12, sp, lr, pc and xPSR
        add     r12, sp, #S_IP
        ldmia   r12, {r1-r5}

        @ an exception frame is always 8-byte aligned. To tell the hardware if
        @ the sp to be restored is aligned or not set bit 9 of the saved xPSR
        @ accordingly.
        tst     r2, #4
        subne   r2, r2, #4
        orrne   r5, V7M_xPSR_FRAMEPTRALIGN
        biceq   r5, V7M_xPSR_FRAMEPTRALIGN

        @ ensure bit 0 is cleared in the PC, otherwise behaviour is
        @ unpredictable
        bic     r4, #1

        @ write basic exception frame
        stmdb   r2!, {r1, r3-r5}
        ldmia   sp, {r1, r3-r5}
        .if     \ret_r0
        stmdb   r2!, {r0, r3-r5}
        .else
        stmdb   r2!, {r1, r3-r5}
        .endif

        @ restore process sp
        msr     psp, r2

        @ restore original r4-r11
        ldmia   sp!, {r0-r11}

        @ restore main sp
        add     sp, sp, #PT_REGS_SIZE-S_IP

        cpsie   i
        bx      lr
        .endm
#endif  /* CONFIG_CPU_V7M */
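
@
@ Editor's note (not part of the upstream file): on ARMv7-M the macros above
@ cooperate with the hardware-stacked exception frame. v7m_exception_entry
@ rebuilds a full pt_regs layout on the kernel stack from the eight words
@ the core pushed automatically, and v7m_exception_slow_exit reverses this
@ by writing a fresh basic frame to the process stack and letting the final
@ "bx lr" (with lr holding the saved EXC_RETURN value) perform the
@ architectural exception return.
@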

        @
        @ Store/load the USER SP and LR registers by switching to the SYS
        @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
        @ available. Should only be called from SVC mode
        @
        .macro  store_user_sp_lr, rd, rtemp, offset = 0
        mrs     \rtemp, cpsr
        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch to the SYS mode

        str     sp, [\rd, #\offset]             @ save sp_usr
        str     lr, [\rd, #\offset + 4]         @ save lr_usr

        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
        .endm

        .macro  load_user_sp_lr, rd, rtemp, offset = 0
        mrs     \rtemp, cpsr
        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch to the SYS mode

        ldr     sp, [\rd, #\offset]             @ load sp_usr
        ldr     lr, [\rd, #\offset + 4]         @ load lr_usr

        eor     \rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
        msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
        .endm
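
@
@ Editor's note (not part of the upstream file): SYS mode shares its sp/lr
@ bank with user mode, so briefly toggling CPSR between SVC and SYS mode is
@ what lets the macros above reach sp_usr and lr_usr. A hypothetical caller
@ (register choice and offset illustrative only) would look like:
@
@       add     r0, sp, #S_SP
@       store_user_sp_lr r0, r1         @ writes sp_usr/lr_usr into pt_regs
@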


        .macro  svc_exit, rpsr, irq = 0
        .if     \irq != 0
        @ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
        @ The parent context IRQs must have been enabled to get here in
        @ the first place, so there's no point checking the PSR I bit.
        bl      trace_hardirqs_on
#endif
        .else
        @ IRQs off again before pulling preserved data off the stack
        disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
        tst     \rpsr, #PSR_I_BIT
        bleq    trace_hardirqs_on
        tst     \rpsr, #PSR_I_BIT
        blne    trace_hardirqs_off
#endif
        .endif
        uaccess_exit tsk, r0, r1

#ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode SVC restore
        msr     spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        sub     r0, sp, #4                      @ uninhabited address
        strex   r1, r2, [r0]                    @ clear the exclusive monitor
#endif
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
#else
        @ Thumb mode SVC restore
        ldr     lr, [sp, #S_SP]                 @ top of the stack
        ldrd    r0, r1, [sp, #S_LR]             @ calling lr and pc

        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r2, r1, [sp, #S_LR]             @ clear the exclusive monitor

        stmdb   lr!, {r0, r1, \rpsr}            @ calling lr and rfe context
        ldmia   sp, {r0 - r12}
        mov     sp, lr
        ldr     lr, [sp], #4
        rfeia   sp!
#endif
        .endm
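
@
@ Editor's note (not part of the upstream file): svc_exit is the common tail
@ of the SVC-mode exception return paths in entry-armv.S; \rpsr names the
@ register holding the saved PSR (typically invoked as "svc_exit r5").
@ Passing irq = 1 on the IRQ return path tells the macro that interrupts
@ are already off and that the interrupted context must have had them
@ enabled, so the irq-tracing hook can be called unconditionally.
@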

        @
        @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
        @
        @ This macro acts in a similar manner to svc_exit but switches to FIQ
        @ mode to restore the final part of the register state.
        @
        @ We cannot use the normal svc_exit procedure because that would
        @ clobber spsr_svc (FIQ could be delivered during the first few
        @ instructions of vector_swi meaning its contents have not been
        @ saved anywhere).
        @
        @ Note that, unlike svc_exit, this macro also does not allow a caller
        @ supplied rpsr. This is because the FIQ exceptions are not re-entrant
        @ and the handlers cannot call into the scheduler (meaning the value
        @ on the stack remains correct).
        @
        .macro  svc_exit_via_fiq
        uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode restore
        mov     r0, sp
        ldmib   r0, {r1 - r14}  @ abort is deadly from here onward (it will
                                @ clobber state restored below)
        msr     cpsr_c, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
        add     r8, r0, #S_PC
        ldr     r9, [r0, #S_PSR]
        msr     spsr_cxsf, r9
        ldr     r0, [r0, #S_R0]
        ldmia   r8, {pc}^
#else
        @ Thumb mode restore
        add     r0, sp, #S_R2
        ldr     lr, [sp, #S_LR]
        ldr     sp, [sp, #S_SP] @ abort is deadly from here onward (it will
                                @ clobber state restored below)
        ldmia   r0, {r2 - r12}
        mov     r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
        msr     cpsr_c, r1
        sub     r0, #S_R2
        add     r8, r0, #S_PC
        ldmia   r0, {r0 - r1}
        rfeia   r8
#endif
        .endm
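
@
@ Editor's note (not part of the upstream file): the FIQ trick above relies
@ on register banking. Most of the frame is reloaded while still in SVC
@ mode; once in FIQ mode, r8-r14 refer to the FIQ-banked copies, so r8/r9
@ can be reused as scratch without disturbing the values that the final
@ exception return (ldmia {pc}^ or rfeia) hands back to the interrupted
@ context.
@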


        .macro  restore_user_regs, fast = 0, offset = 0
#if defined(CONFIG_CPU_32v6K) && \
    (!defined(CONFIG_CPU_V6) || defined(CONFIG_SMP))
#ifdef CONFIG_CPU_V6
ALT_SMP(nop)
ALT_UP_B(.L1_\@)
#endif
        @ The TLS register update is deferred until return to user space so we
        @ can use it for other things while running in the kernel
        mrc     p15, 0, r1, c13, c0, 3
        ldr     r1, [r1, #TI_TP_VALUE]
        mcr     p15, 0, r1, c13, c0, 3
.L1_\@:
#endif

        uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL
        @ ARM mode restore
        mov     r2, sp
        ldr     r1, [r2, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [r2, #\offset + S_PC]!      @ get pc
        tst     r1, #PSR_I_BIT | 0x0f
        bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r1, r2, [r2]                    @ clear the exclusive monitor
#endif
        .if     \fast
        ldmdb   r2, {r1 - lr}^                  @ get calling r1 - lr
        .else
        ldmdb   r2, {r0 - lr}^                  @ get calling r0 - lr
        .endif
        mov     r0, r0                          @ ARMv5T and earlier require a nop
                                                @ after ldm {}^
        add     sp, sp, #\offset + PT_REGS_SIZE
        movs    pc, lr                          @ return & move spsr_svc into cpsr
1:      bug     "Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
        @ V7M restore.
        @ Note that we don't need to do clrex here as clearing the local
        @ monitor is part of the exception entry and exit sequence.
        .if     \offset
        add     sp, #\offset
        .endif
        v7m_exception_slow_exit ret_r0 = \fast
#else
        @ Thumb mode restore
        mov     r2, sp
        load_user_sp_lr r2, r3, \offset + S_SP
        ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [sp, #\offset + S_PC]       @ get pc
        add     sp, sp, #\offset + S_SP
        tst     r1, #PSR_I_BIT | 0x0f
        bne     1f
        msr     spsr_cxsf, r1                   @ save in spsr_svc

        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r1, r2, [sp]                    @ clear the exclusive monitor

        .if     \fast
        ldmdb   sp, {r1 - r12}                  @ get calling r1 - r12
        .else
        ldmdb   sp, {r0 - r12}                  @ get calling r0 - r12
        .endif
        add     sp, sp, #PT_REGS_SIZE - S_SP
        movs    pc, lr                          @ return & move spsr_svc into cpsr
1:      bug     "Returning to usermode but unexpected PSR bits set?", \@
#endif  /* !CONFIG_THUMB2_KERNEL */
        .endm
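
@
@ Editor's note (not part of the upstream file): restore_user_regs is the
@ last step of every return to user space. fast = 1 is used on the syscall
@ fast path, where r0 already holds the return value and must not be
@ reloaded from pt_regs; offset accounts for the extra S_OFF bytes the
@ syscall entry pushed. The "tst r1, #PSR_I_BIT | 0x0f" test refuses to
@ return to user space with interrupts disabled or with privileged mode
@ bits set in the saved PSR and hits the bug trap instead.
@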

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
        .macro ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING_USER
        .if     \save
        stmdb   sp!, {r0-r3, ip, lr}
        bl      user_exit_callable
        ldmia   sp!, {r0-r3, ip, lr}
        .else
        bl      user_exit_callable
        .endif
#endif
        .endm

        .macro ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING_USER
        .if     \save
        stmdb   sp!, {r0-r3, ip, lr}
        bl      user_enter_callable
        ldmia   sp!, {r0-r3, ip, lr}
        .else
        bl      user_enter_callable
        .endif
#endif
        .endm
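
@
@ Editor's note (not part of the upstream file): ct_user_exit is placed on
@ kernel-entry paths (syscalls and exceptions taken from user mode) and
@ ct_user_enter on the return-to-user path. save = 1 preserves the AAPCS
@ caller-saved registers (r0-r3, ip, lr) around the C callout; callers with
@ nothing live in those registers can pass save = 0 and skip the push/pop.
@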

        .macro  invoke_syscall, table, nr, tmp, ret, reload=0
#ifdef CONFIG_CPU_SPECTRE
        mov     \tmp, \nr
        cmp     \tmp, #NR_syscalls              @ check upper syscall limit
        movcs   \tmp, #0
        csdb
        badr    lr, \ret                        @ return address
        .if     \reload
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        ldmiacc r1, {r0 - r6}                   @ reload r0-r6
        stmiacc sp, {r4, r5}                    @ update stack arguments
        .endif
        ldrcc   pc, [\table, \tmp, lsl #2]      @ call sys_* routine
#else
        cmp     \nr, #NR_syscalls               @ check upper syscall limit
        badr    lr, \ret                        @ return address
        .if     \reload
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        ldmiacc r1, {r0 - r6}                   @ reload r0-r6
        stmiacc sp, {r4, r5}                    @ update stack arguments
        .endif
        ldrcc   pc, [\table, \nr, lsl #2]       @ call sys_* routine
#endif
        .endm
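
@
@ Editor's note (not part of the upstream file): the CONFIG_CPU_SPECTRE
@ variant is a Spectre-v1 mitigation: the syscall number is copied to \tmp,
@ clamped to 0 by movcs when it is out of range, and csdb then acts as a
@ speculation barrier, so a mispredicted bounds check can never index past
@ the end of the syscall table. In both variants an out-of-range number
@ skips the ldrcc and falls through to the caller's "not implemented"
@ handling.
@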

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno    .req    r7              @ syscall number
tbl     .req    r8              @ syscall table pointer
why     .req    r8              @ Linux syscall (!= 0)
tsk     .req    r9              @ current thread_info
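
@
@ Editor's note (not part of the upstream file): .req only creates assembler
@ aliases, so "tbl" and "why" are two names for the same register (r8). The
@ table pointer is only needed while dispatching the syscall; afterwards r8
@ is reused to record whether the return path is for a syscall (why != 0)
@ or for an exception (why == 0).
@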

        .macro  do_overflow_check, frame_size:req
#ifdef CONFIG_VMAP_STACK
        @
        @ Test whether the SP has overflowed. Task and IRQ stacks are aligned
        @ so that SP & BIT(THREAD_SIZE_ORDER + PAGE_SHIFT) should always be
        @ zero.
        @
ARM(    tst     sp, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)      )
THUMB(  tst     r1, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT)      )
THUMB(  it      ne                                              )
        bne     .Lstack_overflow_check\@

        .pushsection    .text
.Lstack_overflow_check\@:
        @
        @ The stack pointer is not pointing to a valid vmap'ed stack, but it
        @ may be pointing into the linear map instead, which may happen if we
        @ are already running from the overflow stack. We cannot detect overflows
        @ in such cases so just carry on.
        @
        str     ip, [r0, #12]
        ldr_va  ip, high_memory
ARM(    cmp     sp, ip                  )
THUMB(  cmp     r1, ip                  )
THUMB(  itt     lo                      )
        ldrlo   ip, [r0, #12]
        blo     .Lout\@

THUMB(  sub     r1, sp, r1              )
THUMB(  sub     sp, r1                  )
        add     sp, sp, #\frame_size
        b       __bad_stack
        .popsection
.Lout\@:
#endif
        .endm
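
@
@ Editor's note (not part of the upstream file): with CONFIG_VMAP_STACK the
@ task and IRQ stacks are aligned so that the bit at THREAD_SIZE_ORDER +
@ PAGE_SHIFT of a valid SP is always clear; overflowing into the guard
@ region below the stack sets that bit and the tst above catches it. For
@ example, with 4 KiB pages and THREAD_SIZE_ORDER = 1 (8 KiB stacks) the
@ check tests bit 13 of the stack pointer. The slow path abandons the check
@ when SP points into the linear map (below high_memory), as happens when
@ already running from the overflow stack.
@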
                                                      
