TOMOYO Linux Cross Reference
Linux/arch/arm64/kernel/head.S


  1 /* SPDX-License-Identifier: GPL-2.0-only */       
  2 /*                                                
  3  * Low-level CPU initialisation                   
  4  * Based on arch/arm/kernel/head.S                
  5  *                                                
  6  * Copyright (C) 1994-2002 Russell King           
  7  * Copyright (C) 2003-2012 ARM Ltd.               
  8  * Authors:     Catalin Marinas <catalin.marinas@arm.com>
  9  *              Will Deacon <will.deacon@arm.com>
 10  */                                               
 11                                                   
 12 #include <linux/linkage.h>                        
 13 #include <linux/init.h>                           
 14 #include <linux/pgtable.h>                        
 15                                                   
 16 #include <asm/asm_pointer_auth.h>                 
 17 #include <asm/assembler.h>                        
 18 #include <asm/boot.h>                             
 19 #include <asm/bug.h>                              
 20 #include <asm/ptrace.h>                           
 21 #include <asm/asm-offsets.h>                      
 22 #include <asm/cache.h>                            
 23 #include <asm/cputype.h>                          
 24 #include <asm/el2_setup.h>                        
 25 #include <asm/elf.h>                              
 26 #include <asm/image.h>                            
 27 #include <asm/kernel-pgtable.h>                   
 28 #include <asm/kvm_arm.h>                          
 29 #include <asm/memory.h>                           
 30 #include <asm/pgtable-hwdef.h>                    
 31 #include <asm/page.h>                             
 32 #include <asm/scs.h>                              
 33 #include <asm/smp.h>                              
 34 #include <asm/sysreg.h>                           
 35 #include <asm/thread_info.h>                      
 36 #include <asm/virt.h>                             
 37                                                   
 38 #include "efi-header.S"                           
 39                                                   
 40 #if (PAGE_OFFSET & 0x1fffff) != 0                 
 41 #error PAGE_OFFSET must be at least 2MB aligned
 42 #endif                                            
 43                                                   
 44 /*                                                
 45  * Kernel startup entry point.                    
 46  * ---------------------------                    
 47  *                                                
 48  * The requirements are:                          
 49  *   MMU = off, D-cache = off, I-cache = on or off,
 50  *   x0 = physical address to the FDT blob.       
 51  *                                                
 52  * Note that the callee-saved registers are used for storing variables
 53  * that are useful before the MMU is enabled. The allocations are described
 54  * in the entry routines.
 55  */                                               
 56         __HEAD                                    
 57         /*                                        
 58          * DO NOT MODIFY. Image header expected by Linux boot-loaders.
 59          */                                       
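            /*
             * These fields form the 64-byte arm64 Image header described in
             * Documentation/arch/arm64/booting.rst: code0/code1 (here the EFI
             * signature NOP and the branch to primary_entry), the image load
             * offset, the effective image size, informative flags, three
             * reserved quads, the "ARM\x64" magic, and a 32-bit offset to the
             * PE/COFF header used when booting via EFI.
             */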
 60         efi_signature_nop                         
 61         b       primary_entry                     
 62         .quad   0                                 
 63         le64sym _kernel_size_le                   
 64         le64sym _kernel_flags_le                  
 65         .quad   0                                 
 66         .quad   0                                 
 67         .quad   0                                 
 68         .ascii  ARM64_IMAGE_MAGIC                 
 69         .long   .Lpe_header_offset                
 70                                                   
 71         __EFI_PE_HEADER                           
 72                                                   
 73         .section ".idmap.text","a"                
 74                                                   
 75         /*                                        
 76          * The following callee saved general purpose registers are used on the
 77          * primary lowlevel boot path:
 78          *
 79          *  Register   Scope                      Purpose
 80          *  x19        primary_entry() .. start_kernel()        whether we entered with the MMU on
 81          *  x20        primary_entry() .. __primary_switch()    CPU boot mode
 82          *  x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
 83          */                                       
 84 SYM_CODE_START(primary_entry)                     
 85         bl      record_mmu_state                  
 86         bl      preserve_boot_args                
 87                                                   
 88         adrp    x1, early_init_stack              
 89         mov     sp, x1                            
 90         mov     x29, xzr                          
 91         adrp    x0, init_idmap_pg_dir             
 92         mov     x1, xzr                           
 93         bl      __pi_create_init_idmap            
 94                                                   
 95         /*                                        
 96          * If the page tables have been populated with non-cacheable
 97          * accesses (MMU disabled), invalidate those tables again to
 98          * remove any speculatively loaded cache lines.
 99          */                                       
100         cbnz    x19, 0f                           
101         dmb     sy                                
102         mov     x1, x0                            
103         adrp    x0, init_idmap_pg_dir             
104         adr_l   x2, dcache_inval_poc              
105         blr     x2                                
106         b       1f                                
107                                                   
108         /*                                        
109          * If we entered with the MMU and caches on, clean the ID mapped part
110          * of the primary boot code to the PoC so we can safely execute it with
111          * the MMU off.                           
112          */                                       
113 0:      adrp    x0, __idmap_text_start            
114         adr_l   x1, __idmap_text_end              
115         adr_l   x2, dcache_clean_poc              
116         blr     x2                                
117                                                   
118 1:      mov     x0, x19                           
119         bl      init_kernel_el                    
120         mov     x20, x0                           
121                                                   
122         /*                                        
123          * The following calls CPU setup code, see arch/arm64/mm/proc.S for
124          * details.
125          * On return, the CPU will be ready for the MMU to be turned on and
126          * the TCR will have been set.            
127          */                                       
128         bl      __cpu_setup                       
129         b       __primary_switch                  
130 SYM_CODE_END(primary_entry)                       
131                                                   
132         __INIT                                    
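    /*
     * record_mmu_state leaves x19 holding SCTLR_ELx.M if we entered with the
     * MMU and data cache enabled (and the correct endianness), and zero
     * otherwise. If the endianness is wrong, it is fixed up here and the MMU
     * is disabled before returning, so callers can use x19 to decide whether
     * cache maintenance is needed for data written with the MMU off.
     */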
133 SYM_CODE_START_LOCAL(record_mmu_state)            
134         mrs     x19, CurrentEL                    
135         cmp     x19, #CurrentEL_EL2               
136         mrs     x19, sctlr_el1                    
137         b.ne    0f                                
138         mrs     x19, sctlr_el2                    
139 0:                                                
140 CPU_LE( tbnz    x19, #SCTLR_ELx_EE_SHIFT, 1f    )
141 CPU_BE( tbz     x19, #SCTLR_ELx_EE_SHIFT, 1f    )
142         tst     x19, #SCTLR_ELx_C                 
143         and     x19, x19, #SCTLR_ELx_M            
144         csel    x19, xzr, x19, eq                 
145         ret                                       
146                                                   
147         /*                                        
148          * Set the correct endianness early so all memory accesses issued
149          * before init_kernel_el() occur in the correct byte order. Note that
150          * this means the MMU must be disabled, or the active ID map will end
151          * up getting interpreted with the wrong byte order.
152          */                                       
153 1:      eor     x19, x19, #SCTLR_ELx_EE           
154         bic     x19, x19, #SCTLR_ELx_M            
155         b.ne    2f                                
156         pre_disable_mmu_workaround                
157         msr     sctlr_el2, x19                    
158         b       3f                                
159 2:      pre_disable_mmu_workaround                
160         msr     sctlr_el1, x19                    
161 3:      isb                                       
162         mov     x19, xzr                          
163         ret                                       
164 SYM_CODE_END(record_mmu_state)                    
165                                                   
166 /*                                                
167  * Preserve the arguments passed by the bootloader in x0 .. x3
168  */                                               
169 SYM_CODE_START_LOCAL(preserve_boot_args)          
170         mov     x21, x0                           
171                                                   
172         adr_l   x0, boot_args                     
173         stp     x21, x1, [x0]                     
174         stp     x2, x3, [x0, #16]                 
175                                                   
176         cbnz    x19, 0f                           
177         dmb     sy                                
178                                                   
179                                                   
180         add     x1, x0, #0x20                     
181         b       dcache_inval_poc                  
182 0:      str_l   x19, mmu_enabled_at_boot, x0      
183         ret                                       
184 SYM_CODE_END(preserve_boot_args)                  
185                                                   
186         /*                                        
187          * Initialize CPU registers with task-specific and cpu-specific context.
188          *
189          * Create a final frame record at task_pt_regs(current)->stackframe, so
190          * that the unwinder can identify the final frame record of any task by
191          * its location in the task stack. We reserve the entire pt_regs space
192          * for consistency with user tasks and kthreads.
193          */                                       
194         .macro  init_cpu_task tsk, tmp1, tmp2     
195         msr     sp_el0, \tsk                      
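            // arm64 keeps the 'current' task pointer in sp_el0 while in the kernel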
196                                                   
197         ldr     \tmp1, [\tsk, #TSK_STACK]         
198         add     sp, \tmp1, #THREAD_SIZE           
199         sub     sp, sp, #PT_REGS_SIZE             
200                                                   
201         stp     xzr, xzr, [sp, #S_STACKFRAME]     
202         add     x29, sp, #S_STACKFRAME            
203                                                   
204         scs_load_current                          
205                                                   
206         adr_l   \tmp1, __per_cpu_offset           
207         ldr     w\tmp2, [\tsk, #TSK_TI_CPU]       
208         ldr     \tmp1, [\tmp1, \tmp2, lsl #3]     
209         set_this_cpu_offset \tmp1                 
210         .endm                                     
211                                                   
212 /*                                                
213  * The following fragment of code is executed with the MMU enabled.
214  *                                                
215  *   x0 = __pa(KERNEL_START)                      
216  */                                               
217 SYM_FUNC_START_LOCAL(__primary_switched)          
218         adr_l   x4, init_task                     
219         init_cpu_task x4, x5, x6                  
220                                                   
221         adr_l   x8, vectors                       
222         msr     vbar_el1, x8                      
223         isb                                       
224                                                   
225         stp     x29, x30, [sp, #-16]!             
226         mov     x29, sp                           
227                                                   
228         str_l   x21, __fdt_pointer, x5            
229                                                   
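            /*
             * Record the offset between the kernel's runtime virtual and
             * physical addresses: x0 still holds __pa(KERNEL_START), so
             * VA(_text) - x0 yields kimage_voffset, which is used to translate
             * kernel image addresses to physical addresses.
             */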
230         adrp    x4, _text                         
231         sub     x4, x4, x0                        
232         str_l   x4, kimage_voffset, x5            
233                                                   
234         mov     x0, x20                           
235         bl      set_cpu_boot_mode_flag            
236                                                   
237 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
238         bl      kasan_early_init                  
239 #endif                                            
240         mov     x0, x20                           
241         bl      finalise_el2                      
242         ldp     x29, x30, [sp], #16               
243         bl      start_kernel                      
244         ASM_BUG()                                 
245 SYM_FUNC_END(__primary_switched)                  
246                                                   
247 /*                                                
248  * end early head section, begin head code that is also used for
249  * hotplug and needs to have the same protections as the text region
250  */                                               
251         .section ".idmap.text","a"                
252                                                   
253 /*                                                
254  * Starting from EL2 or EL1, configure the CPU to execute at the highest
255  * reachable EL supported by the kernel in a chosen default state. If dropping
256  * from EL2 to EL1, configure EL2 before configuring EL1.
257  *
258  * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
259  * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
260  *
261  * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if
262  * booted in EL1 or EL2 respectively, with the top 32 bits containing
263  * potential context flags. These flags are *not* stored in __boot_cpu_mode.
264  *
265  * x0: whether we are being called from the primary boot path with the MMU on
266  */                                               
267 SYM_FUNC_START(init_kernel_el)                    
268         mrs     x1, CurrentEL                     
269         cmp     x1, #CurrentEL_EL2                
270         b.eq    init_el2                          
271                                                   
272 SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)            
273         mov_q   x0, INIT_SCTLR_EL1_MMU_OFF        
274         pre_disable_mmu_workaround                
275         msr     sctlr_el1, x0                     
276         isb                                       
277         mov_q   x0, INIT_PSTATE_EL1               
278         msr     spsr_el1, x0                      
279         msr     elr_el1, lr                       
280         mov     w0, #BOOT_CPU_MODE_EL1            
281         eret                                      
282                                                   
283 SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)            
284         msr     elr_el2, lr                       
285                                                   
286         // clean all HYP code to the PoC if we booted at EL2 with the MMU on
287         cbz     x0, 0f                            
288         adrp    x0, __hyp_idmap_text_start        
289         adr_l   x1, __hyp_text_end                
290         adr_l   x2, dcache_clean_poc              
291         blr     x2                                
292                                                   
293         mov_q   x0, INIT_SCTLR_EL2_MMU_OFF        
294         pre_disable_mmu_workaround                
295         msr     sctlr_el2, x0                     
296         isb                                       
297 0:                                                
298         mov_q   x0, HCR_HOST_NVHE_FLAGS           
299                                                   
300         /*                                        
301          * Compliant CPUs advertise their VHE-onliness with
302          * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
303          * RES1 in that case. Publish the E2H bit early so that
304          * it can be picked up by the init_el2_state macro.
305          *
306          * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
307          * don't advertise it (they predate this relaxation).
308          */                                       
309         mrs_s   x1, SYS_ID_AA64MMFR4_EL1          
310         tbz     x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
311                                                   
312         orr     x0, x0, #HCR_E2H                  
313 1:                                                
314         msr     hcr_el2, x0                       
315         isb                                       
316                                                   
317         init_el2_state                            
318                                                   
319         /* Hypervisor stub */                     
320         adr_l   x0, __hyp_stub_vectors            
321         msr     vbar_el2, x0                      
322         isb                                       
323                                                   
324         mov_q   x1, INIT_SCTLR_EL1_MMU_OFF        
325                                                   
326         mrs     x0, hcr_el2                       
327         and     x0, x0, #HCR_E2H                  
328         cbz     x0, 2f                            
329                                                   
330         /* Set a sane SCTLR_EL1, the VHE way */
331         msr_s   SYS_SCTLR_EL12, x1                
332         mov     x2, #BOOT_CPU_FLAG_E2H            
333         b       3f                                
334                                                   
335 2:                                                
336         msr     sctlr_el1, x1                     
337         mov     x2, xzr                           
338 3:                                                
339         __init_el2_nvhe_prepare_eret              
340                                                   
341         mov     w0, #BOOT_CPU_MODE_EL2            
342         orr     x0, x0, x2                        
343         eret                                      
344 SYM_FUNC_END(init_kernel_el)                      
345                                                   
346         /*                                        
347          * This provides a "holding pen" for platforms where all secondary
348          * cores are held until we're ready for them to initialise.
349          */                                       
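            /*
             * With the spin-table boot method, the boot CPU writes a secondary's
             * MPIDR into secondary_holding_pen_release (see
             * arch/arm64/kernel/smp_spin_table.c) and signals an event; each
             * parked CPU wakes from wfe, compares that value with its own MPIDR
             * and, on a match, proceeds to secondary_startup.
             */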
350 SYM_FUNC_START(secondary_holding_pen)             
351         mov     x0, xzr                           
352         bl      init_kernel_el                    
353         mrs     x2, mpidr_el1                     
354         mov_q   x1, MPIDR_HWID_BITMASK            
355         and     x2, x2, x1                        
356         adr_l   x3, secondary_holding_pen_release
357 pen:    ldr     x4, [x3]                          
358         cmp     x4, x2                            
359         b.eq    secondary_startup                 
360         wfe                                       
361         b       pen                               
362 SYM_FUNC_END(secondary_holding_pen)               
363                                                   
364         /*                                        
365          * Secondary entry point that jumps straight into the kernel. Only to
366          * be used where CPUs are brought online dynamically by the kernel.
367          */                                       
368 SYM_FUNC_START(secondary_entry)                   
369         mov     x0, xzr                           
370         bl      init_kernel_el                    
371         b       secondary_startup                 
372 SYM_FUNC_END(secondary_entry)                     
373                                                   
374 SYM_FUNC_START_LOCAL(secondary_startup)           
375         /*                                        
376          * Common entry point for secondary CPUs.
377          */                                       
378         mov     x20, x0                           
379                                                   
380 #ifdef CONFIG_ARM64_VA_BITS_52                    
381 alternative_if ARM64_HAS_VA52                     
382         bl      __cpu_secondary_check52bitva      
383 alternative_else_nop_endif                        
384 #endif                                            
385                                                   
386         bl      __cpu_setup                       
387         adrp    x1, swapper_pg_dir                
388         adrp    x2, idmap_pg_dir                  
389         bl      __enable_mmu                      
390         ldr     x8, =__secondary_switched         
391         br      x8                                
392 SYM_FUNC_END(secondary_startup)                   
393                                                   
394         .text                                     
395 SYM_FUNC_START_LOCAL(__secondary_switched)        
396         mov     x0, x20                           
397         bl      set_cpu_boot_mode_flag            
398                                                   
399         mov     x0, x20                           
400         bl      finalise_el2                      
401                                                   
402         str_l   xzr, __early_cpu_boot_status, x3
403         adr_l   x5, vectors                       
404         msr     vbar_el1, x5                      
405         isb                                       
406                                                   
407         adr_l   x0, secondary_data                
408         ldr     x2, [x0, #CPU_BOOT_TASK]          
409         cbz     x2, __secondary_too_slow          
410                                                   
411         init_cpu_task x2, x1, x3                  
412                                                   
413 #ifdef CONFIG_ARM64_PTR_AUTH                      
414         ptrauth_keys_init_cpu x2, x3, x4, x5      
415 #endif                                            
416                                                   
417         bl      secondary_start_kernel            
418         ASM_BUG()                                 
419 SYM_FUNC_END(__secondary_switched)                
420                                                   
421 SYM_FUNC_START_LOCAL(__secondary_too_slow)        
422         wfe                                       
423         wfi                                       
424         b       __secondary_too_slow              
425 SYM_FUNC_END(__secondary_too_slow)                
426                                                   
427 /*                                                
428  * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
429  * in w0. See arch/arm64/include/asm/virt.h for more info.
430  */                                               
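    /*
     * __boot_cpu_mode is a pair of 32-bit words: CPUs booted in EL1 write
     * BOOT_CPU_MODE_EL1 to the first word, CPUs booted in EL2 write
     * BOOT_CPU_MODE_EL2 to the second (hence the 'add x1, x1, #4' below), so
     * the kernel can later tell whether all CPUs entered at the same
     * exception level and whether EL2 is available everywhere.
     */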
431 SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)      
432         adr_l   x1, __boot_cpu_mode               
433         cmp     w0, #BOOT_CPU_MODE_EL2            
434         b.ne    1f                                
435         add     x1, x1, #4                        
436 1:      str     w0, [x1]                          
437         ret                                       
438 SYM_FUNC_END(set_cpu_boot_mode_flag)              
439                                                   
440 /*                                                
441  * The booting CPU updates the failed status @__early_cpu_boot_status,
442  * with MMU turned off.                           
443  *                                                
444  * update_early_cpu_boot_status tmp, status       
445  *  - Corrupts tmp1, tmp2                         
446  *  - Writes 'status' to __early_cpu_boot_status and makes sure
447  *    it is committed to memory.                  
448  */                                               
449                                                   
450         .macro  update_early_cpu_boot_status status, tmp1, tmp2
451         mov     \tmp2, #\status                   
452         adr_l   \tmp1, __early_cpu_boot_status    
453         str     \tmp2, [\tmp1]                    
454         dmb     sy                                
455         dc      ivac, \tmp1                       
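            /*
             * The store above is made with the MMU (and caches) off, while the
             * boot CPU may read __early_cpu_boot_status through a cacheable
             * mapping; the dmb and the cache line invalidation make the update
             * visible to it.
             */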
456         .endm                                     
457                                                   
458 /*                                                
459  * Enable the MMU.                                
460  *                                                
461  *  x0  = SCTLR_EL1 value for turning on the MMU.
462  *  x1  = TTBR1_EL1 value                         
463  *  x2  = ID map root table address               
464  *                                                
465  * Returns to the caller via x30/lr. This requires the caller to be covered
466  * by the .idmap.text section.                    
467  *                                                
468  * Checks if the selected granule size is supported by the CPU.
469  * If it isn't, park the CPU.
470  */                                               
471         .section ".idmap.text","a"                
472 SYM_FUNC_START(__enable_mmu)                      
473         mrs     x3, ID_AA64MMFR0_EL1              
474         ubfx    x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
475         cmp     x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
476         b.lt    __no_granule_support
477         cmp     x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
478         b.gt    __no_granule_support              
479         phys_to_ttbr x2, x2                       
480         msr     ttbr0_el1, x2                     
481         load_ttbr1 x1, x1, x3                     
482                                                   
483         set_sctlr_el1   x0                        
484                                                   
485         ret                                       
486 SYM_FUNC_END(__enable_mmu)                        
487                                                   
488 #ifdef CONFIG_ARM64_VA_BITS_52                    
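    /*
     * 52-bit VA support must be present on every CPU: without LPA2 this means
     * ID_AA64MMFR2_EL1.VARange (64K pages with FEAT_LVA); with LPA2 the
     * granule's TGRAN field must advertise FEAT_LPA2. A secondary lacking the
     * feature parks itself below and reports CPU_STUCK_REASON_52_BIT_VA.
     */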
489 SYM_FUNC_START(__cpu_secondary_check52bitva)      
490 #ifndef CONFIG_ARM64_LPA2                         
491         mrs_s   x0, SYS_ID_AA64MMFR2_EL1          
492         and     x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
493         cbnz    x0, 2f                            
494 #else                                             
495         mrs     x0, id_aa64mmfr0_el1              
496         sbfx    x0, x0, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
497         cmp     x0, #ID_AA64MMFR0_EL1_TGRAN_LPA2
498         b.ge    2f                                
499 #endif                                            
500                                                   
501         update_early_cpu_boot_status \            
502                 CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
503 1:      wfe                                       
504         wfi                                       
505         b       1b                                
506                                                   
507 2:      ret                                       
508 SYM_FUNC_END(__cpu_secondary_check52bitva)        
509 #endif                                            
510                                                   
511 SYM_FUNC_START_LOCAL(__no_granule_support)        
512         /* Indicate that this CPU can't boot and is stuck in the kernel */
513         update_early_cpu_boot_status \
514                 CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x0, x1
515 1:                                                
516         wfe                                       
517         wfi                                       
518         b       1b                                
519 SYM_FUNC_END(__no_granule_support)                
520                                                   
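    /*
     * __primary_switch enables the MMU with the initial ID map in TTBR0 and the
     * empty reserved_pg_dir in TTBR1, then hands over to the position-independent
     * code in arch/arm64/kernel/pi/ (__pi_early_map_kernel) to map the kernel
     * proper before branching to __primary_switched via its virtual address.
     */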
521 SYM_FUNC_START_LOCAL(__primary_switch)            
522         adrp    x1, reserved_pg_dir               
523         adrp    x2, init_idmap_pg_dir             
524         bl      __enable_mmu                      
525                                                   
526         adrp    x1, early_init_stack              
527         mov     sp, x1                            
528         mov     x29, xzr                          
529         mov     x0, x20                           
530         mov     x1, x21                           
531         bl      __pi_early_map_kernel             
532                                                   
533         ldr     x8, =__primary_switched           
534         adrp    x0, KERNEL_START                  
535         br      x8                                
536 SYM_FUNC_END(__primary_switch)                    
                                                      
