/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/el2_setup.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
	efi_signature_nop		// special NOP to identify as PE/COFF executable
	b	primary_entry		// branch to kernel start, magic
	.quad	0			// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le		// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le	// Informative flags, little-endian
	.quad	0			// reserved
	.quad	0			// reserved
	.quad	0			// reserved
	.ascii	ARM64_IMAGE_MAGIC	// Magic number
	.long	.Lpe_header_offset	// Offset to the PE header.

	__EFI_PE_HEADER
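/*
 * For reference, the 64 bytes emitted above have a C-level twin. The sketch
 * below mirrors the layout described by the arm64 boot protocol and by
 * asm/image.h; treat it as an illustrative view rather than a verbatim copy
 * of the kernel's definition:
 *
 *	struct arm64_image_header {
 *		__le32 code0;		// efi_signature_nop above
 *		__le32 code1;		// the "b primary_entry" above
 *		__le64 text_offset;	// image load offset, little-endian
 *		__le64 image_size;	// effective image size, little-endian
 *		__le64 flags;		// kernel flags, little-endian
 *		__le64 res2;		// reserved
 *		__le64 res3;		// reserved
 *		__le64 res4;		// reserved
 *		__le32 magic;		// ARM64_IMAGE_MAGIC, "ARM\x64"
 *		__le32 res5;		// offset to the PE header
 *	};
 */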
	.section ".idmap.text","a"

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 *  Register   Scope                                  Purpose
	 *  x19        primary_entry() .. start_kernel()      whether we entered with the MMU on
	 *  x20        primary_entry() .. __primary_switch()  CPU boot mode
	 *  x21        primary_entry() .. start_kernel()      FDT pointer passed at boot in x0
	 */
SYM_CODE_START(primary_entry)
	bl	record_mmu_state
	bl	preserve_boot_args

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	adrp	x0, init_idmap_pg_dir
	mov	x1, xzr
	bl	__pi_create_init_idmap

	/*
	 * If the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	cbnz	x19, 0f
	dmb	sy
	mov	x1, x0				// end of used region
	adrp	x0, init_idmap_pg_dir
	adr_l	x2, dcache_inval_poc
	blr	x2
	b	1f

	/*
	 * If we entered with the MMU and caches on, clean the ID mapped part
	 * of the primary boot code to the PoC so we can safely execute it
	 * with the MMU off.
	 */
0:	adrp	x0, __idmap_text_start
	adr_l	x1, __idmap_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

1:	mov	x0, x19
	bl	init_kernel_el			// w0=cpu_boot_mode
	mov	x20, x0

	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)

	__INIT
SYM_CODE_START_LOCAL(record_mmu_state)
	mrs	x19, CurrentEL
	cmp	x19, #CurrentEL_EL2
	mrs	x19, sctlr_el1
	b.ne	0f
	mrs	x19, sctlr_el2
0:
CPU_LE( tbnz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
CPU_BE( tbz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
	and	x19, x19, #SCTLR_ELx_M		// isolate M bit
	csel	x19, xzr, x19, eq		// clear x19 if Z
	ret

	/*
	 * Set the correct endianness early so all memory accesses issued
	 * before init_kernel_el() occur in the correct byte order. Note that
	 * this means the MMU must be disabled, or the active ID map will end
	 * up getting interpreted with the wrong byte order.
	 */
1:	eor	x19, x19, #SCTLR_ELx_EE
	bic	x19, x19, #SCTLR_ELx_M
	b.ne	2f
	pre_disable_mmu_workaround
	msr	sctlr_el2, x19
	b	3f
2:	pre_disable_mmu_workaround
	msr	sctlr_el1, x19
3:	isb
	mov	x19, xzr
	ret
SYM_CODE_END(record_mmu_state)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	cbnz	x19, 0f				// skip cache invalidation if MMU is on
	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	dcache_inval_poc		// tail call
0:	str_l	x19, mmu_enabled_at_boot, x0
	ret
SYM_CODE_END(preserve_boot_args)
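/*
 * On the C side, the region written by preserve_boot_args() is just four
 * u64 slots that setup_arch() can inspect once the kernel is running. A
 * minimal sketch, assuming the definition used by arch/arm64/kernel/setup.c:
 *
 *	u64 __cacheline_aligned boot_args[4];
 *
 *	static void __init check_boot_args(void)
 *	{
 *		// The boot protocol requires x1..x3 to be zero at entry;
 *		// warn if a bootloader violated that.
 *		if (boot_args[1] || boot_args[2] || boot_args[3])
 *			pr_err("WARNING: x1-x3 nonzero in violation of boot protocol!\n");
 *	}
 *
 * check_boot_args() is a hypothetical helper for illustration; upstream the
 * check sits inline in setup_arch().
 */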
	/*
	 * Initialize CPU registers with task-specific and cpu-specific context.
	 *
	 * Create a final frame record at task_pt_regs(current)->stackframe, so
	 * that the unwinder can identify the final frame record of any task by
	 * its location in the task stack. We reserve the entire pt_regs space
	 * for consistency with user tasks and kthreads.
	 */
	.macro	init_cpu_task tsk, tmp1, tmp2
	msr	sp_el0, \tsk

	ldr	\tmp1, [\tsk, #TSK_STACK]
	add	sp, \tmp1, #THREAD_SIZE
	sub	sp, sp, #PT_REGS_SIZE

	stp	xzr, xzr, [sp, #S_STACKFRAME]
	add	x29, sp, #S_STACKFRAME

	scs_load_current

	adr_l	\tmp1, __per_cpu_offset
	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
	ldr	\tmp1, [\tmp1, \tmp2, lsl #3]
	set_this_cpu_offset \tmp1
	.endm

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __pa(KERNEL_START)
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adr_l	x4, init_task
	init_cpu_task x4, x5, x6

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	adrp	x4, _text			// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x0, x20
	bl	set_cpu_boot_mode_flag

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	bl	kasan_early_init
#endif
	mov	x0, x20
	bl	finalise_el2			// Prefer VHE if possible
	ldp	x29, x30, [sp], #16
	bl	start_kernel
	ASM_BUG()
SYM_FUNC_END(__primary_switched)
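/*
 * kimage_voffset, recorded above as VA(_text) - PA(_text), is the constant
 * the rest of the kernel uses to convert between the image's virtual and
 * physical addresses. A sketch of how it is consumed, modelled on the
 * __phys_to_kimg() helper in asm/memory.h (the exact form here is
 * illustrative):
 *
 *	extern u64 kimage_voffset;	// virtual minus physical address of _text
 *
 *	static inline unsigned long phys_to_kimg(phys_addr_t pa)
 *	{
 *		return (unsigned long)pa + kimage_voffset;
 *	}
 */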
/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","a"

/*
 * Starting from EL2 or EL1, configure the CPU to execute at the highest
 * reachable EL supported by the kernel in a chosen default state. If dropping
 * from EL2 to EL1, configure EL2 before configuring EL1.
 *
 * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
 * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if the CPU
 * booted in EL1 or EL2 respectively, with the top 32 bits containing
 * potential context flags. These flags are *not* stored in __boot_cpu_mode.
 *
 * x0: whether we are being called from the primary boot path with the MMU on
 */
SYM_FUNC_START(init_kernel_el)
	mrs	x1, CurrentEL
	cmp	x1, #CurrentEL_EL2
	b.eq	init_el2

SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el1, x0
	isb
	mov_q	x0, INIT_PSTATE_EL1
	msr	spsr_el1, x0
	msr	elr_el1, lr
	mov	w0, #BOOT_CPU_MODE_EL1
	eret

SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	msr	elr_el2, lr

	// clean all HYP code to the PoC if we booted at EL2 with the MMU on
	cbz	x0, 0f
	adrp	x0, __hyp_idmap_text_start
	adr_l	x1, __hyp_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x0
	isb
0:
	mov_q	x0, HCR_HOST_NVHE_FLAGS

	/*
	 * Compliant CPUs advertise their VHE-onlyness with
	 * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
	 * RES1 in that case. Publish the E2H bit early so that
	 * it can be picked up by the init_el2_state macro.
	 *
	 * Fruity CPUs seem to have HCR_EL2.E2H set to RES1, but
	 * don't advertise it (they predate this relaxation).
	 */
	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
	tbz	x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + 3), 1f

	orr	x0, x0, #HCR_E2H
1:
	msr	hcr_el2, x0
	isb

	init_el2_state

	/* Hypervisor stub */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0
	isb

	mov_q	x1, INIT_SCTLR_EL1_MMU_OFF

	mrs	x0, hcr_el2
	and	x0, x0, #HCR_E2H
	cbz	x0, 2f

	/* Set a sane SCTLR_EL1, the VHE way */
	msr_s	SYS_SCTLR_EL12, x1
	mov	x2, #BOOT_CPU_FLAG_E2H
	b	3f

2:
	msr	sctlr_el1, x1
	mov	x2, xzr
3:
	__init_el2_nvhe_prepare_eret

	mov	w0, #BOOT_CPU_MODE_EL2
	orr	x0, x0, x2
	eret
SYM_FUNC_END(init_kernel_el)
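/*
 * The BOOT_CPU_MODE_ELx value returned above is later recorded by
 * set_cpu_boot_mode_flag() so the kernel can tell whether every CPU entered
 * at the same exception level. A C-level sketch following the declarations
 * in arch/arm64/include/asm/virt.h (values are the upstream ones, but
 * verify against your tree):
 *
 *	#define BOOT_CPU_MODE_EL1	(0xe11)
 *	#define BOOT_CPU_MODE_EL2	(0xe12)
 *
 *	// One slot is written by CPUs that booted at EL1, the other by CPUs
 *	// that booted at EL2; virt.h compares the two to decide whether EL2
 *	// was available on all CPUs.
 *	extern u32 __boot_cpu_mode[2];
 */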
	/*
	 * This provides a "holding pen" for platforms to hold all secondary
	 * cores until we're ready for them to initialise.
	 */
SYM_FUNC_START(secondary_holding_pen)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	mrs	x2, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x2, x2, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x2
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)

	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
SYM_FUNC_START(secondary_entry)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	b	secondary_startup
SYM_FUNC_END(secondary_entry)

SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	mov	x20, x0				// preserve boot mode

#ifdef CONFIG_ARM64_VA_BITS_52
alternative_if ARM64_HAS_VA52
	bl	__cpu_secondary_check52bitva
alternative_else_nop_endif
#endif

	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	adrp	x2, idmap_pg_dir
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)

	.text
SYM_FUNC_START_LOCAL(__secondary_switched)
	mov	x0, x20
	bl	set_cpu_boot_mode_flag

	mov	x0, x20
	bl	finalise_el2

	str_l	xzr, __early_cpu_boot_status, x3
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow

	init_cpu_task x2, x1, x3

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5
#endif

	bl	secondary_start_kernel
	ASM_BUG()
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	wfe
	wfi
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// Save CPU boot mode
	ret
SYM_FUNC_END(set_cpu_boot_mode_flag)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with the MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm
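/*
 * The status word written by this macro is polled from C when a secondary
 * CPU fails to come online. A condensed sketch of the consumer side,
 * loosely following the error path of __cpu_up() in arch/arm64/kernel/smp.c
 * (abridged for illustration, not a verbatim copy):
 *
 *	extern long __early_cpu_boot_status;
 *
 *	static void report_stuck_cpu(unsigned int cpu)
 *	{
 *		long status = READ_ONCE(__early_cpu_boot_status);
 *
 *		switch (status & CPU_BOOT_STATUS_MASK) {
 *		case CPU_STUCK_IN_KERNEL:
 *			pr_crit("CPU%u: is stuck in kernel\n", cpu);
 *			if (status & CPU_STUCK_REASON_52_BIT_VA)
 *				pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
 *			if (status & CPU_STUCK_REASON_NO_GRAN)
 *				pr_crit("CPU%u: does not support the configured page size\n", cpu);
 *			break;
 *		}
 *	}
 *
 * report_stuck_cpu() is a hypothetical wrapper; upstream this logic sits
 * inline in the CPU bring-up failure path.
 */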
/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x1  = TTBR1_EL1 value
 *  x2  = ID map root table address
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
	.section ".idmap.text","a"
SYM_FUNC_START(__enable_mmu)
	mrs	x3, ID_AA64MMFR0_EL1
	ubfx	x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
	b.lt	__no_granule_support
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
	b.gt	__no_granule_support
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	load_ttbr1 x1, x1, x3

	set_sctlr_el1	x0

	ret
SYM_FUNC_END(__enable_mmu)

#ifdef CONFIG_ARM64_VA_BITS_52
SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifndef CONFIG_ARM64_LPA2
	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
	cbnz	x0, 2f
#else
	mrs	x0, id_aa64mmfr0_el1
	sbfx	x0, x0, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x0, #ID_AA64MMFR0_EL1_TGRAN_LPA2
	b.ge	2f
#endif

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe
	wfi
	b	1b

2:	ret
SYM_FUNC_END(__cpu_secondary_check52bitva)
#endif

SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
	wfe
	wfi
	b	1b
SYM_FUNC_END(__no_granule_support)

SYM_FUNC_START_LOCAL(__primary_switch)
	adrp	x1, reserved_pg_dir
	adrp	x2, init_idmap_pg_dir
	bl	__enable_mmu

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	mov	x0, x20				// pass the full boot status
	mov	x1, x21				// pass the FDT
	bl	__pi_early_map_kernel		// Map and relocate the kernel

	ldr	x8, =__primary_switched
	adrp	x0, KERNEL_START		// __pa(KERNEL_START)
	br	x8
SYM_FUNC_END(__primary_switch)

/*
 * arch/alpha/kernel/head.S
 *
 * initial boot stuff.. At this point, the bootloader has already
 * switched into OSF/1 PAL-code, and loaded us at the correct address
 * (START_ADDR). So there isn't much left for us to do: just set up
 * the kernel global pointer and jump to the kernel entry-point.
 */

#include <linux/init.h>
#include <asm/system.h>
#include <asm/asm-offsets.h>

	__HEAD
	.globl	swapper_pg_dir
	.globl	_stext
swapper_pg_dir=SWAPPER_PGD

	.set	noreorder
	.globl	__start
	.ent	__start
_stext:
__start:
	.prologue 0
	br	$27,1f
1:	ldgp	$29,0($27)
	/* We need to get current_task_info loaded up... */
	lda	$8,init_thread_union
	/* ... and find our stack ... */
	lda	$30,0x4000 - SIZEOF_PT_REGS($8)
	/* ... and then we can start the kernel. */
	jsr	$26,start_kernel
	call_pal PAL_halt
	.end __start

#ifdef CONFIG_SMP
	.align 3
	.globl	__smp_callin
	.ent	__smp_callin
	/* On entry here from SRM console, the HWPCB of the per-cpu
	   slot for this processor has been loaded. We've arranged
	   for the UNIQUE value for this process to contain the PCBB
	   of the target idle task. */
__smp_callin:
	.prologue 1
	ldgp	$29,0($27)	# First order of business, load the GP.

	call_pal PAL_rduniq	# Grab the target PCBB.
	mov	$0,$16		# Install it.
	call_pal PAL_swpctx

	lda	$8,0x3fff	# Find "current".
	bic	$30,$8,$8

	jsr	$26,smp_callin
	call_pal PAL_halt
	.end __smp_callin
#endif /* CONFIG_SMP */

#
# The following two functions are needed for supporting SRM PALcode
# on the PC164 (at least), since that PALcode manages the interrupt
# masking, and we cannot duplicate the effort without causing problems
#

	.align 3
	.globl	cserve_ena
	.ent	cserve_ena
cserve_ena:
	.prologue 0
	bis	$16,$16,$17
	lda	$16,52($31)
	call_pal PAL_cserve
	ret	($26)
	.end	cserve_ena

	.align 3
	.globl	cserve_dis
	.ent	cserve_dis
cserve_dis:
	.prologue 0
	bis	$16,$16,$17
	lda	$16,53($31)
	call_pal PAL_cserve
	ret	($26)
	.end	cserve_dis

#
# It is handy, on occasion, to make halt actually just loop.
# Putting it here means we don't have to recompile the whole
# kernel.
#

	.align 3
	.globl	halt
	.ent	halt
halt:
	.prologue 0
	call_pal PAL_halt
	.end	halt
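/*
 * cserve_ena()/cserve_dis() above are thin wrappers around the PAL_cserve
 * console service call: the caller's argument is moved into $17 and the
 * sub-function code (52 = enable, 53 = disable, per the lda instructions
 * above) is placed in $16. From C they look like ordinary functions taking
 * the line to (un)mask; a hedged sketch of how an SRM-console machine
 * vector might use them (illustrative, not a verbatim copy of any
 * particular sys_*.c file):
 *
 *	extern void cserve_ena(unsigned long);
 *	extern void cserve_dis(unsigned long);
 *
 *	static void srm_enable_irq(unsigned int irq)
 *	{
 *		cserve_ena(irq - 16);	// let SRM PALcode unmask the line
 *	}
 */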