/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf Electronics
 * Written by Ralf Baechle and Andreas Busse
 * Copyright (C) 1994 - 99, 2003, 06 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Modified for DECStation and hence R3000 support by Paul M. Antoine
 * Further modifications by David S. Miller and Harald Koerfgen
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/threads.h>

#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>

#include <kernel-entry-init.h>

	/*
	 * For the moment disable interrupts, mark the kernel mode and
	 * set ST0_KX so that the CPU does not spit fire when using
	 * 64-bit addresses.  A full initialization of the CPU's status
	 * register is done later in per_cpu_trap_init().
	 */
	.macro	setup_c0_status set clr
	.set	push
	mfc0	t0, CP0_STATUS
	or	t0, ST0_KERNEL_CUMASK|\set|0x1f|\clr
	xor	t0, 0x1f|\clr
	mtc0	t0, CP0_STATUS
	.set	noreorder
	sll	zero, 3				# ehb
	.set	pop
	.endm
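
	/*
	 * Note the or/xor pair above: OR-ing in ST0_KERNEL_CUMASK|\set
	 * together with 0x1f|\clr forces all of those bits to 1, and the
	 * following XOR with 0x1f|\clr flips exactly that subset back to
	 * 0.  Whatever the register held before, the coprocessor-usable
	 * and \set bits end up set while IE/EXL/ERL/KSU (the low 0x1f)
	 * and the \clr bits end up clear.  "sll zero, 3" is the encoding
	 * of the MIPS32r2 ehb instruction (a plain nop on older CPUs);
	 * it sits inside .set noreorder so the assembler leaves it in
	 * place to clear the execution hazard of the mtc0.
	 */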

	.macro	setup_c0_status_pri
#ifdef CONFIG_64BIT
	setup_c0_status ST0_KX 0
#else
	setup_c0_status 0 0
#endif
	.endm

	.macro	setup_c0_status_sec
#ifdef CONFIG_64BIT
	setup_c0_status ST0_KX ST0_BEV
#else
	setup_c0_status 0 ST0_BEV
#endif
	.endm
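
	/*
	 * The only difference between the primary and secondary variants
	 * is ST0_BEV: secondary CPUs come up after the boot CPU has
	 * installed the RAM exception vectors, so they can leave
	 * bootstrap exception mode immediately, while the boot CPU
	 * leaves BEV alone here and lets per_cpu_trap_init() clear it
	 * once the vectors exist.
	 */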

#ifndef CONFIG_NO_EXCEPT_FILL
	/*
	 * Reserved space for exception handlers.
	 * Necessary for machines which link their kernels at KSEG0.
	 */
	.fill	0x400
#endif
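
	/*
	 * The architected exception vectors (TLB refill at offset 0x000,
	 * XTLB refill at 0x080, cache error at 0x100, general exception
	 * at 0x180, interrupt at 0x200) occupy the first 0x400 bytes of
	 * KSEG0.  A kernel linked right at the base of KSEG0 would
	 * otherwise place _stext on top of them, so the .fill above
	 * keeps that window free for the handlers installed later.
	 */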

EXPORT(_stext)

#ifdef CONFIG_BOOT_RAW
	/*
	 * Give us a fighting chance of running if execution begins at
	 * the kernel load address.  This is needed because this platform
	 * does not have an ELF loader yet.
	 */
FEXPORT(__kernel_entry)
	j	kernel_entry
#endif /* CONFIG_BOOT_RAW */

	__REF
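
	/*
	 * kernel_entry runs once at boot but jumps into __init code
	 * (start_kernel), so it is placed in .ref.text via __REF:
	 * references from .ref.text into init sections are deliberately
	 * exempt from modpost's section-mismatch warnings.
	 */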

NESTED(kernel_entry, 16, sp)			# kernel entry point

	kernel_entry_setup			# cpu specific setup

	setup_c0_status_pri

	/* We might not get launched at the address the kernel is linked to,
	   so we jump there.  */
	PTR_LA	t0, 0f
	jr	t0
0:

	PTR_LA		t0, __bss_start		# clear .bss
	LONG_S		zero, (t0)
	PTR_LA		t1, __bss_stop - LONGSIZE
1:
	PTR_ADDIU	t0, LONGSIZE
	LONG_S		zero, (t0)
	bne		t0, t1, 1b

	LONG_S		a0, fw_arg0		# firmware arguments
	LONG_S		a1, fw_arg1
	LONG_S		a2, fw_arg2
	LONG_S		a3, fw_arg3

	MTC0		zero, CP0_CONTEXT	# clear context register
#ifdef CONFIG_64BIT
	MTC0		zero, CP0_XCONTEXT
#endif
	PTR_LA		$28, init_thread_union
	/* Set the SP after an empty pt_regs.  */
	PTR_LI		sp, _THREAD_SIZE - 32 - PT_SIZE
	PTR_ADDU	sp, $28
	back_to_back_c0_hazard
	set_saved_sp	sp, t0, t1

	PTR_SUBU	sp, 4 * SZREG		# init stack pointer
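
	/*
	 * Stack layout note: $28 is the kernel's thread pointer, so it
	 * is aimed at init_thread_union and sp is placed _THREAD_SIZE
	 * above it, backed off by a 32-byte pad and an empty struct
	 * pt_regs, which is what the rest of the kernel expects to find
	 * at the top of a task's stack.  set_saved_sp records this sp
	 * in kernelsp for exception entry, and the final PTR_SUBU
	 * reserves the argument save area (four registers) required by
	 * the calling convention before the first C call.
	 */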

#ifdef CONFIG_RELOCATABLE
	/* Copy kernel and apply the relocations */
	jal		relocate_kernel

	/* Repoint the sp into the new kernel image */
	PTR_LI		sp, _THREAD_SIZE - 32 - PT_SIZE
	PTR_ADDU	sp, $28
	set_saved_sp	sp, t0, t1
	PTR_SUBU	sp, 4 * SZREG		# init stack pointer

	/*
	 * relocate_kernel returns the entry point either
	 * in the relocated kernel or the original if for
	 * some reason relocation failed - jump there now
	 * with instruction hazard barrier because of the
	 * newly sync'd icache.
	 */
	jr.hb		v0
#else	/* !CONFIG_RELOCATABLE */
	j		start_kernel
#endif	/* !CONFIG_RELOCATABLE */
	END(kernel_entry)

#ifdef CONFIG_SMP
/*
 * SMP slave cpus entry point.	Board specific code for bootstrap calls this
 * function after setting up the stack and gp registers.
 */
NESTED(smp_bootstrap, 16, sp)
	smp_slave_setup
	setup_c0_status_sec
	j	start_secondary
	END(smp_bootstrap)
#endif /* CONFIG_SMP */