/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf Electronics
 * Written by Ralf Baechle and Andreas Busse
 * Copyright (C) 1994 - 99, 2003, 06 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Modified for DECStation and hence R3000 support by Paul M. Antoine
 * Further modifications by David S. Miller and Harald Koerfgen
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/threads.h>

#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>

#include <kernel-entry-init.h>

	/*
	 * For the moment disable interrupts, mark the kernel mode and
	 * set ST0_KX so that the CPU does not spit fire when using
	 * 64-bit addresses.  A full initialization of the CPU's status
	 * register is done later in per_cpu_trap_init().
	 */
	.macro	setup_c0_status set clr
	.set	push
	mfc0	t0, CP0_STATUS
	or	t0, ST0_CU0|\set|0x1f|\clr
	xor	t0, 0x1f|\clr
	mtc0	t0, CP0_STATUS
	.set	noreorder
	sll	zero,3				# ehb
	.set	pop
	.endm

	.macro	setup_c0_status_pri
#ifdef CONFIG_64BIT
	setup_c0_status ST0_KX 0
#else
	setup_c0_status 0 0
#endif
	.endm

	.macro	setup_c0_status_sec
#ifdef CONFIG_64BIT
	setup_c0_status ST0_KX ST0_BEV
#else
	setup_c0_status 0 ST0_BEV
#endif
	.endm

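/*
 * Roughly speaking, the or/xor pair in setup_c0_status implements a
 * combined "set these bits, clear those bits" update of c0_status: the
 * or forces ST0_CU0, \set and every bit in (0x1f | \clr) to 1, and the
 * xor then flips exactly the (0x1f | \clr) bits back to 0.  Because
 * Status[4:0] holds KSU/ERL/EXL/IE, the CPU ends up in kernel mode with
 * interrupts disabled.  On a 64-bit kernel, setup_c0_status_pri thus
 * expands to approximately:
 *
 *	mfc0	t0, CP0_STATUS
 *	or	t0, ST0_CU0|ST0_KX|0x1f
 *	xor	t0, 0x1f
 *	mtc0	t0, CP0_STATUS
 */
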
#ifndef CONFIG_NO_EXCEPT_FILL
	/*
	 * Reserved space for exception handlers.
	 * Necessary for machines which link their kernels at KSEG0.
	 */
	.fill	0x400
#endif

EXPORT(_stext)

#ifdef CONFIG_BOOT_RAW
	/*
	 * Give us a fighting chance of running if execution begins at the
	 * kernel load address.  This is needed because this platform does
	 * not have an ELF loader yet.
	 */
FEXPORT(__kernel_entry)
	j	kernel_entry
#endif

	__REF

NESTED(kernel_entry, 16, sp)			# kernel entry point

	kernel_entry_setup			# cpu specific setup

	setup_c0_status_pri

	/* We might not get launched at the address the kernel is linked to,
	   so we jump there.  */
	PTR_LA	t0, 0f
	jr	t0
0:

#ifdef CONFIG_USE_OF
#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
	PTR_LA	t2, __appended_dtb

#ifdef CONFIG_CPU_BIG_ENDIAN
	li	t1, 0xd00dfeed
#else
	li	t1, 0xedfe0dd0
#endif
	lw	t0, (t2)
	beq	t0, t1, dtb_found
#endif
	li	t1, -2
	move	t2, a1
	beq	a0, t1, dtb_found

	li	t2, 0
dtb_found:
#endif
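/*
 * In rough C, the device-tree selection above amounts to the following
 * (assuming the usual FDT magic 0xd00dfeed, which a little-endian load
 * sees byte-swapped as 0xedfe0dd0, and the UHI-style convention that
 * a0 == -2 means a1 carries a DTB pointer):
 *
 *	if (IS_ENABLED(CONFIG_MIPS_RAW_APPENDED_DTB) &&
 *	    *(u32 *)&__appended_dtb == fdt_magic)
 *		dtb = &__appended_dtb;
 *	else if ((long)a0 == -2)
 *		dtb = (void *)a1;
 *	else
 *		dtb = NULL;
 *
 * The result is kept in t2 and stored to fw_passed_dtb further down.
 */
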
	PTR_LA		t0, __bss_start		# clear .bss
	LONG_S		zero, (t0)
	PTR_LA		t1, __bss_stop - LONGSIZE
1:
	PTR_ADDIU	t0, LONGSIZE
	LONG_S		zero, (t0)
	bne		t0, t1, 1b

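/*
 * The short loop above zeroes .bss one LONGSIZE-sized word at a time.
 * Assuming __bss_start and __bss_stop are word aligned (as the linker
 * script is expected to arrange), it is equivalent to:
 *
 *	for (long *p = (long *)&__bss_start; p < (long *)&__bss_stop; p++)
 *		*p = 0;
 *
 * This has to happen before any C code relies on zeroed static storage.
 */
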
	LONG_S		a0, fw_arg0		# firmware arguments
	LONG_S		a1, fw_arg1
	LONG_S		a2, fw_arg2
	LONG_S		a3, fw_arg3

#ifdef CONFIG_USE_OF
	LONG_S		t2, fw_passed_dtb
#endif

	MTC0		zero, CP0_CONTEXT	# clear context register
	PTR_LA		$28, init_thread_union
	/* Set the SP after an empty pt_regs.  */
	PTR_LI		sp, _THREAD_SIZE - 32 - PT_SIZE
	PTR_ADDU	sp, $28
	back_to_back_c0_hazard
	set_saved_sp	sp, t0, t1
	PTR_SUBU	sp, 4 * SZREG		# init stack pointer

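/*
 * At this point gp ($28) points at init_thread_union and sp sits just
 * below an empty struct pt_regs near the top of the boot stack,
 * roughly:
 *
 *	init_thread_union + _THREAD_SIZE
 *		[ 32 bytes of padding          ]
 *		[ struct pt_regs (empty)       ]
 *	sp ->	[ 4 * SZREG argument save area ]
 *		[ ... stack grows down ...     ]
 *	init_thread_union
 *
 * set_saved_sp records the pre-subtraction sp as this CPU's kernel
 * stack pointer; the final 4 * SZREG is the argument save area the MIPS
 * calling conventions expect a caller to reserve for its callee.
 */
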
#ifdef CONFIG_RELOCATABLE
	/* Copy kernel and apply the relocations */
	jal		relocate_kernel

	/* Repoint the sp into the new kernel image */
	PTR_LI		sp, _THREAD_SIZE - 32 - PT_SIZE
	PTR_ADDU	sp, $28
	set_saved_sp	sp, t0, t1
	PTR_SUBU	sp, 4 * SZREG		# init stack pointer

	/*
	 * relocate_kernel returns the entry point either
	 * in the relocated kernel or the original if for
	 * some reason relocation failed - jump there now
	 * with instruction hazard barrier because of the
	 * newly sync'd icache.
	 */
	jr.hb		v0
#else
	j		start_kernel
#endif
	END(kernel_entry)

#ifdef CONFIG_SMP
/*
 * SMP slave cpus entry point.	Board specific code for bootstrap calls this
 * function after setting up the stack and gp registers.
 */
NESTED(smp_bootstrap, 16, sp)
	smp_slave_setup
	setup_c0_status_sec
	j	start_secondary
	END(smp_bootstrap)
#endif /* CONFIG_SMP */
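/*
 * Board code that brings up a secondary CPU is expected to provide a
 * valid stack and gp before transferring control to smp_bootstrap.  A
 * purely hypothetical launch sequence (the symbols below are
 * illustrative, not taken from any in-tree platform) might look like:
 *
 *	PTR_LA	sp, board_cpu_boot_stack_top	# per-CPU boot stack
 *	PTR_LA	gp, board_cpu_boot_thread	# thread pointer for this CPU
 *	PTR_LA	t9, smp_bootstrap
 *	jr	t9
 */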