/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/el2_setup.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
	efi_signature_nop			// special NOP to identify as PE/COFF executable
	b	primary_entry			// branch to kernel start, magic
	.quad	0				// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
	.long	.Lpe_header_offset		// Offset to the PE header.

	__EFI_PE_HEADER

	.section ".idmap.text","a"
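	/*
	 * Everything in .idmap.text is mapped at an identity (VA == PA)
	 * address and may execute with the MMU off, so it must not rely on
	 * the kernel's ordinary virtual mappings. This is why the early
	 * boot path below lives in this section, and why it cleans or
	 * invalidates itself to the PoC around MMU state changes.
	 */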
	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 *  Register   Scope                                    Purpose
	 *  x19        primary_entry() .. start_kernel()        whether we entered with the MMU on
	 *  x20        primary_entry() .. __primary_switch()    CPU boot mode
	 *  x21        primary_entry() .. start_kernel()        FDT pointer passed at boot in x0
	 */
SYM_CODE_START(primary_entry)
	bl	record_mmu_state
	bl	preserve_boot_args

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	adrp	x0, init_idmap_pg_dir
	mov	x1, xzr
	bl	__pi_create_init_idmap

	/*
	 * If the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	cbnz	x19, 0f
	dmb	sy
	mov	x1, x0				// end of used region
	adrp	x0, init_idmap_pg_dir
	adr_l	x2, dcache_inval_poc
	blr	x2
	b	1f

	/*
	 * If we entered with the MMU and caches on, clean the ID mapped part
	 * of the primary boot code to the PoC so we can safely execute it
	 * with the MMU off.
	 */
0:	adrp	x0, __idmap_text_start
	adr_l	x1, __idmap_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

1:	mov	x0, x19
	bl	init_kernel_el			// w0=cpu_boot_mode
	mov	x20, x0

	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)

	__INIT
SYM_CODE_START_LOCAL(record_mmu_state)
	mrs	x19, CurrentEL
	cmp	x19, #CurrentEL_EL2
	mrs	x19, sctlr_el1
	b.ne	0f
	mrs	x19, sctlr_el2
0:
CPU_LE( tbnz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
CPU_BE( tbz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
	and	x19, x19, #SCTLR_ELx_M		// isolate M bit
	csel	x19, xzr, x19, eq		// clear x19 if Z
	ret

	/*
	 * Set the correct endianness early so all memory accesses issued
	 * before init_kernel_el() occur in the correct byte order. Note that
	 * this means the MMU must be disabled, or the active ID map will end
	 * up getting interpreted with the wrong byte order.
	 */
1:	eor	x19, x19, #SCTLR_ELx_EE
	bic	x19, x19, #SCTLR_ELx_M
	b.ne	2f
	pre_disable_mmu_workaround
	msr	sctlr_el2, x19
	b	3f
2:	pre_disable_mmu_workaround
	msr	sctlr_el1, x19
3:	isb
	mov	x19, xzr
	ret
SYM_CODE_END(record_mmu_state)
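	/*
	 * In short, the contract is: record_mmu_state leaves x19 non-zero
	 * only if we entered with the MMU and caches enabled and the correct
	 * endianness. The rest of the boot path keys off this value: when
	 * x19 is zero, earlier page table writes bypassed the caches, so
	 * stale cache lines must be invalidated rather than cleaned.
	 */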
/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	cbnz	x19, 0f				// skip cache invalidation if MMU is on
	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	dcache_inval_poc		// tail call
0:	str_l	x19, mmu_enabled_at_boot, x0
	ret
SYM_CODE_END(preserve_boot_args)

	/*
	 * Initialize CPU registers with task-specific and cpu-specific context.
	 *
	 * Create a final frame record at task_pt_regs(current)->stackframe, so
	 * that the unwinder can identify the final frame record of any task by
	 * its location in the task stack. We reserve the entire pt_regs space
	 * for consistency with user tasks and kthreads.
	 */
	.macro	init_cpu_task tsk, tmp1, tmp2
	msr	sp_el0, \tsk

	ldr	\tmp1, [\tsk, #TSK_STACK]
	add	sp, \tmp1, #THREAD_SIZE
	sub	sp, sp, #PT_REGS_SIZE

	stp	xzr, xzr, [sp, #S_STACKFRAME]
	add	x29, sp, #S_STACKFRAME

	scs_load_current

	adr_l	\tmp1, __per_cpu_offset
	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
	ldr	\tmp1, [\tmp1, \tmp2, lsl #3]
	set_this_cpu_offset \tmp1
	.endm
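	/*
	 * init_cpu_task is invoked with the task pointer plus two scratch
	 * registers, e.g. "init_cpu_task x4, x5, x6" with x4 = init_task on
	 * the primary path. Afterwards, SP_EL0 carries the 'current' task
	 * pointer, SP sits at the task stack top minus a full pt_regs frame
	 * (holding the final frame record), and the per-CPU offset register
	 * is loaded from __per_cpu_offset[cpu].
	 */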
>> 165 */ >> 166 mr r4,r30 >> 167 bl fix_mem_constants >> 168 #endif /* CONFIG_APUS */ 205 169 206 adr_l \tmp1, __per_cpu_offset !! 170 /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains 207 ldr w\tmp2, [\tsk, #TSK_TI_CPU] !! 171 * the physical address we are running at, returned by early_init() 208 ldr \tmp1, [\tmp1, \tmp2, lsl #3] !! 172 */ 209 set_this_cpu_offset \tmp1 !! 173 bl mmu_off 210 .endm !! 174 __after_mmu_off: >> 175 #ifndef CONFIG_POWER4 >> 176 bl clear_bats >> 177 bl flush_tlbs >> 178 >> 179 bl initial_bats >> 180 #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) >> 181 bl setup_disp_bat >> 182 #endif >> 183 #else /* CONFIG_POWER4 */ >> 184 bl reloc_offset >> 185 bl initial_mm_power4 >> 186 #endif /* CONFIG_POWER4 */ 211 187 212 /* 188 /* 213 * The following fragment of code is executed !! 189 * Call setup_cpu for CPU 0 and initialize 6xx Idle 214 * << 215 * x0 = __pa(KERNEL_START) << 216 */ 190 */ 217 SYM_FUNC_START_LOCAL(__primary_switched) !! 191 bl reloc_offset 218 adr_l x4, init_task !! 192 li r24,0 /* cpu# */ 219 init_cpu_task x4, x5, x6 !! 193 bl call_setup_cpu /* Call setup_cpu for this CPU */ >> 194 #ifdef CONFIG_6xx >> 195 bl reloc_offset >> 196 bl init_idle_6xx >> 197 #endif /* CONFIG_6xx */ >> 198 #ifdef CONFIG_POWER4 >> 199 bl reloc_offset >> 200 bl init_idle_power4 >> 201 #endif /* CONFIG_POWER4 */ 220 202 221 adr_l x8, vectors << 222 msr vbar_el1, x8 << 223 isb << 224 203 225 stp x29, x30, [sp, #-16]! !! 204 #ifndef CONFIG_APUS 226 mov x29, sp !! 205 /* >> 206 * We need to run with _start at physical address 0. >> 207 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses >> 208 * the exception vectors at 0 (and therefore this copy >> 209 * overwrites OF's exception vectors with our own). >> 210 * If the MMU is already turned on, we copy stuff to KERNELBASE, >> 211 * otherwise we copy it to 0. >> 212 */ >> 213 bl reloc_offset >> 214 mr r26,r3 >> 215 addis r4,r3,KERNELBASE@h /* current address of _start */ >> 216 cmpwi 0,r4,0 /* are we already running at 0? */ >> 217 bne relocate_kernel >> 218 #endif /* CONFIG_APUS */ >> 219 /* >> 220 * we now have the 1st 16M of ram mapped with the bats. >> 221 * prep needs the mmu to be turned on here, but pmac already has it on. >> 222 * this shouldn't bother the pmac since it just gets turned on again >> 223 * as we jump to our code at KERNELBASE. -- Cort >> 224 * Actually no, pmac doesn't have it on any more. BootX enters with MMU >> 225 * off, and in other cases, we now turn it off before changing BATs above. >> 226 */ >> 227 turn_on_mmu: >> 228 mfmsr r0 >> 229 ori r0,r0,MSR_DR|MSR_IR >> 230 mtspr SRR1,r0 >> 231 lis r0,start_here@h >> 232 ori r0,r0,start_here@l >> 233 mtspr SRR0,r0 >> 234 SYNC >> 235 RFI /* enables MMU */ 227 236 228 str_l x21, __fdt_pointer, x5 !! 237 /* >> 238 * We need __secondary_hold as a place to hold the other cpus on >> 239 * an SMP machine, even when we are running a UP kernel. >> 240 */ >> 241 . = 0xc0 /* for prep bootloader */ >> 242 li r3,1 /* MTX only has 1 cpu */ >> 243 .globl __secondary_hold >> 244 __secondary_hold: >> 245 /* tell the master we're here */ >> 246 stw r3,4(0) >> 247 #ifdef CONFIG_SMP >> 248 100: lwz r4,0(0) >> 249 /* wait until we're told to start */ >> 250 cmpw 0,r4,r3 >> 251 bne 100b >> 252 /* our cpu # was at addr 0 - go */ >> 253 mr r24,r3 /* cpu # */ >> 254 b __secondary_start >> 255 #else >> 256 b . >> 257 #endif /* CONFIG_SMP */ 229 258 230 adrp x4, _text !! 259 /* 231 sub x4, x4, x0 !! 260 * Exception entry code. 
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x0, x20
	bl	set_cpu_boot_mode_flag

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	bl	kasan_early_init
#endif
	mov	x0, x20
	bl	finalise_el2			// Prefer VHE if possible
	ldp	x29, x30, [sp], #16
	bl	start_kernel
	ASM_BUG()
SYM_FUNC_END(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","a"
/*
 * Starting from EL2 or EL1, configure the CPU to execute at the highest
 * reachable EL supported by the kernel in a chosen default state. If dropping
 * from EL2 to EL1, configure EL2 before configuring EL1.
 *
 * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
 * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if
 * booted in EL1 or EL2 respectively, with the top 32 bits containing
 * potential context flags. These flags are *not* stored in __boot_cpu_mode.
 *
 * x0: whether we are being called from the primary boot path with the MMU on
 */
SYM_FUNC_START(init_kernel_el)
	mrs	x1, CurrentEL
	cmp	x1, #CurrentEL_EL2
	b.eq	init_el2

SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el1, x0
	isb
	mov_q	x0, INIT_PSTATE_EL1
	msr	spsr_el1, x0
	msr	elr_el1, lr
	mov	w0, #BOOT_CPU_MODE_EL1
	eret
SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	msr	elr_el2, lr

	// clean all HYP code to the PoC if we booted at EL2 with the MMU on
	cbz	x0, 0f
	adrp	x0, __hyp_idmap_text_start
	adr_l	x1, __hyp_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x0
	isb
0:
	mov_q	x0, HCR_HOST_NVHE_FLAGS

	/*
	 * Compliant CPUs advertise their VHE-onlyness with
	 * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
	 * RES1 in that case. Publish the E2H bit early so that
	 * it can be picked up by the init_el2_state macro.
	 *
	 * Fruity CPUs seem to have HCR_EL2.E2H set to RES1, but
	 * don't advertise it (they predate this relaxation).
	 */
	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
	tbz	x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + 3), 1f

	orr	x0, x0, #HCR_E2H
1:
	msr	hcr_el2, x0
	isb
	init_el2_state

	/* Hypervisor stub */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0
	isb
>> 893 */ >> 894 KernelFP: >> 895 lwz r3,_MSR(r1) >> 896 ori r3,r3,MSR_FP >> 897 stw r3,_MSR(r1) /* enable use of FP after return */ >> 898 lis r3,86f@h >> 899 ori r3,r3,86f@l >> 900 mr r4,r2 /* current */ >> 901 lwz r5,_NIP(r1) >> 902 bl printk >> 903 b ret_from_except >> 904 86: .string "floating point used in kernel (task=%p, pc=%x)\n" >> 905 .align 4,0 >> 906 >> 907 #ifdef CONFIG_ALTIVEC >> 908 /* Note that the AltiVec support is closely modeled after the FP >> 909 * support. Changes to one are likely to be applicable to the >> 910 * other! */ >> 911 load_up_altivec: >> 912 /* >> 913 * Disable AltiVec for the task which had AltiVec previously, >> 914 * and save its AltiVec registers in its thread_struct. >> 915 * Enables AltiVec for use in the kernel on return. >> 916 * On SMP we know the AltiVec units are free, since we give it up every >> 917 * switch. -- Kumar >> 918 */ >> 919 mfmsr r5 >> 920 oris r5,r5,MSR_VEC@h >> 921 MTMSRD(r5) /* enable use of AltiVec now */ >> 922 isync >> 923 /* >> 924 * For SMP, we don't do lazy AltiVec switching because it just gets too >> 925 * horrendously complex, especially when a task switches from one CPU >> 926 * to another. Instead we call giveup_altivec in switch_to. >> 927 */ >> 928 #ifndef CONFIG_SMP >> 929 tophys(r6,0) >> 930 addis r3,r6,last_task_used_altivec@ha >> 931 lwz r4,last_task_used_altivec@l(r3) >> 932 cmpi 0,r4,0 >> 933 beq 1f >> 934 add r4,r4,r6 >> 935 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ >> 936 SAVE_32VR(0,r10,r4) >> 937 mfvscr vr0 >> 938 li r10,THREAD_VSCR >> 939 stvx vr0,r10,r4 >> 940 lwz r5,PT_REGS(r4) >> 941 add r5,r5,r6 >> 942 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) >> 943 lis r10,MSR_VEC@h >> 944 andc r4,r4,r10 /* disable altivec for previous task */ >> 945 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 313 1: 946 1: 314 msr hcr_el2, x0 !! 947 #endif /* CONFIG_SMP */ 315 isb !! 948 /* enable use of AltiVec after return */ >> 949 oris r9,r9,MSR_VEC@h >> 950 mfspr r5,SPRG3 /* current task's THREAD (phys) */ >> 951 li r4,1 >> 952 li r10,THREAD_VSCR >> 953 stw r4,THREAD_USED_VR(r5) >> 954 lvx vr0,r10,r5 >> 955 mtvscr vr0 >> 956 REST_32VR(0,r10,r5) >> 957 #ifndef CONFIG_SMP >> 958 subi r4,r5,THREAD >> 959 sub r4,r4,r6 >> 960 stw r4,last_task_used_altivec@l(r3) >> 961 #endif /* CONFIG_SMP */ >> 962 /* restore registers and return */ >> 963 /* we haven't used ctr or xer or lr */ >> 964 b fast_exception_return 316 965 317 init_el2_state !! 966 /* >> 967 * AltiVec unavailable trap from kernel - print a message, but let >> 968 * the task use AltiVec in the kernel until it returns to user mode. >> 969 */ >> 970 KernelAltiVec: >> 971 lwz r3,_MSR(r1) >> 972 oris r3,r3,MSR_VEC@h >> 973 stw r3,_MSR(r1) /* enable use of AltiVec after return */ >> 974 lis r3,87f@h >> 975 ori r3,r3,87f@l >> 976 mr r4,r2 /* current */ >> 977 lwz r5,_NIP(r1) >> 978 bl printk >> 979 b ret_from_except >> 980 87: .string "AltiVec used in kernel (task=%p, pc=%x) \n" >> 981 .align 4,0 318 982 319 /* Hypervisor stub */ !! 983 /* 320 adr_l x0, __hyp_stub_vectors !! 984 * giveup_altivec(tsk) 321 msr vbar_el2, x0 !! 985 * Disable AltiVec for the task given as the argument, 322 isb !! 986 * and save the AltiVec registers in its thread_struct. 323 !! 987 * Enables AltiVec for use in the kernel on return. 324 mov_q x1, INIT_SCTLR_EL1_MMU_OFF !! 
2:
	msr	sctlr_el1, x1
	mov	x2, xzr
3:
	__init_el2_nvhe_prepare_eret

	mov	w0, #BOOT_CPU_MODE_EL2
	orr	x0, x0, x2
	eret
SYM_FUNC_END(init_kernel_el)

	/*
	 * This provides a "holding pen" for platforms in which all secondary
	 * cores are held until we're ready for them to initialise.
	 */
SYM_FUNC_START(secondary_holding_pen)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	mrs	x2, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x2, x2, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x2
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)

	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
SYM_FUNC_START(secondary_entry)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	b	secondary_startup
SYM_FUNC_END(secondary_entry)
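	/*
	 * The release protocol for the pen (the writer side lives in
	 * arch/arm64/kernel/smp_spin_table.c): the boot CPU stores the
	 * target CPU's MPIDR hardware ID to secondary_holding_pen_release
	 * and issues an SEV. Each waiting CPU wakes from WFE, reloads the
	 * release value, and leaves the pen only when it matches its own
	 * MPIDR.
	 */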
SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	mov	x20, x0				// preserve boot mode

#ifdef CONFIG_ARM64_VA_BITS_52
alternative_if ARM64_HAS_VA52
	bl	__cpu_secondary_check52bitva
alternative_else_nop_endif
#endif

	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	adrp	x2, idmap_pg_dir
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)
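	/*
	 * From here the MMU is on: __enable_mmu installed the idmap and
	 * swapper tables set up by the primary CPU. The literal-pool load
	 * of =__secondary_switched deliberately yields the kernel-space
	 * virtual address, so the "br x8" above is what moves a secondary
	 * CPU off the identity map and onto the kernel mapping.
	 */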
	.text
SYM_FUNC_START_LOCAL(__secondary_switched)
	mov	x0, x20
	bl	set_cpu_boot_mode_flag

	mov	x0, x20
	bl	finalise_el2

	str_l	xzr, __early_cpu_boot_status, x3
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow

	init_cpu_task x2, x1, x3
#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5
 >> 1447 */
 >> 1448 lis r5, KERNELBASE@h
 >> 1449 lwz r5, 0xf0(r5)
 >> 1450 stw r4, 0x4(r5)
415 #endif 1451 #endif
 >> 1452 li r4,0
 >> 1453 BEGIN_FTR_SECTION
 >> 1454 dssall
 >> 1455 sync
 >> 1456 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
416 !! 1457 3: isync
417 bl secondary_start_kernel !! 1458 #ifdef CONFIG_PPC64BRIDGE
418 ASM_BUG() !! 1459 slbie r4
419 SYM_FUNC_END(__secondary_switched) !! 1460 #endif /* CONFIG_PPC64BRIDGE */
420 !! 1461 mtsrin r3,r4
421 SYM_FUNC_START_LOCAL(__secondary_too_slow) !! 1462 addi r3,r3,0x111 /* next VSID */
422 wfe !! 1463 rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
423 wfi !! 1464 addis r4,r4,0x1000 /* address of next segment */
424 b __secondary_too_slow !! 1465 bdnz 3b
425 SYM_FUNC_END(__secondary_too_slow) !! 1466 sync
426 !! 1467 isync
427 /* !! 1468 blr
428 * Sets the __boot_cpu_mode flag depending on !! 1469
429 * in w0. See arch/arm64/include/asm/virt.h fo !! 1470 /*
430 */ !! 1471 * An undocumented "feature" of 604e requires that the v bit
431 SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag) !! 1472 * be cleared before changing BAT values.
432 adr_l x1, __boot_cpu_mode !! 1473 *
433 cmp w0, #BOOT_CPU_MODE_EL2 !! 1474 * Also, newer IBM firmware does not clear bat3 and 4 so
434 b.ne 1f !! 1475 * this makes sure it's done.
435 add x1, x1, #4 !! 1476 * -- Cort
436 1: str w0, [x1] !! 1477 */
437 ret !! 1478 clear_bats:
438 SYM_FUNC_END(set_cpu_boot_mode_flag) !! 1479 li r10,0
439 !! 1480 mfspr r9,PVR
440 /* !! 1481 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
441 * The booting CPU updates the failed status @ !! 1482 cmpwi r9, 1
442 * with MMU turned off. !! 1483 beq 1f
443 * !! 1484
444 * update_early_cpu_boot_status tmp, status !! 1485 mtspr DBAT0U,r10
445 * - Corrupts tmp1, tmp2 !! 1486 mtspr DBAT0L,r10
446 * - Writes 'status' to __early_cpu_boot_stat !! 1487 mtspr DBAT1U,r10
447 * it is committed to memory. !! 1488 mtspr DBAT1L,r10
448 */ !! 1489 mtspr DBAT2U,r10
449 !! 1490 mtspr DBAT2L,r10
450 .macro update_early_cpu_boot_status s !! 1491 mtspr DBAT3U,r10
451 mov \tmp2, #\status !! 1492 mtspr DBAT3L,r10
452 adr_l \tmp1, __early_cpu_boot_status !! 1493 1:
453 str \tmp2, [\tmp1] !! 1494 mtspr IBAT0U,r10
454 dmb sy !! 1495 mtspr IBAT0L,r10
455 dc ivac, \tmp1 !! 1496 mtspr IBAT1U,r10
456 .endm !! 1497 mtspr IBAT1L,r10
457 !! 1498 mtspr IBAT2U,r10
458 /* !! 1499 mtspr IBAT2L,r10
459 * Enable the MMU. !! 1500 mtspr IBAT3U,r10
460 * !! 1501 mtspr IBAT3L,r10
461 * x0 = SCTLR_EL1 value for turning on the M !! 1502 BEGIN_FTR_SECTION
462 * x1 = TTBR1_EL1 value !! 1503 /* Here's a tweak: at this point, CPU setup has
463 * x2 = ID map root table address !! 1504 * not been called yet, so HIGH_BAT_EN may not be
464 * !! 1505 * set in HID0 for the 745x processors. However, it
465 * Returns to the caller via x30/lr. This requ !! 1506 * seems that doesn't affect our ability to actually
466 * by the .idmap.text section. !! 1507 * write to these SPRs.
467 * !! 1508 */
468 * Checks if the selected granule size is supp !! 1509 mtspr SPRN_DBAT4U,r10
469 * If it isn't, park the CPU !! 1510 mtspr SPRN_DBAT4L,r10
470 */ !! 1511 mtspr SPRN_DBAT5U,r10
471 .section ".idmap.text","a" !! 1512 mtspr SPRN_DBAT5L,r10
472 SYM_FUNC_START(__enable_mmu) !! 1513 mtspr SPRN_DBAT6U,r10
473 mrs x3, ID_AA64MMFR0_EL1 !! 1514 mtspr SPRN_DBAT6L,r10
474 ubfx x3, x3, #ID_AA64MMFR0_EL1_TGRA !! 1515 mtspr SPRN_DBAT7U,r10
475 cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SU !! 1516 mtspr SPRN_DBAT7L,r10
476 b.lt __no_granule_support !! 1517 mtspr SPRN_IBAT4U,r10
477 cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SU !! 1518 mtspr SPRN_IBAT4L,r10
478 b.gt __no_granule_support !! 1519 mtspr SPRN_IBAT5U,r10
479 phys_to_ttbr x2, x2 !! 1520 mtspr SPRN_IBAT5L,r10
480 msr ttbr0_el1, x2 !! 1521 mtspr SPRN_IBAT6U,r10
481 load_ttbr1 x1, x1, x3 !! 1522 mtspr SPRN_IBAT6L,r10
482 !! 1523 mtspr SPRN_IBAT7U,r10
483 set_sctlr_el1 x0 !! 1524 mtspr SPRN_IBAT7L,r10
484 !! 1525 END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS)
485 ret !! 1526 blr
486 SYM_FUNC_END(__enable_mmu) !! 1527
487 !! 1528 flush_tlbs:
488 #ifdef CONFIG_ARM64_VA_BITS_52 !! 1529 lis r10, 0x40
489 SYM_FUNC_START(__cpu_secondary_check52bitva) !! 1530 1: addic. r10, r10, -0x1000
490 #ifndef CONFIG_ARM64_LPA2 !! 1531 tlbie r10
491 mrs_s x0, SYS_ID_AA64MMFR2_EL1 !! 1532 blt 1b
492 and x0, x0, ID_AA64MMFR2_EL1_VARan !! 1533 sync
493 cbnz x0, 2f !! 1534 blr
494 !! 1535
495 mrs x0, id_aa64mmfr0_el1 !! 1536 mmu_off:
496 sbfx x0, x0, #ID_AA64MMFR0_EL1_TGRA !! 1537 addi r4, r3, __after_mmu_off - _start
497 cmp x0, #ID_AA64MMFR0_EL1_TGRAN_LP !! 1538 mfmsr r3
498 b.ge 2f !! 1539 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
499 #endif !! 1540 beqlr
500 !! 1541 andc r3,r3,r0
501 update_early_cpu_boot_status \ !! 1542 mtspr SRR0,r4
502 CPU_STUCK_IN_KERNEL | CPU_STUC !! 1543 mtspr SRR1,r3
503 1: wfe !! 1544 sync
504 wfi !! 1545 RFI
505 b 1b !! 1546
506 !! 1547 #ifndef CONFIG_POWER4
507 2: ret !! 1548 /*
508 SYM_FUNC_END(__cpu_secondary_check52bitva) !! 1549 * Use the first pair of BAT registers to map the 1st 16MB
509 #endif !! 1550 * of RAM to KERNELBASE. From this point on we can't safely
510 !! 1551 * call OF any more.
511 SYM_FUNC_START_LOCAL(__no_granule_support) !! 1552 */
512 /* Indicate that this CPU can't boot a !! 1553 initial_bats:
513 update_early_cpu_boot_status \ !! 1554 lis r11,KERNELBASE@h
514 CPU_STUCK_IN_KERNEL | CPU_STUC !! 1555 #ifndef CONFIG_PPC64BRIDGE
515 1: !! 1556 mfspr r9,PVR
516 wfe !! 1557 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
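 /* Editorial note: (PVR >> 16) == 1 identifies the original 601, whose
  * unified BATs keep the valid bit in the lower register, so it takes
  * the two 8MB IBAT pairs just below; other 6xx parts branch to 4: and
  * map a single 256MB DBAT/IBAT pair instead. */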
1558 cmpi 0,r9,1
517 wfi !! 1559 bne 4f
518 b 1b !! 1560 ori r11,r11,4 /* set up BAT registers for 601 */
519 SYM_FUNC_END(__no_granule_support) !! 1561 li r8,0x7f /* valid, block length = 8MB */
520 !! 1562 oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
521 SYM_FUNC_START_LOCAL(__primary_switch) !! 1563 oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
522 adrp x1, reserved_pg_dir !! 1564 mtspr IBAT0U,r11 /* N.B. 601 has valid bit in */
523 adrp x2, init_idmap_pg_dir !! 1565 mtspr IBAT0L,r8 /* lower BAT register */
524 bl __enable_mmu !! 1566 mtspr IBAT1U,r9
525 !! 1567 mtspr IBAT1L,r10
526 adrp x1, early_init_stack !! 1568 isync
527 mov sp, x1 !! 1569 blr
528 mov x29, xzr !! 1570 #endif /* CONFIG_PPC64BRIDGE */
529 mov x0, x20 !! 1571
530 mov x1, x21 !! 1572 4: tophys(r8,r11)
531 bl __pi_early_map_kernel !! 1573 #ifdef CONFIG_SMP
532 !! 1574 ori r8,r8,0x12 /* R/W access, M=1 */
533 ldr x8, =__primary_switched !! 1575 #else
534 adrp x0, KERNEL_START !! 1576 ori r8,r8,2 /* R/W access */
535 br x8 !! 1577 #endif /* CONFIG_SMP */
536 SYM_FUNC_END(__primary_switch) !! 1578 #ifdef CONFIG_APUS
 >> 1579 ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */
 >> 1580 #else
 >> 1581 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
 >> 1582 #endif /* CONFIG_APUS */
 >> 1583
 >> 1584 #ifdef CONFIG_PPC64BRIDGE
 >> 1585 /* clear out the high 32 bits in the BAT */
 >> 1586 clrldi r11,r11,32
 >> 1587 clrldi r8,r8,32
 >> 1588 #endif /* CONFIG_PPC64BRIDGE */
 >> 1589 mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
 >> 1590 mtspr DBAT0U,r11 /* bit in upper BAT register */
 >> 1591 mtspr IBAT0L,r8
 >> 1592 mtspr IBAT0U,r11
 >> 1593 isync
 >> 1594 blr
 >> 1595
 >> 1596 #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
 >> 1597 setup_disp_bat:
 >> 1598 /*
 >> 1599 * Set up the display BAT prepared for us in prom.c
 >> 1600 */
 >> 1601 mflr r8
 >> 1602 bl reloc_offset
 >> 1603 mtlr r8
 >> 1604 addis r8,r3,disp_BAT@ha
 >> 1605 addi r8,r8,disp_BAT@l
 >> 1606 lwz r11,0(r8)
 >> 1607 lwz r8,4(r8)
 >> 1608 mfspr r9,PVR
 >> 1609 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
 >> 1610 cmpi 0,r9,1
 >> 1611 beq 1f
 >> 1612 mtspr DBAT3L,r8
 >> 1613 mtspr DBAT3U,r11
 >> 1614 blr
 >> 1615 1: mtspr IBAT3L,r8
 >> 1616 mtspr IBAT3U,r11
 >> 1617 blr
 >> 1618
 >> 1619 #endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */
 >> 1620
 >> 1621 #else /* CONFIG_POWER4 */
 >> 1622 /*
 >> 1623 * Load up the SDR1 and segment register values now
 >> 1624 * since we don't have the BATs.
 >> 1625 * Also make sure we are running in 32-bit mode.
 >> 1626 */
 >> 1627
 >> 1628 initial_mm_power4:
 >> 1629 addis r14,r3,_SDR1@ha /* get the value from _SDR1 */
 >> 1630 lwz r14,_SDR1@l(r14) /* assume hash table below 4GB */
 >> 1631 mtspr SDR1,r14
 >> 1632 slbia
 >> 1633 lis r4,0x2000 /* set pseudo-segment reg 12 */
 >> 1634 ori r5,r4,0x0ccc
 >> 1635 mtsr 12,r5
 >> 1636 ori r5,r4,0x0888 /* set pseudo-segment reg 8 */
 >> 1637 mtsr 8,r5 /* (for access to serial port) */
 >> 1638 ori r5,r4,0x0999 /* set pseudo-segment reg 9 */
 >> 1639 mtsr 9,r5 /* (for access to screen) */
 >> 1640 mfmsr r0
 >> 1641 clrldi r0,r0,1
 >> 1642 sync
 >> 1643 mtmsr r0
 >> 1644 isync
 >> 1645 blr
 >> 1646
 >> 1647 /*
 >> 1648 * On 970 (G5), we pre-set a few bits in HID0 & HID1
 >> 1649 */
 >> 1650 ppc970_setup_hid:
 >> 1651 li r0,0
 >> 1652 sync
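 /* Editorial note: the raw SPR numbers 0x3f4 and 0x3f6 written below
  * are, going by the 970's SPR numbering, HID4 and HID5 (an assumption;
  * the source does not name them); both are cleared before the HID0 and
  * HID1 adjustments that follow. */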
1653 mtspr 0x3f4,r0 465 * Returns to the caller via x30/lr. This requ !! 1654 isync 466 * by the .idmap.text section. !! 1655 sync >> 1656 mtspr 0x3f6,r0 >> 1657 isync >> 1658 mfspr r0,SPRN_HID0 >> 1659 li r11,1 /* clear DOZE, NAP and SLEEP */ >> 1660 rldimi r0,r11,52,8 /* set DPM */ >> 1661 mtspr SPRN_HID0,r0 >> 1662 mfspr r0,SPRN_HID0 >> 1663 mfspr r0,SPRN_HID0 >> 1664 mfspr r0,SPRN_HID0 >> 1665 mfspr r0,SPRN_HID0 >> 1666 mfspr r0,SPRN_HID0 >> 1667 mfspr r0,SPRN_HID0 >> 1668 sync >> 1669 isync >> 1670 mfspr r0,SPRN_HID1 >> 1671 li r11,0x1200 /* enable i-fetch cacheability */ >> 1672 sldi r11,r11,44 /* and prefetch */ >> 1673 or r0,r0,r11 >> 1674 mtspr SPRN_HID1,r0 >> 1675 mtspr SPRN_HID1,r0 >> 1676 isync >> 1677 li r0,0 >> 1678 sync >> 1679 mtspr 0x137,0 >> 1680 isync >> 1681 blr >> 1682 #endif /* CONFIG_POWER4 */ >> 1683 >> 1684 #ifdef CONFIG_8260 >> 1685 /* Jump into the system reset for the rom. >> 1686 * We first disable the MMU, and then jump to the ROM reset address. 467 * 1687 * 468 * Checks if the selected granule size is supp !! 1688 * r3 is the board info structure, r4 is the location for starting. 469 * If it isn't, park the CPU !! 1689 * I use this for building a small kernel that can load other kernels, >> 1690 * rather than trying to write or rely on a rom monitor that can tftp load. 470 */ 1691 */ 471 .section ".idmap.text","a" !! 1692 .globl m8260_gorom 472 SYM_FUNC_START(__enable_mmu) !! 1693 m8260_gorom: 473 mrs x3, ID_AA64MMFR0_EL1 !! 1694 mfmsr r0 474 ubfx x3, x3, #ID_AA64MMFR0_EL1_TGRA !! 1695 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ 475 cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SU !! 1696 sync 476 b.lt __no_granule_support !! 1697 mtmsr r0 477 cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SU !! 1698 sync 478 b.gt __no_granule_support !! 1699 mfspr r11, HID0 479 phys_to_ttbr x2, x2 !! 1700 lis r10, 0 480 msr ttbr0_el1, x2 !! 1701 ori r10,r10,HID0_ICE|HID0_DCE 481 load_ttbr1 x1, x1, x3 !! 1702 andc r11, r11, r10 >> 1703 mtspr HID0, r11 >> 1704 isync >> 1705 li r5, MSR_ >> 1706 lis r6,2f@h >> 1707 addis r6,r6,-KERNELBASE@h >> 1708 ori r6,r6,2f@l >> 1709 mtspr SRR0,r6 >> 1710 mtspr SRR1,r5 >> 1711 isync >> 1712 sync >> 1713 rfi >> 1714 2: >> 1715 mtlr r4 >> 1716 blr >> 1717 #endif 482 1718 483 set_sctlr_el1 x0 << 484 1719 485 ret !! 1720 /* 486 SYM_FUNC_END(__enable_mmu) !! 1721 * We put a few things here that have to be page-aligned. >> 1722 * This stuff goes at the beginning of the data segment, >> 1723 * which is page-aligned. >> 1724 */ >> 1725 .data >> 1726 .globl sdata >> 1727 sdata: >> 1728 .globl empty_zero_page >> 1729 empty_zero_page: >> 1730 .space 4096 >> 1731 >> 1732 .globl swapper_pg_dir >> 1733 swapper_pg_dir: >> 1734 .space 4096 487 1735 488 #ifdef CONFIG_ARM64_VA_BITS_52 !! 1736 /* 489 SYM_FUNC_START(__cpu_secondary_check52bitva) !! 1737 * This space gets a copy of optional info passed to us by the bootstrap 490 #ifndef CONFIG_ARM64_LPA2 !! 1738 * Used to pass parameters into the kernel like root=/dev/sda1, etc. 491 mrs_s x0, SYS_ID_AA64MMFR2_EL1 !! 1739 */ 492 and x0, x0, ID_AA64MMFR2_EL1_VARan !! 1740 .globl cmd_line 493 cbnz x0, 2f !! 1741 cmd_line: 494 #else !! 1742 .space 512 495 mrs x0, id_aa64mmfr0_el1 !! 1743 496 sbfx x0, x0, #ID_AA64MMFR0_EL1_TGRA !! 1744 .globl intercept_table 497 cmp x0, #ID_AA64MMFR0_EL1_TGRAN_LP !! 1745 intercept_table: 498 b.ge 2f !! 1746 .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700 499 #endif !! 1747 .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0 500 !! 1748 .long 0, 0, 0, i0x1300, 0, 0, 0, 0 501 update_early_cpu_boot_status \ !! 
1749 .long 0, 0, 0, 0, 0, 0, 0, 0 502 CPU_STUCK_IN_KERNEL | CPU_STUC !! 1750 .long 0, 0, 0, 0, 0, 0, 0, 0 503 1: wfe !! 1751 .long 0, 0, 0, 0, 0, 0, 0, 0 504 wfi !! 1752 505 b 1b !! 1753 /* Room for two PTE pointers, usually the kernel and current user pointers 506 !! 1754 * to their respective root page table. 507 2: ret !! 1755 */ 508 SYM_FUNC_END(__cpu_secondary_check52bitva) !! 1756 abatron_pteptrs: 509 #endif !! 1757 .space 8 510 << 511 SYM_FUNC_START_LOCAL(__no_granule_support) << 512 /* Indicate that this CPU can't boot a << 513 update_early_cpu_boot_status \ << 514 CPU_STUCK_IN_KERNEL | CPU_STUC << 515 1: << 516 wfe << 517 wfi << 518 b 1b << 519 SYM_FUNC_END(__no_granule_support) << 520 << 521 SYM_FUNC_START_LOCAL(__primary_switch) << 522 adrp x1, reserved_pg_dir << 523 adrp x2, init_idmap_pg_dir << 524 bl __enable_mmu << 525 << 526 adrp x1, early_init_stack << 527 mov sp, x1 << 528 mov x29, xzr << 529 mov x0, x20 << 530 mov x1, x21 << 531 bl __pi_early_map_kernel << 532 << 533 ldr x8, =__primary_switched << 534 adrp x0, KERNEL_START << 535 br x8 << 536 SYM_FUNC_END(__primary_switch) <<
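For readers tracing the MMU set-up above, the following host-side C sketch (editorial; not part of the kernel) reproduces the two register images this file computes: the segment-register values written by load_up_mmu and set_context, and the BAT0 pair built by initial_bats for a 604-class CPU. KERNELBASE = 0xc0000000 and BL_256M = 0x7ff are assumed values mirroring the usual asm/mmu.h definitions; field layouts follow the classic 32-bit PowerPC MMU.

	#include <stdio.h>
	#include <stdint.h>

	/* User segment register: mirrors set_context's
	 * mulli r3,r3,897; rlwinm r3,r3,4,8,27; addis r3,r3,0x6000,
	 * then addi r3,r3,0x111 per 256MB segment. */
	static uint32_t user_sr(uint32_t context, unsigned seg)
	{
		uint32_t vsid = ((context * 897u) << 4) & 0x00fffff0u;
		vsid = (vsid + seg * 0x111u) & 0x00ffffffu; /* rlwinm ...,0,8,3 */
		return 0x60000000u | vsid;                  /* Ks | Ku bits */
	}

	/* Kernel context 0 from load_up_mmu: lis r3,0x2000 (Ku=1, VSID=0),
	 * stepping the VSID by 0x111 for each of the 16 segments. */
	static uint32_t kernel_sr(unsigned seg)
	{
		return 0x20000000u + seg * 0x111u;
	}

	/* BAT0 pair from initial_bats (604 path, no APUS):
	 * BATU = KERNELBASE | BL<<2 | Vs; BATL = phys 0 | PP=R/W,
	 * with M=1 (coherence) added on SMP (ori r8,r8,0x12). */
	static void initial_bat0(int smp, uint32_t *batu, uint32_t *batl)
	{
		*batu = 0xc0000000u | (0x7ffu << 2) | 0x2u;
		*batl = smp ? 0x12u : 0x2u;
	}

	int main(void)
	{
		uint32_t u, l;

		for (unsigned seg = 0; seg < 4; seg++)
			printf("SR%u: kernel=%08x ctx1=%08x\n", seg,
			       (unsigned)kernel_sr(seg), (unsigned)user_sr(1, seg));
		initial_bat0(1, &u, &l);
		printf("BAT0U=%08x BAT0L=%08x\n", (unsigned)u, (unsigned)l);
		return 0;
	}

Running it shows, for example, the familiar 256MB kernel BATU value 0xc0001ffe, and makes the 0x111 VSID skew between adjacent segments easy to see.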