1 /* SPDX-License-Identifier: GPL-2.0-only */  <<
2 /*                                               1 /*
3  * Low-level CPU initialisation               !!  2  * PowerPC version
4  * Based on arch/arm/kernel/head.S            !!  3  * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5  *                                                4  *
6  * Copyright (C) 1994-2002 Russell King       !!  5  * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7  * Copyright (C) 2003-2012 ARM Ltd.           !!  6  * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8  * Authors: Catalin Marinas <catalin.marinas@arm.com> !!  7  * Adapted for Power Macintosh by Paul Mackerras.
9  *          Will Deacon <will.deacon@arm.com>         !!  8  * Low-level exception handlers and MMU support
10  */                                           !!  9  * rewritten by Paul Mackerras.
11                                               !! 10  * Copyright (C) 1996 Paul Mackerras.
12 #include <linux/linkage.h>                    !! 11  * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
13 #include <linux/init.h>                       !! 12  * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
14 #include <linux/pgtable.h>                    !! 13  *
15                                               !! 14  * This file contains the low-level support and setup for the
16 #include <asm/asm_pointer_auth.h>             !! 15  * PowerPC platform, including trap and interrupt dispatch.
17 #include <asm/assembler.h>                    !! 16  * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
18 #include <asm/boot.h>                         !! 17  *
19 #include <asm/bug.h>                          !! 18  * This program is free software; you can redistribute it and/or
20 #include <asm/ptrace.h>                       !! 19  * modify it under the terms of the GNU General Public License
21 #include <asm/asm-offsets.h>                  !! 20  * as published by the Free Software Foundation; either version
22 #include <asm/cache.h>                        !! 21  * 2 of the License, or (at your option) any later version.
23 #include <asm/cputype.h>                      !! 22  *
24 #include <asm/el2_setup.h>                    !! 23  */
25 #include <asm/elf.h>                          <<
26 #include <asm/image.h>                        <<
27 #include <asm/kernel-pgtable.h>               <<
28 #include <asm/kvm_arm.h>                      <<
29 #include <asm/memory.h>                       <<
30 #include <asm/pgtable-hwdef.h>                <<
31 #include <asm/page.h>                         <<
32 #include <asm/scs.h>                          <<
33 #include <asm/smp.h>                          <<
34 #include <asm/sysreg.h>                       <<
35 #include <asm/thread_info.h>                  <<
36 #include <asm/virt.h>                         <<
37                                                  24
38 #include "efi-header.S"                       !! 25 #include <linux/config.h>
                                                 >> 26 #include <linux/threads.h>
                                                 >> 27 #include <asm/processor.h>
                                                 >> 28 #include <asm/page.h>
                                                 >> 29 #include <asm/mmu.h>
                                                 >> 30 #include <asm/pgtable.h>
                                                 >> 31 #include <asm/cputable.h>
                                                 >> 32 #include <asm/cache.h>
                                                 >> 33 #include <asm/ppc_asm.h>
                                                 >> 34 #include "ppc_defs.h"
39                                                  35
40 #if (PAGE_OFFSET & 0x1fffff) != 0             !! 36 #ifdef CONFIG_APUS
41 #error PAGE_OFFSET must be at least 2MB aligned !!
37 #include <asm/amigappc.h> 42 #endif 38 #endif 43 39 >> 40 #ifdef CONFIG_PPC64BRIDGE >> 41 #define LOAD_BAT(n, reg, RA, RB) \ >> 42 ld RA,(n*32)+0(reg); \ >> 43 ld RB,(n*32)+8(reg); \ >> 44 mtspr IBAT##n##U,RA; \ >> 45 mtspr IBAT##n##L,RB; \ >> 46 ld RA,(n*32)+16(reg); \ >> 47 ld RB,(n*32)+24(reg); \ >> 48 mtspr DBAT##n##U,RA; \ >> 49 mtspr DBAT##n##L,RB; \ >> 50 >> 51 #else /* CONFIG_PPC64BRIDGE */ >> 52 >> 53 /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */ >> 54 #define LOAD_BAT(n, reg, RA, RB) \ >> 55 /* see the comment for clear_bats() -- Cort */ \ >> 56 li RA,0; \ >> 57 mtspr IBAT##n##U,RA; \ >> 58 mtspr DBAT##n##U,RA; \ >> 59 lwz RA,(n*16)+0(reg); \ >> 60 lwz RB,(n*16)+4(reg); \ >> 61 mtspr IBAT##n##U,RA; \ >> 62 mtspr IBAT##n##L,RB; \ >> 63 beq 1f; \ >> 64 lwz RA,(n*16)+8(reg); \ >> 65 lwz RB,(n*16)+12(reg); \ >> 66 mtspr DBAT##n##U,RA; \ >> 67 mtspr DBAT##n##L,RB; \ >> 68 1: >> 69 #endif /* CONFIG_PPC64BRIDGE */ >> 70 >> 71 .text >> 72 .stabs "arch/ppc/kernel/",N_SO,0,0,0f >> 73 .stabs "head.S",N_SO,0,0,0f >> 74 0: >> 75 .globl _stext >> 76 _stext: >> 77 44 /* 78 /* 45 * Kernel startup entry point. !! 79 * _start is defined this way because the XCOFF loader in the OpenFirmware 46 * --------------------------- !! 80 * on the powermac expects the entry point to be a procedure descriptor. 47 * << 48 * The requirements are: << 49 * MMU = off, D-cache = off, I-cache = on or << 50 * x0 = physical address to the FDT blob. << 51 * << 52 * Note that the callee-saved registers are us << 53 * that are useful before the MMU is enabled. << 54 * in the entry routines. << 55 */ 81 */ 56 __HEAD !! 82 .text >> 83 .globl _start >> 84 _start: 57 /* 85 /* 58 * DO NOT MODIFY. Image header expecte !! 86 * These are here for legacy reasons, the kernel used to >> 87 * need to look like a coff function entry for the pmac >> 88 * but we're always started by some kind of bootloader now. >> 89 * -- Cort 59 */ 90 */ 60 efi_signature_nop !! 91 nop 61 b primary_entry !! 92 nop 62 .quad 0 !! 93 nop 63 le64sym _kernel_size_le !! 94 64 le64sym _kernel_flags_le !! 95 /* PMAC 65 .quad 0 !! 96 * Enter here with the kernel text, data and bss loaded starting at 66 .quad 0 !! 97 * 0, running with virtual == physical mapping. 67 .quad 0 !! 98 * r5 points to the prom entry point (the client interface handler 68 .ascii ARM64_IMAGE_MAGIC !! 99 * address). Address translation is turned on, with the prom 69 .long .Lpe_header_offset !! 100 * managing the hash table. Interrupts are disabled. The stack >> 101 * pointer (r1) points to just below the end of the half-meg region >> 102 * from 0x380000 - 0x400000, which is mapped in already. >> 103 * >> 104 * If we are booted from MacOS via BootX, we enter with the kernel >> 105 * image loaded somewhere, and the following values in registers: >> 106 * r3: 'BooX' (0x426f6f58) >> 107 * r4: virtual address of boot_infos_t >> 108 * r5: 0 >> 109 * >> 110 * APUS >> 111 * r3: 'APUS' >> 112 * r4: physical address of memory base >> 113 * Linux/m68k style BootInfo structure at &_end. >> 114 * >> 115 * PREP >> 116 * This is jumped to on prep systems right after the kernel is relocated >> 117 * to its proper place in memory by the boot loader. 
The expected layout >> 118 * of the regs is: >> 119 * r3: ptr to residual data >> 120 * r4: initrd_start or if no initrd then 0 >> 121 * r5: initrd_end - unused if r4 is 0 >> 122 * r6: Start of command line string >> 123 * r7: End of command line string >> 124 * >> 125 * This just gets a minimal mmu environment setup so we can call >> 126 * start_here() to do the real work. >> 127 * -- Cort >> 128 */ 70 129 71 __EFI_PE_HEADER !! 130 .globl __start >> 131 __start: >> 132 /* >> 133 * We have to do any OF calls before we map ourselves to KERNELBASE, >> 134 * because OF may have I/O devices mapped into that area >> 135 * (particularly on CHRP). >> 136 */ >> 137 mr r31,r3 /* save parameters */ >> 138 mr r30,r4 >> 139 mr r29,r5 >> 140 mr r28,r6 >> 141 mr r27,r7 >> 142 li r24,0 /* cpu # */ 72 143 73 .section ".idmap.text","a" !! 144 #ifdef CONFIG_POWER4 >> 145 /* >> 146 * On the PPC970, we have to turn off real-mode cache inhibit >> 147 * early, before we first turn the MMU off. >> 148 */ >> 149 mfspr r0,SPRN_PVR >> 150 srwi r0,r0,16 >> 151 cmpwi r0,0x39 >> 152 beql ppc970_setup_hid >> 153 #endif 74 154 75 /* !! 155 /* 76 * The following callee saved general !! 156 * early_init() does the early machine identification and does 77 * primary lowlevel boot path: !! 157 * the necessary low-level setup and clears the BSS 78 * !! 158 * -- Cort <cort@fsmlabs.com> 79 * Register Scope !! 159 */ 80 * x19 primary_entry() .. star !! 160 bl early_init 81 * x20 primary_entry() .. __pr << 82 * x21 primary_entry() .. star << 83 */ << 84 SYM_CODE_START(primary_entry) << 85 bl record_mmu_state << 86 bl preserve_boot_args << 87 << 88 adrp x1, early_init_stack << 89 mov sp, x1 << 90 mov x29, xzr << 91 adrp x0, init_idmap_pg_dir << 92 mov x1, xzr << 93 bl __pi_create_init_idmap << 94 161 95 /* !! 162 #ifdef CONFIG_APUS 96 * If the page tables have been popula !! 163 /* On APUS the __va/__pa constants need to be set to the correct 97 * accesses (MMU disabled), invalidate !! 164 * values before continuing. 98 * remove any speculatively loaded cac !! 165 */ 99 */ !! 166 mr r4,r30 100 cbnz x19, 0f !! 167 bl fix_mem_constants 101 dmb sy !! 168 #endif /* CONFIG_APUS */ 102 mov x1, x0 << 103 adrp x0, init_idmap_pg_dir << 104 adr_l x2, dcache_inval_poc << 105 blr x2 << 106 b 1f << 107 169 108 /* !! 170 /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains 109 * If we entered with the MMU and cach !! 171 * the physical address we are running at, returned by early_init() 110 * of the primary boot code to the PoC !! 172 */ 111 * the MMU off. !! 173 bl mmu_off 112 */ !! 174 __after_mmu_off: 113 0: adrp x0, __idmap_text_start << 114 adr_l x1, __idmap_text_end << 115 adr_l x2, dcache_clean_poc << 116 blr x2 << 117 << 118 1: mov x0, x19 << 119 bl init_kernel_el << 120 mov x20, x0 << 121 175 122 /* !! 176 #ifndef CONFIG_POWER4 123 * The following calls CPU setup code, !! 177 bl clear_bats 124 * details. !! 178 bl flush_tlbs 125 * On return, the CPU will be ready fo !! 179 bl initial_bats 126 * the TCR will have been set. !! 180 #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) 127 */ !! 181 bl setup_disp_bat 128 bl __cpu_setup !! 182 #endif 129 b __primary_switch !! 183 #else /* CONFIG_POWER4 */ 130 SYM_CODE_END(primary_entry) !! 184 /* 131 !! 185 * Load up the SDR1 and segment register values now 132 __INIT !! 186 * since we don't have the BATs. 133 SYM_CODE_START_LOCAL(record_mmu_state) !! 187 * Also make sure we are running in 32-bit mode. 134 mrs x19, CurrentEL !! 188 */ 135 cmp x19, #CurrentEL_EL2 !! 
189 bl reloc_offset 136 mrs x19, sctlr_el1 !! 190 addis r14,r3,_SDR1@ha /* get the value from _SDR1 */ 137 b.ne 0f !! 191 lwz r14,_SDR1@l(r14) /* assume hash table below 4GB */ 138 mrs x19, sctlr_el2 !! 192 mtspr SDR1,r14 139 0: !! 193 slbia 140 CPU_LE( tbnz x19, #SCTLR_ELx_EE_SHIFT, 1f !! 194 lis r4,0x2000 /* set pseudo-segment reg 12 */ 141 CPU_BE( tbz x19, #SCTLR_ELx_EE_SHIFT, 1f !! 195 ori r5,r4,0x0ccc 142 tst x19, #SCTLR_ELx_C !! 196 mtsr 12,r5 143 and x19, x19, #SCTLR_ELx_M !! 197 ori r4,r4,0x0888 /* set pseudo-segment reg 8 */ 144 csel x19, xzr, x19, eq !! 198 mtsr 8,r4 /* (for access to serial port) */ 145 ret !! 199 mfmsr r0 >> 200 clrldi r0,r0,1 >> 201 sync >> 202 mtmsr r0 >> 203 isync >> 204 #endif /* CONFIG_POWER4 */ 146 205 147 /* 206 /* 148 * Set the correct endianness early so !! 207 * Call setup_cpu for CPU 0 149 * before init_kernel_el() occur in th << 150 * this means the MMU must be disabled << 151 * up getting interpreted with the wro << 152 */ 208 */ 153 1: eor x19, x19, #SCTLR_ELx_EE !! 209 bl reloc_offset 154 bic x19, x19, #SCTLR_ELx_M !! 210 li r24,0 /* cpu# */ 155 b.ne 2f !! 211 bl call_setup_cpu /* Call setup_cpu for this CPU */ 156 pre_disable_mmu_workaround !! 212 #ifdef CONFIG_6xx 157 msr sctlr_el2, x19 !! 213 bl reloc_offset 158 b 3f !! 214 bl init_idle_6xx 159 2: pre_disable_mmu_workaround !! 215 #endif /* CONFIG_6xx */ 160 msr sctlr_el1, x19 << 161 3: isb << 162 mov x19, xzr << 163 ret << 164 SYM_CODE_END(record_mmu_state) << 165 << 166 /* << 167 * Preserve the arguments passed by the bootlo << 168 */ << 169 SYM_CODE_START_LOCAL(preserve_boot_args) << 170 mov x21, x0 << 171 << 172 adr_l x0, boot_args << 173 stp x21, x1, [x0] << 174 stp x2, x3, [x0, #16] << 175 << 176 cbnz x19, 0f << 177 dmb sy << 178 << 179 << 180 add x1, x0, #0x20 << 181 b dcache_inval_poc << 182 0: str_l x19, mmu_enabled_at_boot, x0 << 183 ret << 184 SYM_CODE_END(preserve_boot_args) << 185 216 186 /* !! 217 #ifndef CONFIG_APUS 187 * Initialize CPU registers with task- !! 218 /* 188 * !! 219 * We need to run with _start at physical address 0. 189 * Create a final frame record at task !! 220 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses 190 * that the unwinder can identify the !! 221 * the exception vectors at 0 (and therefore this copy 191 * its location in the task stack. We !! 222 * overwrites OF's exception vectors with our own). 192 * for consistency with user tasks and !! 223 * The MMU is off at this point. 193 */ !! 224 */ 194 .macro init_cpu_task tsk, tmp1, tmp2 !! 225 bl reloc_offset 195 msr sp_el0, \tsk !! 226 mr r26,r3 >> 227 addis r4,r3,KERNELBASE@h /* current address of _start */ >> 228 cmpwi 0,r4,0 /* are we already running at 0? */ >> 229 bne relocate_kernel >> 230 #endif /* CONFIG_APUS */ >> 231 /* >> 232 * we now have the 1st 16M of ram mapped with the bats. >> 233 * prep needs the mmu to be turned on here, but pmac already has it on. >> 234 * this shouldn't bother the pmac since it just gets turned on again >> 235 * as we jump to our code at KERNELBASE. -- Cort >> 236 * Actually no, pmac doesn't have it on any more. BootX enters with MMU >> 237 * off, and in other cases, we now turn it off before changing BATs above. >> 238 */ >> 239 turn_on_mmu: >> 240 mfmsr r0 >> 241 ori r0,r0,MSR_DR|MSR_IR >> 242 mtspr SRR1,r0 >> 243 lis r0,start_here@h >> 244 ori r0,r0,start_here@l >> 245 mtspr SRR0,r0 >> 246 SYNC >> 247 RFI /* enables MMU */ 196 248 197 ldr \tmp1, [\tsk, #TSK_STACK] !! 249 /* 198 add sp, \tmp1, #THREAD_SIZE !! 
250 * We need __secondary_hold as a place to hold the other cpus on 199 sub sp, sp, #PT_REGS_SIZE !! 251 * an SMP machine, even when we are running a UP kernel. >> 252 */ >> 253 . = 0xc0 /* for prep bootloader */ >> 254 li r3,1 /* MTX only has 1 cpu */ >> 255 .globl __secondary_hold >> 256 __secondary_hold: >> 257 /* tell the master we're here */ >> 258 stw r3,4(0) >> 259 #ifdef CONFIG_SMP >> 260 100: lwz r4,0(0) >> 261 /* wait until we're told to start */ >> 262 cmpw 0,r4,r3 >> 263 bne 100b >> 264 /* our cpu # was at addr 0 - go */ >> 265 mr r24,r3 /* cpu # */ >> 266 b __secondary_start >> 267 #else >> 268 b . >> 269 #endif /* CONFIG_SMP */ 200 270 201 stp xzr, xzr, [sp, #S_STACKFRAME] !! 271 /* 202 add x29, sp, #S_STACKFRAME !! 272 * Exception entry code. This code runs with address translation >> 273 * turned off, i.e. using physical addresses. >> 274 * We assume sprg3 has the physical address of the current >> 275 * task's thread_struct. >> 276 */ >> 277 #define EXCEPTION_PROLOG \ >> 278 mtspr SPRG0,r20; \ >> 279 mtspr SPRG1,r21; \ >> 280 mfcr r20; \ >> 281 mfspr r21,SPRG2; /* exception stack to use from */ \ >> 282 cmpwi 0,r21,0; /* user mode or RTAS */ \ >> 283 bne 1f; \ >> 284 tophys(r21,r1); /* use tophys(kernel sp) otherwise */ \ >> 285 subi r21,r21,INT_FRAME_SIZE; /* alloc exc. frame */\ >> 286 1: CLR_TOP32(r21); \ >> 287 stw r20,_CCR(r21); /* save registers */ \ >> 288 stw r22,GPR22(r21); \ >> 289 stw r23,GPR23(r21); \ >> 290 mfspr r20,SPRG0; \ >> 291 stw r20,GPR20(r21); \ >> 292 mfspr r22,SPRG1; \ >> 293 stw r22,GPR21(r21); \ >> 294 mflr r20; \ >> 295 stw r20,_LINK(r21); \ >> 296 mfctr r22; \ >> 297 stw r22,_CTR(r21); \ >> 298 mfspr r20,XER; \ >> 299 stw r20,_XER(r21); \ >> 300 mfspr r22,SRR0; \ >> 301 mfspr r23,SRR1; \ >> 302 stw r0,GPR0(r21); \ >> 303 stw r1,GPR1(r21); \ >> 304 stw r2,GPR2(r21); \ >> 305 stw r1,0(r21); \ >> 306 tovirt(r1,r21); /* set new kernel sp */ \ >> 307 SAVE_4GPRS(3, r21); \ >> 308 SAVE_GPR(7, r21); >> 309 /* >> 310 * Note: code which follows this uses cr0.eq (set if from kernel), >> 311 * r21, r22 (SRR0), and r23 (SRR1). >> 312 */ 203 313 204 scs_load_current !! 314 /* >> 315 * Exception vectors. >> 316 */ >> 317 #define STD_EXCEPTION(n, label, hdlr) \ >> 318 . = n; \ >> 319 label: \ >> 320 EXCEPTION_PROLOG; \ >> 321 addi r3,r1,STACK_FRAME_OVERHEAD; \ >> 322 li r20,MSR_KERNEL; \ >> 323 bl transfer_to_handler; \ >> 324 i##n: \ >> 325 .long hdlr; \ >> 326 .long ret_from_except >> 327 >> 328 /* System reset */ >> 329 #ifdef CONFIG_SMP /* MVME/MTX and gemini start the secondary here */ >> 330 #ifdef CONFIG_GEMINI >> 331 . = 0x100 >> 332 b __secondary_start_gemini >> 333 #else /* CONFIG_GEMINI */ >> 334 STD_EXCEPTION(0x100, Reset, __secondary_start_psurge) >> 335 #endif /* CONFIG_GEMINI */ >> 336 #else >> 337 STD_EXCEPTION(0x100, Reset, UnknownException) >> 338 #endif 205 339 206 adr_l \tmp1, __per_cpu_offset !! 340 /* Machine check */ 207 ldr w\tmp2, [\tsk, #TSK_TI_CPU] !! 341 BEGIN_FTR_SECTION 208 ldr \tmp1, [\tmp1, \tmp2, lsl #3] !! 342 DSSALL 209 set_this_cpu_offset \tmp1 !! 343 sync 210 .endm !! 344 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) >> 345 STD_EXCEPTION(0x200, MachineCheck, MachineCheckException) >> 346 >> 347 /* Data access exception. */ >> 348 . = 0x300 >> 349 #ifdef CONFIG_PPC64BRIDGE >> 350 b DataAccess >> 351 DataAccessCont: >> 352 #else >> 353 DataAccess: >> 354 EXCEPTION_PROLOG >> 355 #endif /* CONFIG_PPC64BRIDGE */ >> 356 mfspr r20,DSISR >> 357 BEGIN_FTR_SECTION >> 358 andis. r0,r20,0xa470 /* weird error? 
*/ >> 359 bne 1f /* if not, try to put a PTE */ >> 360 mfspr r4,DAR /* into the hash table */ >> 361 rlwinm r3,r20,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */ >> 362 bl hash_page >> 363 END_FTR_SECTION_IFSET(CPU_FTR_HPTE_TABLE) >> 364 1: stw r20,_DSISR(r21) >> 365 mr r5,r20 >> 366 mfspr r4,DAR >> 367 stw r4,_DAR(r21) >> 368 addi r3,r1,STACK_FRAME_OVERHEAD >> 369 li r20,MSR_KERNEL >> 370 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ >> 371 bl transfer_to_handler >> 372 i0x300: >> 373 .long do_page_fault >> 374 .long ret_from_except >> 375 >> 376 #ifdef CONFIG_PPC64BRIDGE >> 377 /* SLB fault on data access. */ >> 378 . = 0x380 >> 379 b DataSegment >> 380 DataSegmentCont: >> 381 mfspr r4,DAR >> 382 stw r4,_DAR(r21) >> 383 addi r3,r1,STACK_FRAME_OVERHEAD >> 384 li r20,MSR_KERNEL >> 385 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ >> 386 bl transfer_to_handler >> 387 .long UnknownException >> 388 .long ret_from_except >> 389 #endif /* CONFIG_PPC64BRIDGE */ >> 390 >> 391 /* Instruction access exception. */ >> 392 . = 0x400 >> 393 #ifdef CONFIG_PPC64BRIDGE >> 394 b InstructionAccess >> 395 InstructionAccessCont: >> 396 #else >> 397 InstructionAccess: >> 398 EXCEPTION_PROLOG >> 399 #endif /* CONFIG_PPC64BRIDGE */ >> 400 BEGIN_FTR_SECTION >> 401 andis. r0,r23,0x4000 /* no pte found? */ >> 402 beq 1f /* if so, try to put a PTE */ >> 403 li r3,0 /* into the hash table */ >> 404 mr r4,r22 /* SRR0 is fault address */ >> 405 bl hash_page >> 406 END_FTR_SECTION_IFSET(CPU_FTR_HPTE_TABLE) >> 407 1: addi r3,r1,STACK_FRAME_OVERHEAD >> 408 mr r4,r22 >> 409 mr r5,r23 >> 410 li r20,MSR_KERNEL >> 411 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ >> 412 bl transfer_to_handler >> 413 i0x400: >> 414 .long do_page_fault >> 415 .long ret_from_except >> 416 >> 417 #ifdef CONFIG_PPC64BRIDGE >> 418 /* SLB fault on instruction access. */ >> 419 . = 0x480 >> 420 b InstructionSegment >> 421 InstructionSegmentCont: >> 422 addi r3,r1,STACK_FRAME_OVERHEAD >> 423 li r20,MSR_KERNEL >> 424 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ >> 425 bl transfer_to_handler >> 426 .long UnknownException >> 427 .long ret_from_except >> 428 #endif /* CONFIG_PPC64BRIDGE */ >> 429 >> 430 /* External interrupt */ >> 431 . = 0x500; >> 432 HardwareInterrupt: >> 433 EXCEPTION_PROLOG; >> 434 addi r3,r1,STACK_FRAME_OVERHEAD >> 435 li r20,MSR_KERNEL >> 436 li r4,0 >> 437 bl transfer_to_handler >> 438 .globl do_IRQ_intercept >> 439 do_IRQ_intercept: >> 440 .long do_IRQ; >> 441 .long ret_from_intercept >> 442 >> 443 /* Alignment exception */ >> 444 . = 0x600 >> 445 Alignment: >> 446 EXCEPTION_PROLOG >> 447 mfspr r4,DAR >> 448 stw r4,_DAR(r21) >> 449 mfspr r5,DSISR >> 450 stw r5,_DSISR(r21) >> 451 addi r3,r1,STACK_FRAME_OVERHEAD >> 452 li r20,MSR_KERNEL >> 453 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ >> 454 bl transfer_to_handler >> 455 i0x600: >> 456 .long AlignmentException >> 457 .long ret_from_except >> 458 >> 459 /* Program check exception */ >> 460 . = 0x700 >> 461 ProgramCheck: >> 462 EXCEPTION_PROLOG >> 463 addi r3,r1,STACK_FRAME_OVERHEAD >> 464 li r20,MSR_KERNEL >> 465 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ >> 466 bl transfer_to_handler >> 467 i0x700: >> 468 .long ProgramCheckException >> 469 .long ret_from_except >> 470 >> 471 /* Floating-point unavailable */ >> 472 . 
= 0x800 >> 473 FPUnavailable: >> 474 EXCEPTION_PROLOG >> 475 bne load_up_fpu /* if from user, just load it up */ >> 476 li r20,MSR_KERNEL >> 477 bl transfer_to_handler /* if from kernel, take a trap */ >> 478 i0x800: >> 479 .long KernelFP >> 480 .long ret_from_except >> 481 >> 482 . = 0x900 >> 483 Decrementer: >> 484 EXCEPTION_PROLOG >> 485 addi r3,r1,STACK_FRAME_OVERHEAD >> 486 li r20,MSR_KERNEL >> 487 bl transfer_to_handler >> 488 .globl timer_interrupt_intercept >> 489 timer_interrupt_intercept: >> 490 .long timer_interrupt >> 491 .long ret_from_intercept >> 492 >> 493 STD_EXCEPTION(0xa00, Trap_0a, UnknownException) >> 494 STD_EXCEPTION(0xb00, Trap_0b, UnknownException) >> 495 >> 496 /* System call */ >> 497 . = 0xc00 >> 498 SystemCall: >> 499 EXCEPTION_PROLOG >> 500 stw r3,ORIG_GPR3(r21) >> 501 li r20,MSR_KERNEL >> 502 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */ >> 503 bl transfer_to_handler >> 504 .long DoSyscall >> 505 .long ret_from_except >> 506 >> 507 /* Single step - not used on 601 */ >> 508 STD_EXCEPTION(0xd00, SingleStep, SingleStepException) >> 509 STD_EXCEPTION(0xe00, Trap_0e, UnknownException) 211 510 212 /* 511 /* 213 * The following fragment of code is executed !! 512 * The Altivec unavailable trap is at 0x0f20. Foo. 214 * !! 513 * We effectively remap it to 0x3000. 215 * x0 = __pa(KERNEL_START) << 216 */ 514 */ 217 SYM_FUNC_START_LOCAL(__primary_switched) !! 515 . = 0xf00 218 adr_l x4, init_task !! 516 b Trap_0f 219 init_cpu_task x4, x5, x6 !! 517 trap_0f_cont: 220 !! 518 addi r3,r1,STACK_FRAME_OVERHEAD 221 adr_l x8, vectors !! 519 li r20,MSR_KERNEL 222 msr vbar_el1, x8 !! 520 bl transfer_to_handler 223 isb !! 521 .long UnknownException >> 522 .long ret_from_except >> 523 >> 524 . = 0xf20 >> 525 #ifdef CONFIG_ALTIVEC >> 526 b AltiVecUnavailable >> 527 #endif >> 528 Trap_0f: >> 529 EXCEPTION_PROLOG >> 530 b trap_0f_cont 224 531 225 stp x29, x30, [sp, #-16]! !! 532 /* 226 mov x29, sp !! 533 * Handle TLB miss for instruction on 603/603e. >> 534 * Note: we get an alternate set of r0 - r3 to use automatically. >> 535 */ >> 536 . = 0x1000 >> 537 InstructionTLBMiss: >> 538 /* >> 539 * r0: stored ctr >> 540 * r1: linux style pte ( later becomes ppc hardware pte ) >> 541 * r2: ptr to linux-style pte >> 542 * r3: scratch >> 543 */ >> 544 mfctr r0 >> 545 /* Get PTE (linux-style) and check access */ >> 546 mfspr r3,IMISS >> 547 lis r1,KERNELBASE@h /* check if kernel address */ >> 548 cmplw 0,r3,r1 >> 549 mfspr r2,SPRG3 >> 550 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ >> 551 lwz r2,PGDIR(r2) >> 552 blt+ 112f >> 553 lis r2,swapper_pg_dir@ha /* if kernel address, use */ >> 554 addi r2,r2,swapper_pg_dir@l /* kernel page table */ >> 555 mfspr r1,SRR1 /* and MSR_PR bit from SRR1 */ >> 556 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ >> 557 112: tophys(r2,r2) >> 558 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ >> 559 lwz r2,0(r2) /* get pmd entry */ >> 560 rlwinm. r2,r2,0,0,19 /* extract address of pte page */ >> 561 beq- InstructionAddressInvalid /* return if no mapping */ >> 562 tophys(r2,r2) >> 563 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ >> 564 lwz r3,0(r2) /* get linux-style pte */ >> 565 andc. r1,r1,r3 /* check access & ~permission */ >> 566 bne- InstructionAddressInvalid /* return if access not permitted */ >> 567 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ >> 568 /* >> 569 * NOTE! 
We are assuming this is not an SMP system, otherwise >> 570 * we would need to update the pte atomically with lwarx/stwcx. >> 571 */ >> 572 stw r3,0(r2) /* update PTE (accessed bit) */ >> 573 /* Convert linux-style PTE to low word of PPC-style PTE */ >> 574 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ >> 575 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ >> 576 and r1,r1,r2 /* writable if _RW and _DIRTY */ >> 577 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ >> 578 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ >> 579 ori r1,r1,0xe14 /* clear out reserved bits and M */ >> 580 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ >> 581 mtspr RPA,r1 >> 582 mfspr r3,IMISS >> 583 tlbli r3 >> 584 mfspr r3,SRR1 /* Need to restore CR0 */ >> 585 mtcrf 0x80,r3 >> 586 rfi >> 587 InstructionAddressInvalid: >> 588 mfspr r3,SRR1 >> 589 rlwinm r1,r3,9,6,6 /* Get load/store bit */ >> 590 >> 591 addis r1,r1,0x2000 >> 592 mtspr DSISR,r1 /* (shouldn't be needed) */ >> 593 mtctr r0 /* Restore CTR */ >> 594 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ >> 595 or r2,r2,r1 >> 596 mtspr SRR1,r2 >> 597 mfspr r1,IMISS /* Get failing address */ >> 598 rlwinm. r2,r2,0,31,31 /* Check for little endian access */ >> 599 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */ >> 600 xor r1,r1,r2 >> 601 mtspr DAR,r1 /* Set fault address */ >> 602 mfmsr r0 /* Restore "normal" registers */ >> 603 xoris r0,r0,MSR_TGPR>>16 >> 604 mtcrf 0x80,r3 /* Restore CR0 */ >> 605 mtmsr r0 >> 606 b InstructionAccess 227 607 228 str_l x21, __fdt_pointer, x5 !! 608 /* >> 609 * Handle TLB miss for DATA Load operation on 603/603e >> 610 */ >> 611 . = 0x1100 >> 612 DataLoadTLBMiss: >> 613 /* >> 614 * r0: stored ctr >> 615 * r1: linux style pte ( later becomes ppc hardware pte ) >> 616 * r2: ptr to linux-style pte >> 617 * r3: scratch >> 618 */ >> 619 mfctr r0 >> 620 /* Get PTE (linux-style) and check access */ >> 621 mfspr r3,DMISS >> 622 lis r1,KERNELBASE@h /* check if kernel address */ >> 623 cmplw 0,r3,r1 >> 624 mfspr r2,SPRG3 >> 625 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */ >> 626 lwz r2,PGDIR(r2) >> 627 blt+ 112f >> 628 lis r2,swapper_pg_dir@ha /* if kernel address, use */ >> 629 addi r2,r2,swapper_pg_dir@l /* kernel page table */ >> 630 mfspr r1,SRR1 /* and MSR_PR bit from SRR1 */ >> 631 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ >> 632 112: tophys(r2,r2) >> 633 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ >> 634 lwz r2,0(r2) /* get pmd entry */ >> 635 rlwinm. r2,r2,0,0,19 /* extract address of pte page */ >> 636 beq- DataAddressInvalid /* return if no mapping */ >> 637 tophys(r2,r2) >> 638 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ >> 639 lwz r3,0(r2) /* get linux-style pte */ >> 640 andc. r1,r1,r3 /* check access & ~permission */ >> 641 bne- DataAddressInvalid /* return if access not permitted */ >> 642 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */ >> 643 /* >> 644 * NOTE! We are assuming this is not an SMP system, otherwise >> 645 * we would need to update the pte atomically with lwarx/stwcx. 
>> 646 */ >> 647 stw r3,0(r2) /* update PTE (accessed bit) */ >> 648 /* Convert linux-style PTE to low word of PPC-style PTE */ >> 649 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */ >> 650 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */ >> 651 and r1,r1,r2 /* writable if _RW and _DIRTY */ >> 652 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ >> 653 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */ >> 654 ori r1,r1,0xe14 /* clear out reserved bits and M */ >> 655 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */ >> 656 mtspr RPA,r1 >> 657 mfspr r3,DMISS >> 658 tlbld r3 >> 659 mfspr r3,SRR1 /* Need to restore CR0 */ >> 660 mtcrf 0x80,r3 >> 661 rfi >> 662 DataAddressInvalid: >> 663 mfspr r3,SRR1 >> 664 rlwinm r1,r3,9,6,6 /* Get load/store bit */ >> 665 addis r1,r1,0x2000 >> 666 mtspr DSISR,r1 >> 667 mtctr r0 /* Restore CTR */ >> 668 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */ >> 669 mtspr SRR1,r2 >> 670 mfspr r1,DMISS /* Get failing address */ >> 671 rlwinm. r2,r2,0,31,31 /* Check for little endian access */ >> 672 beq 20f /* Jump if big endian */ >> 673 xori r1,r1,3 >> 674 20: mtspr DAR,r1 /* Set fault address */ >> 675 mfmsr r0 /* Restore "normal" registers */ >> 676 xoris r0,r0,MSR_TGPR>>16 >> 677 mtcrf 0x80,r3 /* Restore CR0 */ >> 678 mtmsr r0 >> 679 b DataAccess 229 680 230 adrp x4, _text !! 681 /* 231 sub x4, x4, x0 !! 682 * Handle TLB miss for DATA Store on 603/603e 232 str_l x4, kimage_voffset, x5 !! 683 */ >> 684 . = 0x1200 >> 685 DataStoreTLBMiss: >> 686 /* >> 687 * r0: stored ctr >> 688 * r1: linux style pte ( later becomes ppc hardware pte ) >> 689 * r2: ptr to linux-style pte >> 690 * r3: scratch >> 691 */ >> 692 mfctr r0 >> 693 /* Get PTE (linux-style) and check access */ >> 694 mfspr r3,DMISS >> 695 lis r1,KERNELBASE@h /* check if kernel address */ >> 696 cmplw 0,r3,r1 >> 697 mfspr r2,SPRG3 >> 698 li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */ >> 699 lwz r2,PGDIR(r2) >> 700 blt+ 112f >> 701 lis r2,swapper_pg_dir@ha /* if kernel address, use */ >> 702 addi r2,r2,swapper_pg_dir@l /* kernel page table */ >> 703 mfspr r1,SRR1 /* and MSR_PR bit from SRR1 */ >> 704 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */ >> 705 112: tophys(r2,r2) >> 706 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */ >> 707 lwz r2,0(r2) /* get pmd entry */ >> 708 rlwinm. r2,r2,0,0,19 /* extract address of pte page */ >> 709 beq- DataAddressInvalid /* return if no mapping */ >> 710 tophys(r2,r2) >> 711 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */ >> 712 lwz r3,0(r2) /* get linux-style pte */ >> 713 andc. r1,r1,r3 /* check access & ~permission */ >> 714 bne- DataAddressInvalid /* return if access not permitted */ >> 715 ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY >> 716 /* >> 717 * NOTE! We are assuming this is not an SMP system, otherwise >> 718 * we would need to update the pte atomically with lwarx/stwcx. >> 719 */ >> 720 stw r3,0(r2) /* update PTE (accessed/dirty bits) */ >> 721 /* Convert linux-style PTE to low word of PPC-style PTE */ >> 722 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */ >> 723 li r1,0xe15 /* clear out reserved bits and M */ >> 724 andc r1,r3,r1 /* PP = user? 2: 0 */ >> 725 mtspr RPA,r1 >> 726 mfspr r3,DMISS >> 727 tlbld r3 >> 728 mfspr r3,SRR1 /* Need to restore CR0 */ >> 729 mtcrf 0x80,r3 >> 730 rfi 233 731 234 mov x0, x20 !! 732 #ifndef CONFIG_ALTIVEC 235 bl set_cpu_boot_mode_flag !! 733 #define AltivecAssistException UnknownException >> 734 #endif 236 735 237 #if defined(CONFIG_KASAN_GENERIC) || defined(C !! 
736 STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint) 238 bl kasan_early_init !! 737 STD_EXCEPTION(0x1400, SMI, SMIException) >> 738 STD_EXCEPTION(0x1500, Trap_15, UnknownException) >> 739 #ifdef CONFIG_POWER4 >> 740 STD_EXCEPTION(0x1600, Trap_16, UnknownException) >> 741 STD_EXCEPTION(0x1700, Trap_17, AltivecAssistException) >> 742 STD_EXCEPTION(0x1800, Trap_18, TAUException) >> 743 #else /* !CONFIG_POWER4 */ >> 744 STD_EXCEPTION(0x1600, Trap_16, AltivecAssistException) >> 745 STD_EXCEPTION(0x1700, Trap_17, TAUException) >> 746 STD_EXCEPTION(0x1800, Trap_18, UnknownException) 239 #endif 747 #endif 240 mov x0, x20 !! 748 STD_EXCEPTION(0x1900, Trap_19, UnknownException) 241 bl finalise_el2 !! 749 STD_EXCEPTION(0x1a00, Trap_1a, UnknownException) 242 ldp x29, x30, [sp], #16 !! 750 STD_EXCEPTION(0x1b00, Trap_1b, UnknownException) 243 bl start_kernel !! 751 STD_EXCEPTION(0x1c00, Trap_1c, UnknownException) 244 ASM_BUG() !! 752 STD_EXCEPTION(0x1d00, Trap_1d, UnknownException) 245 SYM_FUNC_END(__primary_switched) !! 753 STD_EXCEPTION(0x1e00, Trap_1e, UnknownException) >> 754 STD_EXCEPTION(0x1f00, Trap_1f, UnknownException) >> 755 STD_EXCEPTION(0x2000, RunMode, RunModeException) >> 756 STD_EXCEPTION(0x2100, Trap_21, UnknownException) >> 757 STD_EXCEPTION(0x2200, Trap_22, UnknownException) >> 758 STD_EXCEPTION(0x2300, Trap_23, UnknownException) >> 759 STD_EXCEPTION(0x2400, Trap_24, UnknownException) >> 760 STD_EXCEPTION(0x2500, Trap_25, UnknownException) >> 761 STD_EXCEPTION(0x2600, Trap_26, UnknownException) >> 762 STD_EXCEPTION(0x2700, Trap_27, UnknownException) >> 763 STD_EXCEPTION(0x2800, Trap_28, UnknownException) >> 764 STD_EXCEPTION(0x2900, Trap_29, UnknownException) >> 765 STD_EXCEPTION(0x2a00, Trap_2a, UnknownException) >> 766 STD_EXCEPTION(0x2b00, Trap_2b, UnknownException) >> 767 STD_EXCEPTION(0x2c00, Trap_2c, UnknownException) >> 768 STD_EXCEPTION(0x2d00, Trap_2d, UnknownException) >> 769 STD_EXCEPTION(0x2e00, Trap_2e, UnknownException) >> 770 STD_EXCEPTION(0x2f00, Trap_2f, UnknownException) >> 771 >> 772 . = 0x3000 >> 773 >> 774 #ifdef CONFIG_ALTIVEC >> 775 AltiVecUnavailable: >> 776 EXCEPTION_PROLOG >> 777 bne load_up_altivec /* if from user, just load it up */ >> 778 li r20,MSR_KERNEL >> 779 bl transfer_to_handler /* if from kernel, take a trap */ >> 780 .long KernelAltiVec >> 781 .long ret_from_except >> 782 #endif /* CONFIG_ALTIVEC */ >> 783 >> 784 #ifdef CONFIG_PPC64BRIDGE >> 785 DataAccess: >> 786 EXCEPTION_PROLOG >> 787 b DataAccessCont >> 788 InstructionAccess: >> 789 EXCEPTION_PROLOG >> 790 b InstructionAccessCont >> 791 DataSegment: >> 792 EXCEPTION_PROLOG >> 793 b DataSegmentCont >> 794 InstructionSegment: >> 795 EXCEPTION_PROLOG >> 796 b InstructionSegmentCont >> 797 #endif /* CONFIG_PPC64BRIDGE */ 246 798 247 /* 799 /* 248 * end early head section, begin head code tha !! 800 * This code finishes saving the registers to the exception frame 249 * hotplug and needs to have the same protecti !! 801 * and jumps to the appropriate handler for the exception, turning >> 802 * on address translation. 250 */ 803 */ 251 .section ".idmap.text","a" !! 804 .globl transfer_to_handler >> 805 transfer_to_handler: >> 806 stw r22,_NIP(r21) >> 807 stw r23,_MSR(r21) >> 808 SAVE_4GPRS(8, r21) >> 809 SAVE_8GPRS(12, r21) >> 810 SAVE_8GPRS(24, r21) >> 811 andi. 
r23,r23,MSR_PR >> 812 mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */ >> 813 addi r2,r23,-THREAD /* set r2 to current */ >> 814 beq 2f >> 815 addi r24,r1,STACK_FRAME_OVERHEAD >> 816 stw r24,PT_REGS(r23) >> 817 #ifdef CONFIG_ALTIVEC >> 818 BEGIN_FTR_SECTION >> 819 mfspr r22,SPRN_VRSAVE /* if G4, save vrsave register value */ >> 820 stw r22,THREAD_VRSAVE(r23) >> 821 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) >> 822 #endif /* CONFIG_ALTIVEC */ >> 823 #ifndef CONFIG_6xx >> 824 2: >> 825 #endif >> 826 .globl transfer_to_handler_cont >> 827 transfer_to_handler_cont: >> 828 tovirt(r2,r2) >> 829 mflr r23 >> 830 andi. r24,r23,0x3f00 /* get vector offset */ >> 831 stw r24,TRAP(r21) >> 832 li r22,0 >> 833 stw r22,RESULT(r21) >> 834 mtspr SPRG2,r22 /* r1 is now kernel sp */ >> 835 addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */ >> 836 cmplw 0,r1,r2 >> 837 cmplw 1,r1,r24 >> 838 crand 1,1,4 >> 839 bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */ >> 840 lwz r24,0(r23) /* virtual address of handler */ >> 841 lwz r23,4(r23) /* where to go when done */ >> 842 FIX_SRR1(r20,r22) >> 843 mtspr SRR0,r24 >> 844 mtspr SRR1,r20 >> 845 mtlr r23 >> 846 SYNC >> 847 RFI /* jump to handler, enable MMU */ >> 848 >> 849 #ifdef CONFIG_6xx >> 850 2: >> 851 /* Out of line case when returning to kernel, >> 852 * check return from power_save_6xx >> 853 */ >> 854 mfspr r24,SPRN_HID0 >> 855 mtcr r24 >> 856 BEGIN_FTR_SECTION >> 857 bt- 8,power_save_6xx_restore /* Check DOZE */ >> 858 END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE) >> 859 BEGIN_FTR_SECTION >> 860 bt- 9,power_save_6xx_restore /* Check NAP */ >> 861 END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP) >> 862 b transfer_to_handler_cont >> 863 >> 864 #endif /* CONFIG_6xx */ 252 865 253 /* 866 /* 254 * Starting from EL2 or EL1, configure the CPU !! 867 * On kernel stack overflow, load up an initial stack pointer 255 * reachable EL supported by the kernel in a c !! 868 * and call StackOverflow(regs), which should not return. 256 * from EL2 to EL1, configure EL2 before confi << 257 * << 258 * Since we cannot always rely on ERET synchro << 259 * SCTLR_ELx.EOS is clear), we place an ISB pr << 260 * << 261 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CP << 262 * booted in EL1 or EL2 respectively, with the << 263 * potential context flags. These flags are *n << 264 * << 265 * x0: whether we are being called from the pr << 266 */ 869 */ 267 SYM_FUNC_START(init_kernel_el) !! 870 stack_ovf: 268 mrs x1, CurrentEL !! 871 addi r3,r1,STACK_FRAME_OVERHEAD 269 cmp x1, #CurrentEL_EL2 !! 872 lis r1,init_task_union@ha 270 b.eq init_el2 !! 873 addi r1,r1,init_task_union@l >> 874 addi r1,r1,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD >> 875 lis r24,StackOverflow@ha >> 876 addi r24,r24,StackOverflow@l >> 877 li r20,MSR_KERNEL >> 878 FIX_SRR1(r20,r22) >> 879 mtspr SRR0,r24 >> 880 mtspr SRR1,r20 >> 881 SYNC >> 882 RFI 271 883 272 SYM_INNER_LABEL(init_el1, SYM_L_LOCAL) !! 884 /* 273 mov_q x0, INIT_SCTLR_EL1_MMU_OFF !! 885 * This task wants to use the FPU now. 274 pre_disable_mmu_workaround !! 886 * On UP, disable FP for the task which had the FPU previously, 275 msr sctlr_el1, x0 !! 887 * and save its floating-point registers in its thread_struct. 276 isb !! 888 * Load up this task's FP registers from its thread_struct, 277 mov_q x0, INIT_PSTATE_EL1 !! 889 * enable the FPU for the current task and return to the task. 278 msr spsr_el1, x0 !! 890 */ 279 msr elr_el1, lr !! 891 load_up_fpu: 280 mov w0, #BOOT_CPU_MODE_EL1 !! 892 mfmsr r5 281 eret !! 893 ori r5,r5,MSR_FP 282 !! 
894 #ifdef CONFIG_PPC64BRIDGE 283 SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) !! 895 clrldi r5,r5,1 /* turn off 64-bit mode */ 284 msr elr_el2, lr !! 896 #endif /* CONFIG_PPC64BRIDGE */ >> 897 SYNC >> 898 MTMSRD(r5) /* enable use of fpu now */ >> 899 isync >> 900 /* >> 901 * For SMP, we don't do lazy FPU switching because it just gets too >> 902 * horrendously complex, especially when a task switches from one CPU >> 903 * to another. Instead we call giveup_fpu in switch_to. >> 904 */ >> 905 #ifndef CONFIG_SMP >> 906 tophys(r6,0) /* get __pa constant */ >> 907 addis r3,r6,last_task_used_math@ha >> 908 lwz r4,last_task_used_math@l(r3) >> 909 cmpi 0,r4,0 >> 910 beq 1f >> 911 add r4,r4,r6 >> 912 addi r4,r4,THREAD /* want last_task_used_math->thread */ >> 913 SAVE_32FPRS(0, r4) >> 914 mffs fr0 >> 915 stfd fr0,THREAD_FPSCR-4(r4) >> 916 lwz r5,PT_REGS(r4) >> 917 add r5,r5,r6 >> 918 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) >> 919 li r20,MSR_FP|MSR_FE0|MSR_FE1 >> 920 andc r4,r4,r20 /* disable FP for previous task */ >> 921 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) >> 922 1: >> 923 #endif /* CONFIG_SMP */ >> 924 /* enable use of FP after return */ >> 925 mfspr r5,SPRG3 /* current task's THREAD (phys) */ >> 926 lwz r4,THREAD_FPEXC_MODE(r5) >> 927 ori r23,r23,MSR_FP /* enable FP for current */ >> 928 or r23,r23,r4 >> 929 lfd fr0,THREAD_FPSCR-4(r5) >> 930 mtfsf 0xff,fr0 >> 931 REST_32FPRS(0, r5) >> 932 #ifndef CONFIG_SMP >> 933 subi r4,r5,THREAD >> 934 sub r4,r4,r6 >> 935 stw r4,last_task_used_math@l(r3) >> 936 #endif /* CONFIG_SMP */ >> 937 /* restore registers and return */ >> 938 lwz r3,_CCR(r21) >> 939 lwz r4,_LINK(r21) >> 940 mtcrf 0xff,r3 >> 941 mtlr r4 >> 942 REST_GPR(1, r21) >> 943 REST_4GPRS(3, r21) >> 944 /* we haven't used ctr or xer */ >> 945 mtspr SRR1,r23 >> 946 mtspr SRR0,r22 >> 947 REST_GPR(20, r21) >> 948 REST_2GPRS(22, r21) >> 949 lwz r21,GPR21(r21) >> 950 SYNC >> 951 RFI 285 952 286 // clean all HYP code to the PoC if we !! 953 /* 287 cbz x0, 0f !! 954 * FP unavailable trap from kernel - print a message, but let 288 adrp x0, __hyp_idmap_text_start !! 955 * the task use FP in the kernel until it returns to user mode. 289 adr_l x1, __hyp_text_end !! 956 */ 290 adr_l x2, dcache_clean_poc !! 957 KernelFP: 291 blr x2 !! 958 lwz r3,_MSR(r1) >> 959 ori r3,r3,MSR_FP >> 960 stw r3,_MSR(r1) /* enable use of FP after return */ >> 961 lis r3,86f@h >> 962 ori r3,r3,86f@l >> 963 mr r4,r2 /* current */ >> 964 lwz r5,_NIP(r1) >> 965 bl printk >> 966 b ret_from_except >> 967 86: .string "floating point used in kernel (task=%p, pc=%x)\n" >> 968 .align 4 >> 969 >> 970 #ifdef CONFIG_ALTIVEC >> 971 /* Note that the AltiVec support is closely modeled after the FP >> 972 * support. Changes to one are likely to be applicable to the >> 973 * other! */ >> 974 load_up_altivec: >> 975 /* >> 976 * Disable AltiVec for the task which had AltiVec previously, >> 977 * and save its AltiVec registers in its thread_struct. >> 978 * Enables AltiVec for use in the kernel on return. >> 979 * On SMP we know the AltiVec units are free, since we give it up every >> 980 * switch. -- Kumar >> 981 */ >> 982 mfmsr r5 >> 983 oris r5,r5,MSR_VEC@h >> 984 mtmsr r5 /* enable use of AltiVec now */ >> 985 isync >> 986 /* >> 987 * For SMP, we don't do lazy AltiVec switching because it just gets too >> 988 * horrendously complex, especially when a task switches from one CPU >> 989 * to another. Instead we call giveup_altivec in switch_to. 
>> 990 */ >> 991 #ifndef CONFIG_SMP >> 992 #ifndef CONFIG_APUS >> 993 lis r6,-KERNELBASE@h >> 994 #else >> 995 lis r6,CYBERBASEp@h >> 996 lwz r6,0(r6) >> 997 #endif >> 998 addis r3,r6,last_task_used_altivec@ha >> 999 lwz r4,last_task_used_altivec@l(r3) >> 1000 cmpi 0,r4,0 >> 1001 beq 1f >> 1002 add r4,r4,r6 >> 1003 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ >> 1004 SAVE_32VR(0,r20,r4) >> 1005 MFVSCR(vr0) >> 1006 li r20,THREAD_VSCR >> 1007 STVX(vr0,r20,r4) >> 1008 lwz r5,PT_REGS(r4) >> 1009 add r5,r5,r6 >> 1010 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) >> 1011 lis r20,MSR_VEC@h >> 1012 andc r4,r4,r20 /* disable altivec for previous task */ >> 1013 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) >> 1014 1: >> 1015 #endif /* CONFIG_SMP */ >> 1016 /* enable use of AltiVec after return */ >> 1017 oris r23,r23,MSR_VEC@h >> 1018 mfspr r5,SPRG3 /* current task's THREAD (phys) */ >> 1019 li r4,1 >> 1020 li r20,THREAD_VSCR >> 1021 stw r4,THREAD_USED_VR(r5) >> 1022 LVX(vr0,r20,r5) >> 1023 MTVSCR(vr0) >> 1024 REST_32VR(0,r20,r5) >> 1025 #ifndef CONFIG_SMP >> 1026 subi r4,r5,THREAD >> 1027 sub r4,r4,r6 >> 1028 stw r4,last_task_used_altivec@l(r3) >> 1029 #endif /* CONFIG_SMP */ >> 1030 /* restore registers and return */ >> 1031 lwz r3,_CCR(r21) >> 1032 lwz r4,_LINK(r21) >> 1033 mtcrf 0xff,r3 >> 1034 mtlr r4 >> 1035 REST_GPR(1, r21) >> 1036 REST_4GPRS(3, r21) >> 1037 /* we haven't used ctr or xer */ >> 1038 mtspr SRR1,r23 >> 1039 mtspr SRR0,r22 >> 1040 REST_GPR(20, r21) >> 1041 REST_2GPRS(22, r21) >> 1042 lwz r21,GPR21(r21) >> 1043 SYNC >> 1044 RFI 292 1045 293 mov_q x0, INIT_SCTLR_EL2_MMU_OFF !! 1046 /* 294 pre_disable_mmu_workaround !! 1047 * AltiVec unavailable trap from kernel - print a message, but let 295 msr sctlr_el2, x0 !! 1048 * the task use AltiVec in the kernel until it returns to user mode. 296 isb !! 1049 */ 297 0: !! 1050 KernelAltiVec: 298 mov_q x0, HCR_HOST_NVHE_FLAGS !! 1051 lwz r3,_MSR(r1) >> 1052 oris r3,r3,MSR_VEC@h >> 1053 stw r3,_MSR(r1) /* enable use of AltiVec after return */ >> 1054 lis r3,87f@h >> 1055 ori r3,r3,87f@l >> 1056 mr r4,r2 /* current */ >> 1057 lwz r5,_NIP(r1) >> 1058 bl printk >> 1059 b ret_from_except >> 1060 87: .string "AltiVec used in kernel (task=%p, pc=%x) \n" >> 1061 .align 4 299 1062 300 /* !! 1063 /* 301 * Compliant CPUs advertise their VHE- !! 1064 * giveup_altivec(tsk) 302 * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2. !! 1065 * Disable AltiVec for the task given as the argument, 303 * RES1 in that case. Publish the E2H !! 1066 * and save the AltiVec registers in its thread_struct. 304 * it can be picked up by the init_el2 !! 1067 * Enables AltiVec for use in the kernel on return. 305 * !! 1068 */ 306 * Fruity CPUs seem to have HCR_EL2.E2 << 307 * don't advertise it (they predate th << 308 */ << 309 mrs_s x1, SYS_ID_AA64MMFR4_EL1 << 310 tbz x1, #(ID_AA64MMFR4_EL1_E2H0_SH << 311 1069 312 orr x0, x0, #HCR_E2H !! 
1070 .globl giveup_altivec >> 1071 giveup_altivec: >> 1072 mfmsr r5 >> 1073 oris r5,r5,MSR_VEC@h >> 1074 SYNC >> 1075 mtmsr r5 /* enable use of AltiVec now */ >> 1076 isync >> 1077 cmpi 0,r3,0 >> 1078 beqlr- /* if no previous owner, done */ >> 1079 addi r3,r3,THREAD /* want THREAD of task */ >> 1080 lwz r5,PT_REGS(r3) >> 1081 cmpi 0,r5,0 >> 1082 SAVE_32VR(0, r4, r3) >> 1083 MFVSCR(vr0) >> 1084 li r4,THREAD_VSCR >> 1085 STVX(vr0, r4, r3) >> 1086 beq 1f >> 1087 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) >> 1088 lis r3,MSR_VEC@h >> 1089 andc r4,r4,r3 /* disable AltiVec for previous task */ >> 1090 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) 313 1: 1091 1: 314 msr hcr_el2, x0 !! 1092 #ifndef CONFIG_SMP 315 isb !! 1093 li r5,0 316 !! 1094 lis r4,last_task_used_altivec@ha 317 init_el2_state !! 1095 stw r5,last_task_used_altivec@l(r4) >> 1096 #endif /* CONFIG_SMP */ >> 1097 blr >> 1098 #endif /* CONFIG_ALTIVEC */ 318 1099 319 /* Hypervisor stub */ !! 1100 /* 320 adr_l x0, __hyp_stub_vectors !! 1101 * giveup_fpu(tsk) 321 msr vbar_el2, x0 !! 1102 * Disable FP for the task given as the argument, 322 isb !! 1103 * and save the floating-point registers in its thread_struct. 323 !! 1104 * Enables the FPU for use in the kernel on return. 324 mov_q x1, INIT_SCTLR_EL1_MMU_OFF !! 1105 */ 325 !! 1106 .globl giveup_fpu 326 mrs x0, hcr_el2 !! 1107 giveup_fpu: 327 and x0, x0, #HCR_E2H !! 1108 mfmsr r5 328 cbz x0, 2f !! 1109 ori r5,r5,MSR_FP 329 !! 1110 SYNC_601 330 /* Set a sane SCTLR_EL1, the VHE way * !! 1111 ISYNC_601 331 msr_s SYS_SCTLR_EL12, x1 !! 1112 mtmsr r5 /* enable use of fpu now */ 332 mov x2, #BOOT_CPU_FLAG_E2H !! 1113 SYNC_601 333 b 3f !! 1114 isync 334 !! 1115 cmpi 0,r3,0 335 2: !! 1116 beqlr- /* if no previous owner, done */ 336 msr sctlr_el1, x1 !! 1117 addi r3,r3,THREAD /* want THREAD of task */ 337 mov x2, xzr !! 1118 lwz r5,PT_REGS(r3) 338 3: !! 1119 cmpi 0,r5,0 339 __init_el2_nvhe_prepare_eret !! 1120 SAVE_32FPRS(0, r3) 340 !! 1121 mffs fr0 341 mov w0, #BOOT_CPU_MODE_EL2 !! 1122 stfd fr0,THREAD_FPSCR-4(r3) 342 orr x0, x0, x2 !! 1123 beq 1f 343 eret !! 1124 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) 344 SYM_FUNC_END(init_kernel_el) !! 1125 li r3,MSR_FP|MSR_FE0|MSR_FE1 >> 1126 andc r4,r4,r3 /* disable FP for previous task */ >> 1127 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) >> 1128 1: >> 1129 #ifndef CONFIG_SMP >> 1130 li r5,0 >> 1131 lis r4,last_task_used_math@ha >> 1132 stw r5,last_task_used_math@l(r4) >> 1133 #endif /* CONFIG_SMP */ >> 1134 blr 345 1135 346 /* !! 1136 /* 347 * This provides a "holding pen" for p !! 1137 * This code is jumped to from the startup code to copy 348 * cores are held until we're ready fo !! 1138 * the kernel image to physical address 0. 349 */ !! 1139 */ 350 SYM_FUNC_START(secondary_holding_pen) !! 1140 relocate_kernel: 351 mov x0, xzr !! 1141 addis r9,r26,klimit@ha /* fetch klimit */ 352 bl init_kernel_el !! 1142 lwz r25,klimit@l(r9) 353 mrs x2, mpidr_el1 !! 1143 addis r25,r25,-KERNELBASE@h 354 mov_q x1, MPIDR_HWID_BITMASK !! 1144 li r3,0 /* Destination base address */ 355 and x2, x2, x1 !! 1145 li r6,0 /* Destination offset */ 356 adr_l x3, secondary_holding_pen_rele !! 1146 li r5,0x4000 /* # bytes of memory to copy */ 357 pen: ldr x4, [x3] !! 1147 bl copy_and_flush /* copy the first 0x4000 bytes */ 358 cmp x4, x2 !! 1148 addi r0,r3,4f@l /* jump to the address of 4f */ 359 b.eq secondary_startup !! 1149 mtctr r0 /* in copy and do the rest. */ 360 wfe !! 1150 bctr /* jump to the copy */ 361 b pen !! 1151 4: mr r5,r25 362 SYM_FUNC_END(secondary_holding_pen) !! 
1152 bl copy_and_flush /* copy the rest */ >> 1153 b turn_on_mmu 363 1154 364 /* !! 1155 /* 365 * Secondary entry point that jumps st !! 1156 * Copy routine used to copy the kernel to start at physical address 0 366 * be used where CPUs are brought onli !! 1157 * and flush and invalidate the caches as needed. 367 */ !! 1158 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset 368 SYM_FUNC_START(secondary_entry) !! 1159 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. 369 mov x0, xzr !! 1160 */ 370 bl init_kernel_el !! 1161 copy_and_flush: 371 b secondary_startup !! 1162 addi r5,r5,-4 372 SYM_FUNC_END(secondary_entry) !! 1163 addi r6,r6,-4 >> 1164 4: li r0,L1_CACHE_LINE_SIZE/4 >> 1165 mtctr r0 >> 1166 3: addi r6,r6,4 /* copy a cache line */ >> 1167 lwzx r0,r6,r4 >> 1168 stwx r0,r6,r3 >> 1169 bdnz 3b >> 1170 dcbst r6,r3 /* write it to memory */ >> 1171 sync >> 1172 icbi r6,r3 /* flush the icache line */ >> 1173 cmplw 0,r6,r5 >> 1174 blt 4b >> 1175 sync /* additional sync needed on g4 */ >> 1176 isync >> 1177 addi r5,r5,4 >> 1178 addi r6,r6,4 >> 1179 blr 373 1180 374 SYM_FUNC_START_LOCAL(secondary_startup) !! 1181 #ifdef CONFIG_APUS 375 /* !! 1182 /* 376 * Common entry point for secondary CP !! 1183 * On APUS the physical base address of the kernel is not known at compile 377 */ !! 1184 * time, which means the __pa/__va constants used are incorrect. In the 378 mov x20, x0 !! 1185 * __init section is recorded the virtual addresses of instructions using >> 1186 * these constants, so all that has to be done is fix these before >> 1187 * continuing the kernel boot. >> 1188 * >> 1189 * r4 = The physical address of the kernel base. >> 1190 */ >> 1191 fix_mem_constants: >> 1192 mr r10,r4 >> 1193 addis r10,r10,-KERNELBASE@h /* virt_to_phys constant */ >> 1194 neg r11,r10 /* phys_to_virt constant */ >> 1195 >> 1196 lis r12,__vtop_table_begin@h >> 1197 ori r12,r12,__vtop_table_begin@l >> 1198 add r12,r12,r10 /* table begin phys address */ >> 1199 lis r13,__vtop_table_end@h >> 1200 ori r13,r13,__vtop_table_end@l >> 1201 add r13,r13,r10 /* table end phys address */ >> 1202 subi r12,r12,4 >> 1203 subi r13,r13,4 >> 1204 1: lwzu r14,4(r12) /* virt address of instruction */ >> 1205 add r14,r14,r10 /* phys address of instruction */ >> 1206 lwz r15,0(r14) /* instruction, now insert top */ >> 1207 rlwimi r15,r10,16,16,31 /* half of vp const in low half */ >> 1208 stw r15,0(r14) /* of instruction and restore. */ >> 1209 dcbst r0,r14 /* write it to memory */ >> 1210 sync >> 1211 icbi r0,r14 /* flush the icache line */ >> 1212 cmpw r12,r13 >> 1213 bne 1b >> 1214 sync /* additional sync needed on g4 */ >> 1215 isync 379 1216 380 #ifdef CONFIG_ARM64_VA_BITS_52 !! 1217 /* 381 alternative_if ARM64_HAS_VA52 !! 1218 * Map the memory where the exception handlers will 382 bl __cpu_secondary_check52bitva !! 1219 * be copied to when hash constants have been patched. 383 alternative_else_nop_endif !! 
1220 */ >> 1221 #ifdef CONFIG_APUS_FAST_EXCEPT >> 1222 lis r8,0xfff0 >> 1223 #else >> 1224 lis r8,0 >> 1225 #endif >> 1226 ori r8,r8,0x2 /* 128KB, supervisor */ >> 1227 mtspr DBAT3U,r8 >> 1228 mtspr DBAT3L,r8 >> 1229 >> 1230 lis r12,__ptov_table_begin@h >> 1231 ori r12,r12,__ptov_table_begin@l >> 1232 add r12,r12,r10 /* table begin phys address */ >> 1233 lis r13,__ptov_table_end@h >> 1234 ori r13,r13,__ptov_table_end@l >> 1235 add r13,r13,r10 /* table end phys address */ >> 1236 subi r12,r12,4 >> 1237 subi r13,r13,4 >> 1238 1: lwzu r14,4(r12) /* virt address of instruction */ >> 1239 add r14,r14,r10 /* phys address of instruction */ >> 1240 lwz r15,0(r14) /* instruction, now insert top */ >> 1241 rlwimi r15,r11,16,16,31 /* half of pv const in low half*/ >> 1242 stw r15,0(r14) /* of instruction and restore. */ >> 1243 dcbst r0,r14 /* write it to memory */ >> 1244 sync >> 1245 icbi r0,r14 /* flush the icache line */ >> 1246 cmpw r12,r13 >> 1247 bne 1b >> 1248 >> 1249 sync /* additional sync needed on g4 */ >> 1250 isync /* No speculative loading until now */ >> 1251 blr >> 1252 >> 1253 /*********************************************************************** >> 1254 * Please note that on APUS the exception handlers are located at the >> 1255 * physical address 0xfff0000. For this reason, the exception handlers >> 1256 * cannot use relative branches to access the code below. >> 1257 ***********************************************************************/ >> 1258 #endif /* CONFIG_APUS */ >> 1259 >> 1260 #ifdef CONFIG_SMP >> 1261 #ifdef CONFIG_GEMINI >> 1262 .globl __secondary_start_gemini >> 1263 __secondary_start_gemini: >> 1264 mfspr r4,HID0 >> 1265 ori r4,r4,HID0_ICFI >> 1266 li r3,0 >> 1267 ori r3,r3,HID0_ICE >> 1268 andc r4,r4,r3 >> 1269 mtspr HID0,r4 >> 1270 sync >> 1271 b __secondary_start >> 1272 #endif /* CONFIG_GEMINI */ >> 1273 .globl __secondary_start_psurge >> 1274 __secondary_start_psurge: >> 1275 li r24,1 /* cpu # */ >> 1276 b __secondary_start_psurge99 >> 1277 .globl __secondary_start_psurge2 >> 1278 __secondary_start_psurge2: >> 1279 li r24,2 /* cpu # */ >> 1280 b __secondary_start_psurge99 >> 1281 .globl __secondary_start_psurge3 >> 1282 __secondary_start_psurge3: >> 1283 li r24,3 /* cpu # */ >> 1284 b __secondary_start_psurge99 >> 1285 __secondary_start_psurge99: >> 1286 /* we come in here with IR=0 and DR=1, and DBAT 0 >> 1287 set to map the 0xf0000000 - 0xffffffff region */ >> 1288 mfmsr r0 >> 1289 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */ >> 1290 SYNC >> 1291 mtmsr r0 >> 1292 isync >> 1293 >> 1294 .globl __secondary_start >> 1295 __secondary_start: >> 1296 #ifdef CONFIG_PPC64BRIDGE >> 1297 mfmsr r0 >> 1298 clrldi r0,r0,1 /* make sure it's in 32-bit mode */ >> 1299 SYNC >> 1300 MTMSRD(r0) >> 1301 isync 384 #endif 1302 #endif >> 1303 /* Copy some CPU settings from CPU 0 */ >> 1304 bl __restore_cpu_setup 385 1305 386 bl __cpu_setup !! 1306 lis r3,-KERNELBASE@h 387 adrp x1, swapper_pg_dir !! 1307 mr r4,r24 388 adrp x2, idmap_pg_dir !! 1308 bl identify_cpu 389 bl __enable_mmu !! 1309 bl call_setup_cpu /* Call setup_cpu for this CPU */ 390 ldr x8, =__secondary_switched !! 1310 #ifdef CONFIG_6xx 391 br x8 !! 1311 lis r3,-KERNELBASE@h 392 SYM_FUNC_END(secondary_startup) !! 
1312 bl init_idle_6xx >> 1313 #endif /* CONFIG_6xx */ >> 1314 >> 1315 /* get current */ >> 1316 lis r2,current_set@h >> 1317 ori r2,r2,current_set@l >> 1318 tophys(r2,r2) >> 1319 slwi r24,r24,2 /* get current_set[cpu#] */ >> 1320 lwzx r2,r2,r24 >> 1321 >> 1322 /* stack */ >> 1323 addi r1,r2,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD >> 1324 li r0,0 >> 1325 tophys(r3,r1) >> 1326 stw r0,0(r3) >> 1327 >> 1328 /* load up the MMU */ >> 1329 bl load_up_mmu >> 1330 >> 1331 /* ptr to phys current thread */ >> 1332 tophys(r4,r2) >> 1333 addi r4,r4,THREAD /* phys address of our thread_struct */ >> 1334 CLR_TOP32(r4) >> 1335 mtspr SPRG3,r4 >> 1336 li r3,0 >> 1337 mtspr SPRG2,r3 /* 0 => r1 has kernel sp */ >> 1338 stw r3,PT_REGS(r4) /* set thread.regs to 0 for kernel thread */ >> 1339 >> 1340 /* enable MMU and jump to start_secondary */ >> 1341 li r4,MSR_KERNEL >> 1342 FIX_SRR1(r4,r5) >> 1343 lis r3,start_secondary@h >> 1344 ori r3,r3,start_secondary@l >> 1345 mtspr SRR0,r3 >> 1346 mtspr SRR1,r4 >> 1347 SYNC >> 1348 RFI >> 1349 #endif /* CONFIG_SMP */ 393 1350 394 .text !! 1351 /* 395 SYM_FUNC_START_LOCAL(__secondary_switched) !! 1352 * Those generic dummy functions are kept for CPUs not 396 mov x0, x20 !! 1353 * included in CONFIG_6xx 397 bl set_cpu_boot_mode_flag !! 1354 */ >> 1355 _GLOBAL(__setup_cpu_power3) >> 1356 blr >> 1357 _GLOBAL(__setup_cpu_power4) >> 1358 blr >> 1359 _GLOBAL(__setup_cpu_ppc970) >> 1360 blr >> 1361 _GLOBAL(__setup_cpu_generic) >> 1362 blr >> 1363 >> 1364 #ifndef CONFIG_6xx >> 1365 _GLOBAL(__save_cpu_setup) >> 1366 blr >> 1367 _GLOBAL(__restore_cpu_setup) >> 1368 #ifdef CONFIG_POWER4 >> 1369 /* turn off real-mode cache inhibit on the PPC970 */ >> 1370 mfspr r0,SPRN_PVR >> 1371 srwi r0,r0,16 >> 1372 cmpwi r0,0x39 >> 1373 beq ppc970_setup_hid >> 1374 blr >> 1375 #endif >> 1376 #endif /* CONFIG_6xx */ 398 1377 399 mov x0, x20 !! 1378 /* 400 bl finalise_el2 !! 1379 * Load stuff into the MMU. Intended to be called with >> 1380 * IR=0 and DR=0. >> 1381 */ >> 1382 load_up_mmu: >> 1383 sync /* Force all PTE updates to finish */ >> 1384 isync >> 1385 tlbia /* Clear all TLB entries */ >> 1386 sync /* wait for tlbia/tlbie to finish */ >> 1387 TLBSYNC /* ... on all CPUs */ >> 1388 /* Load the SDR1 register (hash table base & size) */ >> 1389 lis r6,_SDR1@ha >> 1390 tophys(r6,r6) >> 1391 lwz r6,_SDR1@l(r6) >> 1392 mtspr SDR1,r6 >> 1393 #ifdef CONFIG_PPC64BRIDGE >> 1394 /* clear the ASR so we only use the pseudo-segment registers. */ >> 1395 li r6,0 >> 1396 mtasr r6 >> 1397 #endif /* CONFIG_PPC64BRIDGE */ >> 1398 li r0,16 /* load up segment register values */ >> 1399 mtctr r0 /* for context 0 */ >> 1400 lis r3,0x2000 /* Ku = 1, VSID = 0 */ >> 1401 li r4,0 >> 1402 3: mtsrin r3,r4 >> 1403 addi r3,r3,0x111 /* increment VSID */ >> 1404 addis r4,r4,0x1000 /* address of next segment */ >> 1405 bdnz 3b >> 1406 #ifndef CONFIG_POWER4 >> 1407 /* Load the BAT registers with the values set up by MMU_init. >> 1408 MMU_init takes care of whether we're on a 601 or not. */ >> 1409 mfpvr r3 >> 1410 srwi r3,r3,16 >> 1411 cmpwi r3,1 >> 1412 lis r3,BATS@ha >> 1413 addi r3,r3,BATS@l >> 1414 tophys(r3,r3) >> 1415 LOAD_BAT(0,r3,r4,r5) >> 1416 LOAD_BAT(1,r3,r4,r5) >> 1417 LOAD_BAT(2,r3,r4,r5) >> 1418 LOAD_BAT(3,r3,r4,r5) >> 1419 #endif /* CONFIG_POWER4 */ >> 1420 blr 401 1421 402 str_l xzr, __early_cpu_boot_status, !! 1422 /* 403 adr_l x5, vectors !! 1423 * This is where the main kernel code starts. 404 msr vbar_el1, x5 !! 1424 */ 405 isb !! 
1425 start_here: >> 1426 /* ptr to current */ >> 1427 lis r2,init_task_union@h >> 1428 ori r2,r2,init_task_union@l >> 1429 /* Set up for using our exception vectors */ >> 1430 /* ptr to phys current thread */ >> 1431 tophys(r4,r2) >> 1432 addi r4,r4,THREAD /* init task's THREAD */ >> 1433 CLR_TOP32(r4) >> 1434 mtspr SPRG3,r4 >> 1435 li r3,0 >> 1436 mtspr SPRG2,r3 /* 0 => r1 has kernel sp */ >> 1437 >> 1438 /* stack */ >> 1439 addi r1,r2,TASK_UNION_SIZE >> 1440 li r0,0 >> 1441 stwu r0,-STACK_FRAME_OVERHEAD(r1) >> 1442 /* >> 1443 * Do early bootinfo parsing, platform-specific initialization, >> 1444 * and set up the MMU. >> 1445 */ >> 1446 mr r3,r31 >> 1447 mr r4,r30 >> 1448 mr r5,r29 >> 1449 mr r6,r28 >> 1450 mr r7,r27 >> 1451 bl machine_init >> 1452 bl MMU_init >> 1453 >> 1454 #ifdef CONFIG_APUS >> 1455 /* Copy exception code to exception vector base on APUS. */ >> 1456 lis r4,KERNELBASE@h >> 1457 #ifdef CONFIG_APUS_FAST_EXCEPT >> 1458 lis r3,0xfff0 /* Copy to 0xfff00000 */ >> 1459 #else >> 1460 lis r3,0 /* Copy to 0x00000000 */ >> 1461 #endif >> 1462 li r5,0x4000 /* # bytes of memory to copy */ >> 1463 li r6,0 >> 1464 bl copy_and_flush /* copy the first 0x4000 bytes */ >> 1465 #endif /* CONFIG_APUS */ 406 1466 407 adr_l x0, secondary_data !! 1467 /* 408 ldr x2, [x0, #CPU_BOOT_TASK] !! 1468 * Go back to running unmapped so we can load up new values 409 cbz x2, __secondary_too_slow !! 1469 * for SDR1 (hash table pointer) and the segment registers >> 1470 * and change to using our exception vectors. >> 1471 */ >> 1472 lis r4,2f@h >> 1473 ori r4,r4,2f@l >> 1474 tophys(r4,r4) >> 1475 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR) >> 1476 FIX_SRR1(r3,r5) >> 1477 mtspr SRR0,r4 >> 1478 mtspr SRR1,r3 >> 1479 SYNC >> 1480 RFI >> 1481 /* Load up the kernel context */ >> 1482 2: bl load_up_mmu >> 1483 >> 1484 #ifdef CONFIG_BDI_SWITCH >> 1485 /* Add helper information for the Abatron bdiGDB debugger. >> 1486 * We do this here because we know the mmu is disabled, and >> 1487 * will be enabled for real in just a few instructions. >> 1488 */ >> 1489 lis r5, abatron_pteptrs@h >> 1490 ori r5, r5, abatron_pteptrs@l >> 1491 stw r5, 0xf0(r0) /* This much match your Abatron config */ >> 1492 lis r6, swapper_pg_dir@h >> 1493 ori r6, r6, swapper_pg_dir@l >> 1494 tophys(r5, r5) >> 1495 stw r6, 0(r5) >> 1496 #endif 410 1497 411 init_cpu_task x2, x1, x3 !! 1498 /* Now turn on the MMU for real! */ >> 1499 li r4,MSR_KERNEL >> 1500 FIX_SRR1(r4,r5) >> 1501 lis r3,start_kernel@h >> 1502 ori r3,r3,start_kernel@l >> 1503 mtspr SRR0,r3 >> 1504 mtspr SRR1,r4 >> 1505 SYNC >> 1506 RFI 412 1507 413 #ifdef CONFIG_ARM64_PTR_AUTH !! 1508 /* 414 ptrauth_keys_init_cpu x2, x3, x4, x5 !! 1509 * Set up the segment registers for a new context. >> 1510 */ >> 1511 _GLOBAL(set_context) >> 1512 mulli r3,r3,897 /* multiply context by skew factor */ >> 1513 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */ >> 1514 addis r3,r3,0x6000 /* Set Ks, Ku bits */ >> 1515 li r0,NUM_USER_SEGMENTS >> 1516 mtctr r0 >> 1517 >> 1518 #ifdef CONFIG_BDI_SWITCH >> 1519 /* Context switch the PTE pointer for the Abatron BDI2000. >> 1520 * The PGDIR is passed as second argument. >> 1521 */ >> 1522 lis r5, KERNELBASE@h >> 1523 lwz r5, 0xf0(r5) >> 1524 stw r4, 0x4(r5) 415 #endif 1525 #endif 416 1526 417 bl secondary_start_kernel !! 1527 li r4,0 418 ASM_BUG() !! 1528 BEGIN_FTR_SECTION 419 SYM_FUNC_END(__secondary_switched) !! 1529 DSSALL 420 !! 1530 sync 421 SYM_FUNC_START_LOCAL(__secondary_too_slow) !! 1531 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 422 wfe !! 
1532 3: isync 423 wfi !! 1533 #ifdef CONFIG_PPC64BRIDGE 424 b __secondary_too_slow !! 1534 slbie r4 425 SYM_FUNC_END(__secondary_too_slow) !! 1535 #endif /* CONFIG_PPC64BRIDGE */ >> 1536 mtsrin r3,r4 >> 1537 addi r3,r3,0x111 /* next VSID */ >> 1538 rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */ >> 1539 addis r4,r4,0x1000 /* address of next segment */ >> 1540 bdnz 3b >> 1541 sync >> 1542 isync >> 1543 blr 426 1544 427 /* 1545 /* 428 * Sets the __boot_cpu_mode flag depending on !! 1546 * An undocumented "feature" of 604e requires that the v bit 429 * in w0. See arch/arm64/include/asm/virt.h fo !! 1547 * be cleared before changing BAT values. >> 1548 * >> 1549 * Also, newer IBM firmware does not clear bat3 and 4 so >> 1550 * this makes sure it's done. >> 1551 * -- Cort 430 */ 1552 */ 431 SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag) !! 1553 clear_bats: 432 adr_l x1, __boot_cpu_mode !! 1554 li r20,0 433 cmp w0, #BOOT_CPU_MODE_EL2 !! 1555 mfspr r9,PVR 434 b.ne 1f !! 1556 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ 435 add x1, x1, #4 !! 1557 cmpwi r9, 1 436 1: str w0, [x1] !! 1558 beq 1f 437 ret !! 1559 438 SYM_FUNC_END(set_cpu_boot_mode_flag) !! 1560 mtspr DBAT0U,r20 >> 1561 mtspr DBAT0L,r20 >> 1562 mtspr DBAT1U,r20 >> 1563 mtspr DBAT1L,r20 >> 1564 mtspr DBAT2U,r20 >> 1565 mtspr DBAT2L,r20 >> 1566 mtspr DBAT3U,r20 >> 1567 mtspr DBAT3L,r20 >> 1568 1: >> 1569 mtspr IBAT0U,r20 >> 1570 mtspr IBAT0L,r20 >> 1571 mtspr IBAT1U,r20 >> 1572 mtspr IBAT1L,r20 >> 1573 mtspr IBAT2U,r20 >> 1574 mtspr IBAT2L,r20 >> 1575 mtspr IBAT3U,r20 >> 1576 mtspr IBAT3L,r20 >> 1577 BEGIN_FTR_SECTION >> 1578 /* Here's a tweak: at this point, CPU setup have >> 1579 * not been called yet, so HIGH_BAT_EN may not be >> 1580 * set in HID0 for the 745x processors. However, it >> 1581 * seems that doesn't affect our ability to actually >> 1582 * write to these SPRs. >> 1583 */ >> 1584 mtspr SPRN_DBAT4U,r20 >> 1585 mtspr SPRN_DBAT4L,r20 >> 1586 mtspr SPRN_DBAT5U,r20 >> 1587 mtspr SPRN_DBAT5L,r20 >> 1588 mtspr SPRN_DBAT6U,r20 >> 1589 mtspr SPRN_DBAT6L,r20 >> 1590 mtspr SPRN_DBAT7U,r20 >> 1591 mtspr SPRN_DBAT7L,r20 >> 1592 mtspr SPRN_IBAT4U,r20 >> 1593 mtspr SPRN_IBAT4L,r20 >> 1594 mtspr SPRN_IBAT5U,r20 >> 1595 mtspr SPRN_IBAT5L,r20 >> 1596 mtspr SPRN_IBAT6U,r20 >> 1597 mtspr SPRN_IBAT6L,r20 >> 1598 mtspr SPRN_IBAT7U,r20 >> 1599 mtspr SPRN_IBAT7L,r20 >> 1600 END_FTR_SECTION_IFSET(CPU_FTR_HAS_HIGH_BATS) >> 1601 blr >> 1602 >> 1603 flush_tlbs: >> 1604 lis r20, 0x40 >> 1605 1: addic. r20, r20, -0x1000 >> 1606 tlbie r20 >> 1607 blt 1b >> 1608 sync >> 1609 blr >> 1610 >> 1611 mmu_off: >> 1612 addi r4, r3, __after_mmu_off - _start >> 1613 mfmsr r3 >> 1614 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */ >> 1615 beqlr >> 1616 andc r3,r3,r0 >> 1617 mtspr SRR0,r4 >> 1618 mtspr SRR1,r3 >> 1619 sync >> 1620 RFI 439 1621 >> 1622 #ifndef CONFIG_POWER4 440 /* 1623 /* 441 * The booting CPU updates the failed status @ !! 1624 * Use the first pair of BAT registers to map the 1st 16MB 442 * with MMU turned off. !! 1625 * of RAM to KERNELBASE. From this point on we can't safely 443 * !! 1626 * call OF any more. 444 * update_early_cpu_boot_status tmp, status << 445 * - Corrupts tmp1, tmp2 << 446 * - Writes 'status' to __early_cpu_boot_stat << 447 * it is committed to memory. 
<< 448 */ 1627 */ >> 1628 initial_bats: >> 1629 lis r11,KERNELBASE@h >> 1630 #ifndef CONFIG_PPC64BRIDGE >> 1631 mfspr r9,PVR >> 1632 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ >> 1633 cmpi 0,r9,1 >> 1634 bne 4f >> 1635 ori r11,r11,4 /* set up BAT registers for 601 */ >> 1636 li r8,0x7f /* valid, block length = 8MB */ >> 1637 oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */ >> 1638 oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */ >> 1639 mtspr IBAT0U,r11 /* N.B. 601 has valid bit in */ >> 1640 mtspr IBAT0L,r8 /* lower BAT register */ >> 1641 mtspr IBAT1U,r9 >> 1642 mtspr IBAT1L,r10 >> 1643 isync >> 1644 blr >> 1645 #endif /* CONFIG_PPC64BRIDGE */ >> 1646 >> 1647 4: tophys(r8,r11) >> 1648 #ifdef CONFIG_SMP >> 1649 ori r8,r8,0x12 /* R/W access, M=1 */ >> 1650 #else >> 1651 ori r8,r8,2 /* R/W access */ >> 1652 #endif /* CONFIG_SMP */ >> 1653 #ifdef CONFIG_APUS >> 1654 ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */ >> 1655 #else >> 1656 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */ >> 1657 #endif /* CONFIG_APUS */ 449 1658 450 .macro update_early_cpu_boot_status s !! 1659 #ifdef CONFIG_PPC64BRIDGE 451 mov \tmp2, #\status !! 1660 /* clear out the high 32 bits in the BAT */ 452 adr_l \tmp1, __early_cpu_boot_status !! 1661 clrldi r11,r11,32 453 str \tmp2, [\tmp1] !! 1662 clrldi r8,r8,32 454 dmb sy !! 1663 #endif /* CONFIG_PPC64BRIDGE */ 455 dc ivac, \tmp1 !! 1664 mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */ 456 .endm !! 1665 mtspr DBAT0U,r11 /* bit in upper BAT register */ >> 1666 mtspr IBAT0L,r8 >> 1667 mtspr IBAT0U,r11 >> 1668 isync >> 1669 blr 457 1670 458 /* !! 1671 #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) 459 * Enable the MMU. !! 1672 setup_disp_bat: 460 * !! 1673 /* 461 * x0 = SCTLR_EL1 value for turning on the M !! 1674 * setup the display bat prepared for us in prom.c 462 * x1 = TTBR1_EL1 value !! 1675 */ 463 * x2 = ID map root table address !! 1676 mflr r8 464 * !! 1677 bl reloc_offset 465 * Returns to the caller via x30/lr. This requ !! 1678 mtlr r8 466 * by the .idmap.text section. !! 1679 addis r8,r3,disp_BAT@ha >> 1680 addi r8,r8,disp_BAT@l >> 1681 lwz r11,0(r8) >> 1682 lwz r8,4(r8) >> 1683 mfspr r9,PVR >> 1684 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ >> 1685 cmpi 0,r9,1 >> 1686 beq 1f >> 1687 mtspr DBAT3L,r8 >> 1688 mtspr DBAT3U,r11 >> 1689 blr >> 1690 1: mtspr IBAT3L,r8 >> 1691 mtspr IBAT3U,r11 >> 1692 blr >> 1693 >> 1694 #endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */ >> 1695 >> 1696 #else /* CONFIG_POWER4 */ >> 1697 ppc970_setup_hid: >> 1698 li r0,0 >> 1699 sync >> 1700 mtspr 0x3f4,r0 >> 1701 isync >> 1702 sync >> 1703 mtspr 0x3f6,r0 >> 1704 isync >> 1705 mfspr r0,SPRN_HID0 >> 1706 li r11,5 /* clear DOZE and SLEEP */ >> 1707 rldimi r0,r11,52,8 /* and set NAP and DPM */ >> 1708 li r11,0 >> 1709 rldimi r0,r11,32,31 /* clear EN_ATTN */ >> 1710 mtspr SPRN_HID0,r0 >> 1711 mfspr r0,SPRN_HID0 >> 1712 mfspr r0,SPRN_HID0 >> 1713 mfspr r0,SPRN_HID0 >> 1714 mfspr r0,SPRN_HID0 >> 1715 mfspr r0,SPRN_HID0 >> 1716 mfspr r0,SPRN_HID0 >> 1717 sync >> 1718 isync >> 1719 mfspr r0,SPRN_HID1 >> 1720 li r11,0x1200 /* enable i-fetch cacheability */ >> 1721 sldi r11,r11,44 /* and prefetch */ >> 1722 or r0,r0,r11 >> 1723 mtspr SPRN_HID1,r0 >> 1724 mtspr SPRN_HID1,r0 >> 1725 isync >> 1726 li r0,0 >> 1727 sync >> 1728 mtspr 0x137,0 >> 1729 isync >> 1730 blr >> 1731 #endif /* CONFIG_POWER4 */ >> 1732 >> 1733 #ifdef CONFIG_8260 >> 1734 /* Jump into the system reset for the rom. 
>> 1735 * We first disable the MMU, and then jump to the ROM reset address. 467 * 1736 * 468 * Checks if the selected granule size is supp !! 1737 * r3 is the board info structure, r4 is the location for starting. 469 * If it isn't, park the CPU !! 1738 * I use this for building a small kernel that can load other kernels, >> 1739 * rather than trying to write or rely on a rom monitor that can tftp load. 470 */ 1740 */ 471 .section ".idmap.text","a" !! 1741 .globl m8260_gorom 472 SYM_FUNC_START(__enable_mmu) !! 1742 m8260_gorom: 473 mrs x3, ID_AA64MMFR0_EL1 !! 1743 mfmsr r0 474 ubfx x3, x3, #ID_AA64MMFR0_EL1_TGRA !! 1744 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */ 475 cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SU !! 1745 sync 476 b.lt __no_granule_support !! 1746 mtmsr r0 477 cmp x3, #ID_AA64MMFR0_EL1_TGRAN_SU !! 1747 sync 478 b.gt __no_granule_support !! 1748 mfspr r11, HID0 479 phys_to_ttbr x2, x2 !! 1749 lis r10, 0 480 msr ttbr0_el1, x2 !! 1750 ori r10,r10,HID0_ICE|HID0_DCE 481 load_ttbr1 x1, x1, x3 !! 1751 andc r11, r11, r10 >> 1752 mtspr HID0, r11 >> 1753 isync >> 1754 li r5, MSR_ >> 1755 lis r6,2f@h >> 1756 addis r6,r6,-KERNELBASE@h >> 1757 ori r6,r6,2f@l >> 1758 mtspr SRR0,r6 >> 1759 mtspr SRR1,r5 >> 1760 isync >> 1761 sync >> 1762 rfi >> 1763 2: >> 1764 mtlr r4 >> 1765 blr >> 1766 #endif 482 1767 483 set_sctlr_el1 x0 << 484 1768 485 ret !! 1769 /* 486 SYM_FUNC_END(__enable_mmu) !! 1770 * We put a few things here that have to be page-aligned. >> 1771 * This stuff goes at the beginning of the data segment, >> 1772 * which is page-aligned. >> 1773 */ >> 1774 .data >> 1775 .globl sdata >> 1776 sdata: >> 1777 .globl empty_zero_page >> 1778 empty_zero_page: >> 1779 .space 4096 >> 1780 >> 1781 .globl swapper_pg_dir >> 1782 swapper_pg_dir: >> 1783 .space 4096 487 1784 488 #ifdef CONFIG_ARM64_VA_BITS_52 !! 1785 /* 489 SYM_FUNC_START(__cpu_secondary_check52bitva) !! 1786 * This space gets a copy of optional info passed to us by the bootstrap 490 #ifndef CONFIG_ARM64_LPA2 !! 1787 * Used to pass parameters into the kernel like root=/dev/sda1, etc. 491 mrs_s x0, SYS_ID_AA64MMFR2_EL1 !! 1788 */ 492 and x0, x0, ID_AA64MMFR2_EL1_VARan !! 1789 .globl cmd_line 493 cbnz x0, 2f !! 1790 cmd_line: 494 #else !! 1791 .space 512 495 mrs x0, id_aa64mmfr0_el1 !! 1792 496 sbfx x0, x0, #ID_AA64MMFR0_EL1_TGRA !! 1793 .globl intercept_table 497 cmp x0, #ID_AA64MMFR0_EL1_TGRAN_LP !! 1794 intercept_table: 498 b.ge 2f !! 1795 .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700 499 #endif !! 1796 .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0 500 !! 1797 .long 0, 0, 0, i0x1300, 0, 0, 0, 0 501 update_early_cpu_boot_status \ !! 1798 .long 0, 0, 0, 0, 0, 0, 0, 0 502 CPU_STUCK_IN_KERNEL | CPU_STUC !! 1799 .long 0, 0, 0, 0, 0, 0, 0, 0 503 1: wfe !! 1800 .long 0, 0, 0, 0, 0, 0, 0, 0 504 wfi !! 1801 505 b 1b !! 1802 #ifdef CONFIG_BDI_SWITCH 506 !! 1803 /* Room for two PTE pointers, usually the kernel and current user pointers 507 2: ret !! 1804 * to their respective root page table. 508 SYM_FUNC_END(__cpu_secondary_check52bitva) !! 1805 */ 509 #endif !! 1806 abatron_pteptrs: 510 !! 1807 .space 8 511 SYM_FUNC_START_LOCAL(__no_granule_support) !! 
1808 #endif 512 /* Indicate that this CPU can't boot a << 513 update_early_cpu_boot_status \ << 514 CPU_STUCK_IN_KERNEL | CPU_STUC << 515 1: << 516 wfe << 517 wfi << 518 b 1b << 519 SYM_FUNC_END(__no_granule_support) << 520 << 521 SYM_FUNC_START_LOCAL(__primary_switch) << 522 adrp x1, reserved_pg_dir << 523 adrp x2, init_idmap_pg_dir << 524 bl __enable_mmu << 525 << 526 adrp x1, early_init_stack << 527 mov sp, x1 << 528 mov x29, xzr << 529 mov x0, x20 << 530 mov x1, x21 << 531 bl __pi_early_map_kernel << 532 << 533 ldr x8, =__primary_switched << 534 adrp x0, KERNEL_START << 535 br x8 << 536 SYM_FUNC_END(__primary_switch) <<
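For reference, here is a minimal C sketch of the arithmetic that set_context (and the matching segment-register loop in load_up_mmu) performs when loading the sixteen user segment registers: the context number is skewed by 897, shifted into the VSID field, the Ks/Ku key bits are set, and each successive 256MB segment receives a VSID 0x111 higher than the previous one. It only mirrors what the assembly above does; the function name, the NUM_USER_SEGMENTS value and the mask constants are assumptions made for illustration, not kernel code.

#include <stdint.h>

#define NUM_USER_SEGMENTS	16	/* assumed: one SR per 256MB of user space */

/*
 * Illustrative sketch (not kernel code): the per-segment values written by
 * the mtsrin loop in set_context for a given MMU context number.
 */
static void compute_segment_regs(uint32_t context, uint32_t sr[NUM_USER_SEGMENTS])
{
	/* mulli r3,r3,897 ; rlwinm r3,r3,4,8,27 : skewed context into the VSID field */
	uint32_t val = ((context * 897u) << 4) & 0x00fffff0u;

	val += 0x60000000u;			/* addis r3,r3,0x6000 : set Ks, Ku bits */

	for (int seg = 0; seg < NUM_USER_SEGMENTS; seg++) {
		sr[seg] = val;			/* value the assembly hands to mtsrin */
		val += 0x111u;			/* next VSID */
		val &= ~0x0f000000u;		/* rlwinm r3,r3,0,8,3 : drop VSID overflow */
	}
}

The 897 skew presumably spreads the VSIDs of neighbouring contexts apart so their entries do not cluster in the hash table, and the per-segment increment of 0x111 likewise keeps the sixteen segments of one context from hashing on top of each other.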
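In the same spirit, a sketch of the BAT pair that initial_bats programs on 604-class CPUs to map the first block of RAM at KERNELBASE: the upper BAT takes the effective address, the block-length field and the Vs (supervisor-valid) bit, while the lower BAT takes the physical address plus the protection/WIMG bits (0x12 when CONFIG_SMP wants the coherence M bit, plain 0x2 otherwise). The KERNELBASE and BL_256M values, the struct and the function name below are assumptions for illustration only.

#include <stdint.h>

#define KERNELBASE	0xc0000000u	/* assumed ppc32 kernel virtual base */
#define BL_256M		0x7ffu		/* assumed encoding of a 256MB block length */

struct bat_pair {
	uint32_t upper;			/* xBATnU: BEPI | BL<<2 | Vs */
	uint32_t lower;			/* xBATnL: BRPN | WIMG | PP */
};

/* Illustrative sketch (not kernel code) of the 604-style IBAT0/DBAT0 values. */
static struct bat_pair initial_bat0(int smp)
{
	struct bat_pair bat;

	/* ori r11,r11,BL_256M<<2|0x2 : 256MB block at KERNELBASE, supervisor valid */
	bat.upper = KERNELBASE | (BL_256M << 2) | 0x2u;

	/* tophys() turns KERNELBASE back into physical address 0 here */
	bat.lower = 0x00000000u | (smp ? 0x12u : 0x02u);	/* PP=RW, plus M=1 on SMP */

	return bat;
}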