1 /* SPDX-License-Identifier: GPL-2.0-or-later * !! 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* !! 2 /* head.S: Initial boot code for the Sparc64 port of Linux. 3 * PowerPC version << 4 * Copyright (C) 1995-1996 Gary Thomas (gdt << 5 * << 6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) << 7 * Copyright (C) 1996 Cort Dougan <cort@cs.n << 8 * Adapted for Power Macintosh by Paul Macker << 9 * Low-level exception handlers and MMU suppo << 10 * rewritten by Paul Mackerras. << 11 * Copyright (C) 1996 Paul Mackerras. << 12 * << 13 * Adapted for 64bit PowerPC by Dave Engebret << 14 * Mike Corrigan {engebret|bergner|mikejc}@ << 15 * 3 * 16 * This file contains the entry point for the !! 4 * Copyright (C) 1996, 1997, 2007 David S. Miller (davem@davemloft.net) 17 * with some early initialization code common !! 5 * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au) 18 * variants. !! 6 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) >> 7 * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx) 19 */ 8 */ 20 9 21 #include <linux/linkage.h> !! 10 #include <linux/version.h> >> 11 #include <linux/errno.h> >> 12 #include <linux/export.h> 22 #include <linux/threads.h> 13 #include <linux/threads.h> 23 #include <linux/init.h> 14 #include <linux/init.h> 24 #include <asm/reg.h> !! 15 #include <linux/linkage.h> 25 #include <asm/page.h> !! 16 #include <linux/pgtable.h> 26 #include <asm/mmu.h> << 27 #include <asm/ppc_asm.h> << 28 #include <asm/head-64.h> << 29 #include <asm/asm-offsets.h> << 30 #include <asm/bug.h> << 31 #include <asm/cputable.h> << 32 #include <asm/setup.h> << 33 #include <asm/hvcall.h> << 34 #include <asm/thread_info.h> 17 #include <asm/thread_info.h> 35 #include <asm/firmware.h> !! 18 #include <asm/asi.h> 36 #include <asm/page_64.h> !! 19 #include <asm/pstate.h> 37 #include <asm/irqflags.h> << 38 #include <asm/kvm_book3s_asm.h> << 39 #include <asm/ptrace.h> 20 #include <asm/ptrace.h> 40 #include <asm/hw_irq.h> !! 21 #include <asm/spitfire.h> 41 #include <asm/cputhreads.h> !! 22 #include <asm/page.h> 42 #include <asm/ppc-opcode.h> !! 23 #include <asm/errno.h> 43 #include <asm/feature-fixups.h> !! 24 #include <asm/signal.h> 44 #ifdef CONFIG_PPC_BOOK3S !! 25 #include <asm/processor.h> 45 #include <asm/exception-64s.h> !! 26 #include <asm/lsu.h> 46 #else !! 27 #include <asm/dcr.h> 47 #include <asm/exception-64e.h> !! 28 #include <asm/dcu.h> 48 #endif !! 29 #include <asm/head.h> 49 !! 30 #include <asm/ttable.h> 50 /* The physical memory is laid out such that t !! 31 #include <asm/mmu.h> 51 * spin code sits at 0x0000...0x00ff. On serve !! 32 #include <asm/cpudata.h> 52 * using the layout described in exceptions-64 !! 33 #include <asm/pil.h> 53 */ !! 34 #include <asm/estate.h> 54 !! 35 #include <asm/sfafsr.h> 55 /* !! 36 #include <asm/unistd.h> 56 * Entering into this code we make the followi !! 37 57 * !! 38 /* This section from from _start to sparc64_boot_end should fit into 58 * For pSeries or server processors: !! 39 * 0x0000000000404000 to 0x0000000000408000. 59 * 1. The MMU is off & open firmware is runn !! 40 */ 60 * 2. The primary CPU enters at __start. !! 41 .text 61 * 3. If the RTAS supports "query-cpu-stoppe !! 42 .globl start, _start, stext, _stext 62 * CPUs will enter as directed by "start- !! 43 _start: 63 * generic_secondary_smp_init, with PIR i !! 44 start: 64 * 4. Else the secondary CPUs will enter at !! 45 _stext: 65 * directed by the "start-cpu" RTS call, !! 46 stext: 66 * -or- For OPAL entry: !! 47 ! 0x0000000000404000 67 * 1. 
The MMU is off, processor in HV mode. !! 48 b sparc64_boot 68 * 2. The primary CPU enters at 0 with devic !! 49 flushw /* Flush register file. */ 69 * in r8, and entry in r9 for debugging p !! 50 70 * 3. Secondary CPUs enter as directed by OP !! 51 /* This stuff has to be in sync with SILO and other potential boot loaders 71 * is at generic_secondary_smp_init, with !! 52 * Fields should be kept upward compatible and whenever any change is made, 72 * !! 53 * HdrS version should be incremented. 73 * For Book3E processors: !! 54 */ 74 * 1. The MMU is on running in AS0 in a stat !! 55 .global root_flags, ram_flags, root_dev 75 * 2. The kernel is entered at __start !! 56 .global sparc_ramdisk_image, sparc_ramdisk_size 76 */ !! 57 .global sparc_ramdisk_image64 77 << 78 /* << 79 * boot_from_prom and prom_init run at the phy << 80 * after prom and kexec entry run at the virtu << 81 * Secondaries run at the virtual address from << 82 * onward. << 83 */ << 84 << 85 OPEN_FIXED_SECTION(first_256B, 0x0, 0x100) << 86 USE_FIXED_SECTION(first_256B) << 87 /* << 88 * Offsets are relative from the start << 89 * first_256B starts at 0. Offsets are << 90 * than the fixed section entry macros << 91 */ << 92 . = 0x0 << 93 _GLOBAL(__start) << 94 /* NOP this out unconditionally */ << 95 BEGIN_FTR_SECTION << 96 FIXUP_ENDIAN << 97 b __start_initialization_multipl << 98 END_FTR_SECTION(0, 1) << 99 << 100 /* Catch branch to 0 in real mode */ << 101 trap << 102 << 103 /* Secondary processors spin on this v << 104 * When non-zero, it contains the real << 105 * should jump to. << 106 */ << 107 .balign 8 << 108 .globl __secondary_hold_spinloop << 109 __secondary_hold_spinloop: << 110 .8byte 0x0 << 111 << 112 /* Secondary processors write this val << 113 /* after they enter the spin loop imme << 114 .globl __secondary_hold_acknowledge << 115 __secondary_hold_acknowledge: << 116 .8byte 0x0 << 117 << 118 #ifdef CONFIG_RELOCATABLE << 119 /* This flag is set to 1 by a loader i << 120 * at the loaded address instead of th << 121 * is used by kexec-tools to keep the << 122 * crash_kernel region. The loader is << 123 * observing the alignment requirement << 124 */ << 125 << 126 #ifdef CONFIG_RELOCATABLE_TEST << 127 #define RUN_AT_LOAD_DEFAULT 1 /* Tes << 128 #else << 129 #define RUN_AT_LOAD_DEFAULT 0x72756e30 /* "ru << 130 #endif << 131 << 132 /* Do not move this variable as kexec- << 133 . = 0x5c << 134 .globl __run_at_load << 135 __run_at_load: << 136 DEFINE_FIXED_SYMBOL(__run_at_load, first_256B) << 137 .long RUN_AT_LOAD_DEFAULT << 138 #endif << 139 << 140 . 
= 0x60 << 141 /* << 142 * The following code is used to hold secondar << 143 * in a spin loop after they have entered the << 144 * before the bulk of the kernel has been relo << 145 * is relocated to physical address 0x60 befor << 146 * All of it must fit below the first exceptio << 147 * Use .globl here not _GLOBAL because we want << 148 * to be the actual text address, not a descri << 149 */ << 150 .globl __secondary_hold << 151 __secondary_hold: << 152 FIXUP_ENDIAN << 153 #ifndef CONFIG_PPC_BOOK3E_64 << 154 mfmsr r24 << 155 ori r24,r24,MSR_RI << 156 mtmsrd r24 /* RI << 157 #endif << 158 /* Grab our physical cpu number */ << 159 mr r24,r3 << 160 /* stash r4 for book3e */ << 161 mr r25,r4 << 162 << 163 /* Tell the master cpu we're here */ << 164 /* Relocation is off & we are located << 165 /* than 0x100, so only need to grab lo << 166 std r24,(ABS_ADDR(__secondary_hold << 167 sync << 168 << 169 /* All secondary cpus wait here until << 170 100: ld r12,(ABS_ADDR(__secondary_hold << 171 cmpdi 0,r12,0 << 172 beq 100b << 173 << 174 #if defined(CONFIG_SMP) || defined(CONFIG_KEXE << 175 #ifdef CONFIG_PPC_BOOK3E_64 << 176 tovirt(r12,r12) << 177 #endif << 178 mtctr r12 << 179 mr r3,r24 << 180 /* << 181 * it may be the case that other platf << 182 * begin with, this gives us some safe << 183 */ << 184 #ifdef CONFIG_PPC_BOOK3E_64 << 185 mr r4,r25 << 186 #else << 187 li r4,0 << 188 #endif << 189 /* Make sure that patched code is visi << 190 isync << 191 bctr << 192 #else << 193 0: trap << 194 EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, << 195 #endif << 196 CLOSE_FIXED_SECTION(first_256B) << 197 << 198 /* << 199 * On server, we include the exception vectors << 200 * relies on absolute addressing which is only << 201 * this compilation unit << 202 */ << 203 #ifdef CONFIG_PPC_BOOK3S << 204 #include "exceptions-64s.S" << 205 #else << 206 OPEN_TEXT_SECTION(0x100) << 207 #endif << 208 << 209 USE_TEXT_SECTION() << 210 << 211 #include "interrupt_64.S" << 212 << 213 #ifdef CONFIG_PPC_BOOK3E_64 << 214 /* << 215 * The booting_thread_hwid holds the thread id << 216 * hotplug case. It is set by cpu hotplug code << 217 * The thread id is the same as the initial va << 218 * bit field. << 219 */ << 220 .globl booting_thread_hwid << 221 booting_thread_hwid: << 222 .long INVALID_THREAD_HWID << 223 .align 3 << 224 /* << 225 * start a thread in the same core << 226 * input parameters: << 227 * r3 = the thread physical id << 228 * r4 = the entry point where thread starts << 229 */ << 230 _GLOBAL(book3e_start_thread) << 231 LOAD_REG_IMMEDIATE(r5, MSR_KERNEL) << 232 cmpwi r3, 0 << 233 beq 10f << 234 cmpwi r3, 1 << 235 beq 11f << 236 /* If the thread id is invalid, just e << 237 b 13f << 238 10: << 239 MTTMR(TMRN_IMSR0, 5) << 240 MTTMR(TMRN_INIA0, 4) << 241 b 12f << 242 11: << 243 MTTMR(TMRN_IMSR1, 5) << 244 MTTMR(TMRN_INIA1, 4) << 245 12: << 246 isync << 247 li r6, 1 << 248 sld r6, r6, r3 << 249 mtspr SPRN_TENS, r6 << 250 13: << 251 blr << 252 58 253 /* !! 59 .ascii "HdrS" 254 * stop a thread in the same core !! 
60 .word LINUX_VERSION_CODE 255 * input parameter: << 256 * r3 = the thread physical id << 257 */ << 258 _GLOBAL(book3e_stop_thread) << 259 cmpwi r3, 0 << 260 beq 10f << 261 cmpwi r3, 1 << 262 beq 10f << 263 /* If the thread id is invalid, just e << 264 b 13f << 265 10: << 266 li r4, 1 << 267 sld r4, r4, r3 << 268 mtspr SPRN_TENC, r4 << 269 13: << 270 blr << 271 << 272 _GLOBAL(fsl_secondary_thread_init) << 273 mfspr r4,SPRN_BUCSR << 274 << 275 /* Enable branch prediction */ << 276 lis r3,BUCSR_INIT@h << 277 ori r3,r3,BUCSR_INIT@l << 278 mtspr SPRN_BUCSR,r3 << 279 isync << 280 61 281 /* !! 62 /* History: 282 * Fix PIR to match the linear numberi << 283 * << 284 * On e6500, the reset value of PIR us << 285 * the thread within a core, and the u << 286 * number. There are two threads per << 287 * but the low bit right by two bits s << 288 * continuous. << 289 * 63 * 290 * If the old value of BUCSR is non-ze !! 64 * 0x0300 : Supports being located at other than 0x4000 291 * before. Thus, we assume we are com !! 65 * 0x0202 : Supports kernel params string 292 * scenario, and PIR is already set to !! 66 * 0x0201 : Supports reboot_command 293 * is a bit of a hack, but there are l !! 67 */ 294 * getting information into the thread !! 68 .half 0x0301 /* HdrS version */ 295 * seemed like they'd be overkill. We !! 69 296 * at the old PIR value which state it !! 70 root_flags: 297 * could be valid for one thread out o !! 71 .half 1 298 * thread in Linux. !! 72 root_dev: 299 */ !! 73 .half 0 300 !! 74 ram_flags: 301 mfspr r3, SPRN_PIR !! 75 .half 0 302 cmpwi r4,0 !! 76 sparc_ramdisk_image: 303 bne 1f !! 77 .word 0 304 rlwimi r3, r3, 30, 2, 30 !! 78 sparc_ramdisk_size: 305 mtspr SPRN_PIR, r3 !! 79 .word 0 306 1: !! 80 .xword reboot_command 307 mr r24,r3 !! 81 .xword bootstr_info 308 !! 82 sparc_ramdisk_image64: 309 /* turn on 64-bit mode */ !! 83 .xword 0 310 bl enable_64b_mode !! 84 .word _end 311 !! 85 312 /* Book3E initialization */ !! 86 /* PROM cif handler code address is in %o4. */ 313 mr r3,r24 !! 87 sparc64_boot: 314 bl book3e_secondary_thread_init !! 88 mov %o4, %l7 315 bl relative_toc << 316 << 317 b generic_secondary_common_init << 318 << 319 #endif /* CONFIG_PPC_BOOK3E_64 */ << 320 << 321 /* << 322 * On pSeries and most other platforms, second << 323 * in the following code. << 324 * At entry, r3 = this processor's number (phy << 325 * << 326 * On Book3E, r4 = 1 to indicate that the init << 327 * this core already exists (setup via some ot << 328 * as SCOM before entry). << 329 */ << 330 _GLOBAL(generic_secondary_smp_init) << 331 FIXUP_ENDIAN << 332 << 333 li r13,0 << 334 << 335 /* Poison TOC */ << 336 li r2,-1 << 337 << 338 mr r24,r3 << 339 mr r25,r4 << 340 << 341 /* turn on 64-bit mode */ << 342 bl enable_64b_mode << 343 << 344 #ifdef CONFIG_PPC_BOOK3E_64 << 345 /* Book3E initialization */ << 346 mr r3,r24 << 347 mr r4,r25 << 348 bl book3e_secondary_core_init << 349 /* Now NIA and r2 are relocated to PAG << 350 /* << 351 * After common core init has finished, check << 352 * one we wanted to boot. If not, start the sp << 353 * current thread. << 354 */ << 355 LOAD_REG_ADDR(r4, booting_thread_hwid) << 356 lwz r3, 0(r4) << 357 li r5, INVALID_THREAD_HWID << 358 cmpw r3, r5 << 359 beq 20f << 360 << 361 /* << 362 * The value of booting_thread_hwid ha << 363 * so make it invalid. << 364 */ << 365 stw r5, 0(r4) << 366 << 367 /* << 368 * Get the current thread id and check << 369 * If not, start the one specified in << 370 * the current thread. 
<< 371 */ << 372 mfspr r8, SPRN_TIR << 373 cmpw r3, r8 << 374 beq 20f << 375 << 376 /* start the specified thread */ << 377 LOAD_REG_ADDR(r5, DOTSYM(fsl_secondary << 378 bl book3e_start_thread << 379 << 380 /* stop the current thread */ << 381 mr r3, r8 << 382 bl book3e_stop_thread << 383 10: << 384 b 10b << 385 20: << 386 #else << 387 /* Now the MMU is off, can branch to o << 388 bcl 20,31,$+4 << 389 1: mflr r11 << 390 addi r11,r11,(2f - 1b) << 391 tovirt(r11, r11) << 392 mtctr r11 << 393 bctr << 394 2: << 395 bl relative_toc << 396 #endif << 397 << 398 generic_secondary_common_init: << 399 /* Set up a paca value for this proces << 400 * physical cpu id in r24, we need to << 401 * which logical id maps to our physic << 402 */ << 403 #ifndef CONFIG_SMP << 404 b kexec_wait /* wai << 405 #else << 406 LOAD_REG_ADDR(r8, paca_ptrs) /* Loa << 407 ld r8,0(r8) /* Get << 408 #if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_ << 409 LOAD_REG_IMMEDIATE(r7, NR_CPUS) << 410 #else << 411 LOAD_REG_ADDR(r7, nr_cpu_ids) /* Loa << 412 lwz r7,0(r7) /* als << 413 #endif << 414 li r5,0 /* log << 415 1: << 416 sldi r9,r5,3 /* get << 417 ldx r13,r9,r8 /* r13 << 418 lhz r6,PACAHWCPUID(r13) /* Loa << 419 cmpw r6,r24 /* Com << 420 beq 2f << 421 addi r5,r5,1 << 422 cmpw r5,r7 /* Che << 423 blt 1b << 424 << 425 mr r3,r24 /* not << 426 b kexec_wait /* nex << 427 << 428 2: SET_PACA(r13) << 429 #ifdef CONFIG_PPC_BOOK3E_64 << 430 addi r12,r13,PACA_EXTLB /* and << 431 mtspr SPRN_SPRG_TLB_EXFRAME,r12 << 432 #endif << 433 << 434 /* From now on, r24 is expected to be << 435 mr r24,r5 << 436 << 437 /* Create a temp kernel stack for use << 438 ld r1,PACAEMERGSP(r13) << 439 subi r1,r1,STACK_FRAME_MIN_SIZE << 440 << 441 /* See if we need to call a cpu state << 442 LOAD_REG_ADDR(r23, cur_cpu_spec) << 443 ld r23,0(r23) << 444 ld r12,CPU_SPEC_RESTORE(r23) << 445 cmpdi 0,r12,0 << 446 beq 3f << 447 #ifdef CONFIG_PPC64_ELF_ABI_V1 << 448 ld r12,0(r12) << 449 #endif << 450 mtctr r12 << 451 bctrl << 452 << 453 3: LOAD_REG_ADDR(r3, spinning_secondaries << 454 lwarx r4,0,r3 << 455 subi r4,r4,1 << 456 stwcx. r4,0,r3 << 457 bne 3b << 458 isync << 459 << 460 4: HMT_LOW << 461 lbz r23,PACAPROCSTART(r13) /* Tes << 462 /* sta << 463 cmpwi 0,r23,0 << 464 beq 4b /* Loo << 465 << 466 sync /* ord << 467 isync /* In << 468 << 469 b __secondary_start << 470 #endif /* SMP */ << 471 << 472 /* << 473 * Turn the MMU off. << 474 * Assumes we're mapped EA == RA if the MMU is << 475 */ << 476 #ifdef CONFIG_PPC_BOOK3S << 477 SYM_FUNC_START_LOCAL(__mmu_off) << 478 mfmsr r3 << 479 andi. r0,r3,MSR_IR|MSR_DR << 480 beqlr << 481 mflr r4 << 482 andc r3,r3,r0 << 483 mtspr SPRN_SRR0,r4 << 484 mtspr SPRN_SRR1,r3 << 485 sync << 486 rfid << 487 b . /* prevent speculative << 488 SYM_FUNC_END(__mmu_off) << 489 << 490 SYM_FUNC_START_LOCAL(start_initialization_book << 491 mflr r25 << 492 << 493 /* Setup some critical 970 SPRs before << 494 mfspr r0,SPRN_PVR << 495 srwi r0,r0,16 << 496 cmpwi r0,0x39 /* 970 */ << 497 beq 1f << 498 cmpwi r0,0x3c /* 970FX */ << 499 beq 1f << 500 cmpwi r0,0x44 /* 970MP */ << 501 beq 1f << 502 cmpwi r0,0x45 /* 970GX */ << 503 bne 2f << 504 1: bl __cpu_preinit_ppc970 << 505 2: << 506 89 507 /* Switch off MMU if not already off * !! 90 /* We need to remap the kernel. Use position independent 508 bl __mmu_off !! 91 * code to remap us to KERNBASE. 509 !! 92 * 510 /* Now the MMU is off, can return to o !! 93 * SILO can invoke us with 32-bit address masking enabled, 511 tovirt(r25,r25) !! 94 * so make sure that's clear. 
512 mtlr r25 << 513 blr << 514 SYM_FUNC_END(start_initialization_book3s) << 515 #endif << 516 << 517 /* << 518 * Here is our main kernel entry point. We sup << 519 * depending on the value of r5. << 520 * << 521 * r5 != NULL -> OF entry, we go to prom_ini << 522 * in r3...r7 << 523 * << 524 * r5 == NULL -> kexec style entry. r3 is a << 525 * DT block, r4 is a physical << 526 * << 527 */ << 528 __start_initialization_multiplatform: << 529 /* Make sure we are running in 64 bits << 530 bl enable_64b_mode << 531 << 532 /* Zero r13 (paca) so early program ch << 533 li r13,0 << 534 << 535 /* Poison TOC */ << 536 li r2,-1 << 537 << 538 /* << 539 * Are we booted from a PROM Of-type c << 540 */ 95 */ 541 cmpldi cr0,r5,0 !! 96 rdpr %pstate, %g1 542 beq 1f !! 97 andn %g1, PSTATE_AM, %g1 543 b __boot_from_prom !! 98 wrpr %g1, 0x0, %pstate >> 99 ba,a,pt %xcc, 1f >> 100 nop >> 101 >> 102 .globl prom_finddev_name, prom_chosen_path, prom_root_node >> 103 .globl prom_getprop_name, prom_mmu_name, prom_peer_name >> 104 .globl prom_callmethod_name, prom_translate_name, prom_root_compatible >> 105 .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache >> 106 .globl prom_boot_mapped_pc, prom_boot_mapping_mode >> 107 .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low >> 108 .globl prom_compatible_name, prom_cpu_path, prom_cpu_compatible >> 109 .globl is_sun4v, sun4v_chip_type, prom_set_trap_table_name >> 110 prom_peer_name: >> 111 .asciz "peer" >> 112 prom_compatible_name: >> 113 .asciz "compatible" >> 114 prom_finddev_name: >> 115 .asciz "finddevice" >> 116 prom_chosen_path: >> 117 .asciz "/chosen" >> 118 prom_cpu_path: >> 119 .asciz "/cpu" >> 120 prom_getprop_name: >> 121 .asciz "getprop" >> 122 prom_mmu_name: >> 123 .asciz "mmu" >> 124 prom_callmethod_name: >> 125 .asciz "call-method" >> 126 prom_translate_name: >> 127 .asciz "translate" >> 128 prom_map_name: >> 129 .asciz "map" >> 130 prom_unmap_name: >> 131 .asciz "unmap" >> 132 prom_set_trap_table_name: >> 133 .asciz "SUNW,set-trap-table" >> 134 prom_sun4v_name: >> 135 .asciz "sun4v" >> 136 prom_niagara_prefix: >> 137 .asciz "SUNW,UltraSPARC-T" >> 138 prom_sparc_prefix: >> 139 .asciz "SPARC-" >> 140 prom_sparc64x_prefix: >> 141 .asciz "SPARC64-X" >> 142 .align 4 >> 143 prom_root_compatible: >> 144 .skip 64 >> 145 prom_cpu_compatible: >> 146 .skip 64 >> 147 prom_root_node: >> 148 .word 0 >> 149 EXPORT_SYMBOL(prom_root_node) >> 150 prom_mmu_ihandle_cache: >> 151 .word 0 >> 152 prom_boot_mapped_pc: >> 153 .word 0 >> 154 prom_boot_mapping_mode: >> 155 .word 0 >> 156 .align 8 >> 157 prom_boot_mapping_phys_high: >> 158 .xword 0 >> 159 prom_boot_mapping_phys_low: >> 160 .xword 0 >> 161 is_sun4v: >> 162 .word 0 >> 163 sun4v_chip_type: >> 164 .word SUN4V_CHIP_INVALID >> 165 EXPORT_SYMBOL(sun4v_chip_type) 544 1: 166 1: 545 /* Save parameters */ !! 
167 rd %pc, %l0 546 mr r31,r3 << 547 mr r30,r4 << 548 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL << 549 /* Save OPAL entry */ << 550 mr r28,r8 << 551 mr r29,r9 << 552 #endif << 553 << 554 /* Get TOC pointer (current runtime ad << 555 bl relative_toc << 556 << 557 /* These functions return to the virtu << 558 #ifdef CONFIG_PPC_BOOK3E_64 << 559 bl start_initialization_book3e << 560 #else << 561 bl start_initialization_book3s << 562 #endif /* CONFIG_PPC_BOOK3E_64 */ << 563 << 564 /* Get TOC pointer, virtual */ << 565 bl relative_toc << 566 << 567 /* find out where we are now */ << 568 << 569 /* OPAL doesn't pass base address in r << 570 bcl 20,31,$+4 << 571 0: mflr r26 /* r26 << 572 addis r26,r26,(_stext - 0b)@ha << 573 addi r26,r26,(_stext - 0b)@l /* cur << 574 << 575 b __after_prom_start << 576 << 577 __REF << 578 __boot_from_prom: << 579 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE << 580 /* Get TOC pointer, non-virtual */ << 581 bl relative_toc << 582 << 583 /* find out where we are now */ << 584 bcl 20,31,$+4 << 585 0: mflr r26 /* r26 << 586 addis r26,r26,(_stext - 0b)@ha << 587 addi r26,r26,(_stext - 0b)@l /* cur << 588 << 589 /* Save parameters */ << 590 mr r31,r3 << 591 mr r30,r4 << 592 mr r29,r5 << 593 mr r28,r6 << 594 mr r27,r7 << 595 168 596 /* !! 169 mov (1b - prom_peer_name), %l1 597 * Align the stack to 16-byte boundary !! 170 sub %l0, %l1, %l1 598 * Depending on the size and layout of !! 171 mov 0, %l2 599 * boot binary, the stack pointer may !! 172 600 */ !! 173 /* prom_root_node = prom_peer(0) */ 601 rldicr r1,r1,0,59 !! 174 stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "peer" >> 175 mov 1, %l3 >> 176 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 >> 177 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 >> 178 stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, 0 >> 179 stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 >> 180 call %l7 >> 181 add %sp, (2047 + 128), %o0 ! argument array >> 182 >> 183 ldx [%sp + 2047 + 128 + 0x20], %l4 ! prom root node >> 184 mov (1b - prom_root_node), %l1 >> 185 sub %l0, %l1, %l1 >> 186 stw %l4, [%l1] >> 187 >> 188 mov (1b - prom_getprop_name), %l1 >> 189 mov (1b - prom_compatible_name), %l2 >> 190 mov (1b - prom_root_compatible), %l5 >> 191 sub %l0, %l1, %l1 >> 192 sub %l0, %l2, %l2 >> 193 sub %l0, %l5, %l5 >> 194 >> 195 /* prom_getproperty(prom_root_node, "compatible", >> 196 * &prom_root_compatible, 64) >> 197 */ >> 198 stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" >> 199 mov 4, %l3 >> 200 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 >> 201 mov 1, %l3 >> 202 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 >> 203 stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, prom_root_node >> 204 stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible" >> 205 stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_root_compatible >> 206 mov 64, %l3 >> 207 stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size >> 208 stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 >> 209 call %l7 >> 210 add %sp, (2047 + 128), %o0 ! argument array >> 211 >> 212 mov (1b - prom_finddev_name), %l1 >> 213 mov (1b - prom_chosen_path), %l2 >> 214 mov (1b - prom_boot_mapped_pc), %l3 >> 215 sub %l0, %l1, %l1 >> 216 sub %l0, %l2, %l2 >> 217 sub %l0, %l3, %l3 >> 218 stw %l0, [%l3] >> 219 sub %sp, (192 + 128), %sp >> 220 >> 221 /* chosen_node = prom_finddevice("/chosen") */ >> 222 stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice" >> 223 mov 1, %l3 >> 224 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 >> 225 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 >> 226 stx %l2, [%sp + 2047 + 128 + 0x18] ! 
arg1, "/chosen" >> 227 stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 >> 228 call %l7 >> 229 add %sp, (2047 + 128), %o0 ! argument array >> 230 >> 231 ldx [%sp + 2047 + 128 + 0x20], %l4 ! chosen device node >> 232 >> 233 mov (1b - prom_getprop_name), %l1 >> 234 mov (1b - prom_mmu_name), %l2 >> 235 mov (1b - prom_mmu_ihandle_cache), %l5 >> 236 sub %l0, %l1, %l1 >> 237 sub %l0, %l2, %l2 >> 238 sub %l0, %l5, %l5 >> 239 >> 240 /* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */ >> 241 stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" >> 242 mov 4, %l3 >> 243 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 >> 244 mov 1, %l3 >> 245 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 >> 246 stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, chosen_node >> 247 stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "mmu" >> 248 stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_mmu_ihandle_cache >> 249 mov 4, %l3 >> 250 stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, sizeof(arg3) >> 251 stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 >> 252 call %l7 >> 253 add %sp, (2047 + 128), %o0 ! argument array >> 254 >> 255 mov (1b - prom_callmethod_name), %l1 >> 256 mov (1b - prom_translate_name), %l2 >> 257 sub %l0, %l1, %l1 >> 258 sub %l0, %l2, %l2 >> 259 lduw [%l5], %l5 ! prom_mmu_ihandle_cache >> 260 >> 261 stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "call-method" >> 262 mov 3, %l3 >> 263 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 3 >> 264 mov 5, %l3 >> 265 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 5 >> 266 stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1: "translate" >> 267 stx %l5, [%sp + 2047 + 128 + 0x20] ! arg2: prom_mmu_ihandle_cache >> 268 /* PAGE align */ >> 269 srlx %l0, 13, %l3 >> 270 sllx %l3, 13, %l3 >> 271 stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: vaddr, our PC >> 272 stx %g0, [%sp + 2047 + 128 + 0x30] ! res1 >> 273 stx %g0, [%sp + 2047 + 128 + 0x38] ! res2 >> 274 stx %g0, [%sp + 2047 + 128 + 0x40] ! res3 >> 275 stx %g0, [%sp + 2047 + 128 + 0x48] ! res4 >> 276 stx %g0, [%sp + 2047 + 128 + 0x50] ! res5 >> 277 call %l7 >> 278 add %sp, (2047 + 128), %o0 ! argument array >> 279 >> 280 ldx [%sp + 2047 + 128 + 0x40], %l1 ! translation mode >> 281 mov (1b - prom_boot_mapping_mode), %l4 >> 282 sub %l0, %l4, %l4 >> 283 stw %l1, [%l4] >> 284 mov (1b - prom_boot_mapping_phys_high), %l4 >> 285 sub %l0, %l4, %l4 >> 286 ldx [%sp + 2047 + 128 + 0x48], %l2 ! physaddr high >> 287 stx %l2, [%l4 + 0x0] >> 288 ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low >> 289 /* 4MB align */ >> 290 srlx %l3, ILOG2_4MB, %l3 >> 291 sllx %l3, ILOG2_4MB, %l3 >> 292 stx %l3, [%l4 + 0x8] >> 293 >> 294 /* Leave service as-is, "call-method" */ >> 295 mov 7, %l3 >> 296 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 7 >> 297 mov 1, %l3 >> 298 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 >> 299 mov (1b - prom_map_name), %l3 >> 300 sub %l0, %l3, %l3 >> 301 stx %l3, [%sp + 2047 + 128 + 0x18] ! arg1: "map" >> 302 /* Leave arg2 as-is, prom_mmu_ihandle_cache */ >> 303 mov -1, %l3 >> 304 stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default) >> 305 /* 4MB align the kernel image size. */ >> 306 set (_end - KERNBASE), %l3 >> 307 set ((4 * 1024 * 1024) - 1), %l4 >> 308 add %l3, %l4, %l3 >> 309 andn %l3, %l4, %l3 >> 310 stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: roundup(ksize, 4MB) >> 311 sethi %hi(KERNBASE), %l3 >> 312 stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE) >> 313 stx %g0, [%sp + 2047 + 128 + 0x40] ! 
arg6: empty >> 314 mov (1b - prom_boot_mapping_phys_low), %l3 >> 315 sub %l0, %l3, %l3 >> 316 ldx [%l3], %l3 >> 317 stx %l3, [%sp + 2047 + 128 + 0x48] ! arg7: phys addr >> 318 call %l7 >> 319 add %sp, (2047 + 128), %o0 ! argument array >> 320 >> 321 add %sp, (192 + 128), %sp >> 322 >> 323 sethi %hi(prom_root_compatible), %g1 >> 324 or %g1, %lo(prom_root_compatible), %g1 >> 325 sethi %hi(prom_sun4v_name), %g7 >> 326 or %g7, %lo(prom_sun4v_name), %g7 >> 327 mov 5, %g3 >> 328 90: ldub [%g7], %g2 >> 329 ldub [%g1], %g4 >> 330 cmp %g2, %g4 >> 331 bne,pn %icc, 80f >> 332 add %g7, 1, %g7 >> 333 subcc %g3, 1, %g3 >> 334 bne,pt %xcc, 90b >> 335 add %g1, 1, %g1 >> 336 >> 337 sethi %hi(is_sun4v), %g1 >> 338 or %g1, %lo(is_sun4v), %g1 >> 339 mov 1, %g7 >> 340 stw %g7, [%g1] >> 341 >> 342 /* cpu_node = prom_finddevice("/cpu") */ >> 343 mov (1b - prom_finddev_name), %l1 >> 344 mov (1b - prom_cpu_path), %l2 >> 345 sub %l0, %l1, %l1 >> 346 sub %l0, %l2, %l2 >> 347 sub %sp, (192 + 128), %sp >> 348 >> 349 stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice" >> 350 mov 1, %l3 >> 351 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1 >> 352 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 >> 353 stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/cpu" >> 354 stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1 >> 355 call %l7 >> 356 add %sp, (2047 + 128), %o0 ! argument array >> 357 >> 358 ldx [%sp + 2047 + 128 + 0x20], %l4 ! cpu device node >> 359 >> 360 mov (1b - prom_getprop_name), %l1 >> 361 mov (1b - prom_compatible_name), %l2 >> 362 mov (1b - prom_cpu_compatible), %l5 >> 363 sub %l0, %l1, %l1 >> 364 sub %l0, %l2, %l2 >> 365 sub %l0, %l5, %l5 >> 366 >> 367 /* prom_getproperty(cpu_node, "compatible", >> 368 * &prom_cpu_compatible, 64) >> 369 */ >> 370 stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop" >> 371 mov 4, %l3 >> 372 stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4 >> 373 mov 1, %l3 >> 374 stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1 >> 375 stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, cpu_node >> 376 stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible" >> 377 stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_cpu_compatible >> 378 mov 64, %l3 >> 379 stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size >> 380 stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1 >> 381 call %l7 >> 382 add %sp, (2047 + 128), %o0 ! 
argument array >> 383 >> 384 add %sp, (192 + 128), %sp >> 385 >> 386 sethi %hi(prom_cpu_compatible), %g1 >> 387 or %g1, %lo(prom_cpu_compatible), %g1 >> 388 sethi %hi(prom_niagara_prefix), %g7 >> 389 or %g7, %lo(prom_niagara_prefix), %g7 >> 390 mov 17, %g3 >> 391 90: ldub [%g7], %g2 >> 392 ldub [%g1], %g4 >> 393 cmp %g2, %g4 >> 394 bne,pn %icc, 89f >> 395 add %g7, 1, %g7 >> 396 subcc %g3, 1, %g3 >> 397 bne,pt %xcc, 90b >> 398 add %g1, 1, %g1 >> 399 ba,pt %xcc, 91f >> 400 nop >> 401 >> 402 89: sethi %hi(prom_cpu_compatible), %g1 >> 403 or %g1, %lo(prom_cpu_compatible), %g1 >> 404 sethi %hi(prom_sparc_prefix), %g7 >> 405 or %g7, %lo(prom_sparc_prefix), %g7 >> 406 mov 6, %g3 >> 407 90: ldub [%g7], %g2 >> 408 ldub [%g1], %g4 >> 409 cmp %g2, %g4 >> 410 bne,pn %icc, 4f >> 411 add %g7, 1, %g7 >> 412 subcc %g3, 1, %g3 >> 413 bne,pt %xcc, 90b >> 414 add %g1, 1, %g1 >> 415 >> 416 sethi %hi(prom_cpu_compatible), %g1 >> 417 or %g1, %lo(prom_cpu_compatible), %g1 >> 418 ldub [%g1 + 6], %g2 >> 419 cmp %g2, 'T' >> 420 be,pt %xcc, 70f >> 421 cmp %g2, 'M' >> 422 be,pt %xcc, 70f >> 423 cmp %g2, 'S' >> 424 bne,pn %xcc, 49f >> 425 nop >> 426 >> 427 70: ldub [%g1 + 7], %g2 >> 428 cmp %g2, CPU_ID_NIAGARA3 >> 429 be,pt %xcc, 5f >> 430 mov SUN4V_CHIP_NIAGARA3, %g4 >> 431 cmp %g2, CPU_ID_NIAGARA4 >> 432 be,pt %xcc, 5f >> 433 mov SUN4V_CHIP_NIAGARA4, %g4 >> 434 cmp %g2, CPU_ID_NIAGARA5 >> 435 be,pt %xcc, 5f >> 436 mov SUN4V_CHIP_NIAGARA5, %g4 >> 437 cmp %g2, CPU_ID_M6 >> 438 be,pt %xcc, 5f >> 439 mov SUN4V_CHIP_SPARC_M6, %g4 >> 440 cmp %g2, CPU_ID_M7 >> 441 be,pt %xcc, 5f >> 442 mov SUN4V_CHIP_SPARC_M7, %g4 >> 443 cmp %g2, CPU_ID_M8 >> 444 be,pt %xcc, 5f >> 445 mov SUN4V_CHIP_SPARC_M8, %g4 >> 446 cmp %g2, CPU_ID_SONOMA1 >> 447 be,pt %xcc, 5f >> 448 mov SUN4V_CHIP_SPARC_SN, %g4 >> 449 ba,pt %xcc, 49f >> 450 nop >> 451 >> 452 91: sethi %hi(prom_cpu_compatible), %g1 >> 453 or %g1, %lo(prom_cpu_compatible), %g1 >> 454 ldub [%g1 + 17], %g2 >> 455 cmp %g2, CPU_ID_NIAGARA1 >> 456 be,pt %xcc, 5f >> 457 mov SUN4V_CHIP_NIAGARA1, %g4 >> 458 cmp %g2, CPU_ID_NIAGARA2 >> 459 be,pt %xcc, 5f >> 460 mov SUN4V_CHIP_NIAGARA2, %g4 >> 461 >> 462 4: >> 463 /* Athena */ >> 464 sethi %hi(prom_cpu_compatible), %g1 >> 465 or %g1, %lo(prom_cpu_compatible), %g1 >> 466 sethi %hi(prom_sparc64x_prefix), %g7 >> 467 or %g7, %lo(prom_sparc64x_prefix), %g7 >> 468 mov 9, %g3 >> 469 41: ldub [%g7], %g2 >> 470 ldub [%g1], %g4 >> 471 cmp %g2, %g4 >> 472 bne,pn %icc, 49f >> 473 add %g7, 1, %g7 >> 474 subcc %g3, 1, %g3 >> 475 bne,pt %xcc, 41b >> 476 add %g1, 1, %g1 >> 477 ba,pt %xcc, 5f >> 478 mov SUN4V_CHIP_SPARC64X, %g4 >> 479 >> 480 49: >> 481 mov SUN4V_CHIP_UNKNOWN, %g4 >> 482 5: sethi %hi(sun4v_chip_type), %g2 >> 483 or %g2, %lo(sun4v_chip_type), %g2 >> 484 stw %g4, [%g2] >> 485 >> 486 80: >> 487 BRANCH_IF_SUN4V(g1, jump_to_sun4u_init) >> 488 BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot) >> 489 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot) >> 490 ba,pt %xcc, spitfire_boot >> 491 nop >> 492 >> 493 cheetah_plus_boot: >> 494 /* Preserve OBP chosen DCU and DCR register settings. 
*/ >> 495 ba,pt %xcc, cheetah_generic_boot >> 496 nop >> 497 >> 498 cheetah_boot: >> 499 mov DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1 >> 500 wr %g1, %asr18 >> 501 >> 502 sethi %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7 >> 503 or %g7, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7 >> 504 sllx %g7, 32, %g7 >> 505 or %g7, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g7 >> 506 stxa %g7, [%g0] ASI_DCU_CONTROL_REG >> 507 membar #Sync >> 508 >> 509 cheetah_generic_boot: >> 510 mov TSB_EXTENSION_P, %g3 >> 511 stxa %g0, [%g3] ASI_DMMU >> 512 stxa %g0, [%g3] ASI_IMMU >> 513 membar #Sync >> 514 >> 515 mov TSB_EXTENSION_S, %g3 >> 516 stxa %g0, [%g3] ASI_DMMU >> 517 membar #Sync >> 518 >> 519 mov TSB_EXTENSION_N, %g3 >> 520 stxa %g0, [%g3] ASI_DMMU >> 521 stxa %g0, [%g3] ASI_IMMU >> 522 membar #Sync >> 523 >> 524 ba,a,pt %xcc, jump_to_sun4u_init >> 525 >> 526 spitfire_boot: >> 527 /* Typically PROM has already enabled both MMU's and both on-chip >> 528 * caches, but we do it here anyway just to be paranoid. >> 529 */ >> 530 mov (LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1 >> 531 stxa %g1, [%g0] ASI_LSU_CONTROL >> 532 membar #Sync >> 533 >> 534 jump_to_sun4u_init: >> 535 /* >> 536 * Make sure we are in privileged mode, have address masking, >> 537 * using the ordinary globals and have enabled floating >> 538 * point. >> 539 * >> 540 * Again, typically PROM has left %pil at 13 or similar, and >> 541 * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate. >> 542 */ >> 543 wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate >> 544 wr %g0, 0, %fprs >> 545 >> 546 set sun4u_init, %g2 >> 547 jmpl %g2 + %g0, %g0 >> 548 nop >> 549 >> 550 __REF >> 551 sun4u_init: >> 552 BRANCH_IF_SUN4V(g1, sun4v_init) >> 553 >> 554 /* Set ctx 0 */ >> 555 mov PRIMARY_CONTEXT, %g7 >> 556 stxa %g0, [%g7] ASI_DMMU >> 557 membar #Sync >> 558 >> 559 mov SECONDARY_CONTEXT, %g7 >> 560 stxa %g0, [%g7] ASI_DMMU >> 561 membar #Sync >> 562 >> 563 ba,a,pt %xcc, sun4u_continue >> 564 >> 565 sun4v_init: >> 566 /* Set ctx 0 */ >> 567 mov PRIMARY_CONTEXT, %g7 >> 568 stxa %g0, [%g7] ASI_MMU >> 569 membar #Sync >> 570 >> 571 mov SECONDARY_CONTEXT, %g7 >> 572 stxa %g0, [%g7] ASI_MMU >> 573 membar #Sync >> 574 ba,a,pt %xcc, niagara_tlb_fixup >> 575 >> 576 sun4u_continue: >> 577 BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup) >> 578 >> 579 ba,a,pt %xcc, spitfire_tlb_fixup >> 580 >> 581 niagara_tlb_fixup: >> 582 mov 3, %g2 /* Set TLB type to hypervisor. */ >> 583 sethi %hi(tlb_type), %g1 >> 584 stw %g2, [%g1 + %lo(tlb_type)] >> 585 >> 586 /* Patch copy/clear ops. 
*/ >> 587 sethi %hi(sun4v_chip_type), %g1 >> 588 lduw [%g1 + %lo(sun4v_chip_type)], %g1 >> 589 cmp %g1, SUN4V_CHIP_NIAGARA1 >> 590 be,pt %xcc, niagara_patch >> 591 cmp %g1, SUN4V_CHIP_NIAGARA2 >> 592 be,pt %xcc, niagara2_patch >> 593 nop >> 594 cmp %g1, SUN4V_CHIP_NIAGARA3 >> 595 be,pt %xcc, niagara2_patch >> 596 nop >> 597 cmp %g1, SUN4V_CHIP_NIAGARA4 >> 598 be,pt %xcc, niagara4_patch >> 599 nop >> 600 cmp %g1, SUN4V_CHIP_NIAGARA5 >> 601 be,pt %xcc, niagara4_patch >> 602 nop >> 603 cmp %g1, SUN4V_CHIP_SPARC_M6 >> 604 be,pt %xcc, niagara4_patch >> 605 nop >> 606 cmp %g1, SUN4V_CHIP_SPARC_M7 >> 607 be,pt %xcc, sparc_m7_patch >> 608 nop >> 609 cmp %g1, SUN4V_CHIP_SPARC_M8 >> 610 be,pt %xcc, sparc_m7_patch >> 611 nop >> 612 cmp %g1, SUN4V_CHIP_SPARC_SN >> 613 be,pt %xcc, niagara4_patch >> 614 nop >> 615 >> 616 call generic_patch_copyops >> 617 nop >> 618 call generic_patch_bzero >> 619 nop >> 620 call generic_patch_pageops >> 621 nop >> 622 >> 623 ba,a,pt %xcc, 80f >> 624 nop >> 625 >> 626 sparc_m7_patch: >> 627 call m7_patch_copyops >> 628 nop >> 629 call m7_patch_bzero >> 630 nop >> 631 call m7_patch_pageops >> 632 nop >> 633 >> 634 ba,a,pt %xcc, 80f >> 635 nop >> 636 >> 637 niagara4_patch: >> 638 call niagara4_patch_copyops >> 639 nop >> 640 call niagara4_patch_bzero >> 641 nop >> 642 call niagara4_patch_pageops >> 643 nop >> 644 call niagara4_patch_fls >> 645 nop >> 646 >> 647 ba,a,pt %xcc, 80f >> 648 nop >> 649 >> 650 niagara2_patch: >> 651 call niagara2_patch_copyops >> 652 nop >> 653 call niagara_patch_bzero >> 654 nop >> 655 call niagara_patch_pageops >> 656 nop >> 657 >> 658 ba,a,pt %xcc, 80f >> 659 nop >> 660 >> 661 niagara_patch: >> 662 call niagara_patch_copyops >> 663 nop >> 664 call niagara_patch_bzero >> 665 nop >> 666 call niagara_patch_pageops >> 667 nop >> 668 >> 669 80: >> 670 /* Patch TLB/cache ops. */ >> 671 call hypervisor_patch_cachetlbops >> 672 nop >> 673 >> 674 ba,a,pt %xcc, tlb_fixup_done >> 675 >> 676 cheetah_tlb_fixup: >> 677 mov 2, %g2 /* Set TLB type to cheetah+. */ >> 678 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f) >> 679 >> 680 mov 1, %g2 /* Set TLB type to cheetah. */ >> 681 >> 682 1: sethi %hi(tlb_type), %g1 >> 683 stw %g2, [%g1 + %lo(tlb_type)] >> 684 >> 685 /* Patch copy/page operations to cheetah optimized versions. */ >> 686 call cheetah_patch_copyops >> 687 nop >> 688 call cheetah_patch_copy_page >> 689 nop >> 690 call cheetah_patch_cachetlbops >> 691 nop >> 692 >> 693 ba,a,pt %xcc, tlb_fixup_done >> 694 >> 695 spitfire_tlb_fixup: >> 696 /* Set TLB type to spitfire. */ >> 697 mov 0, %g2 >> 698 sethi %hi(tlb_type), %g1 >> 699 stw %g2, [%g1 + %lo(tlb_type)] >> 700 >> 701 tlb_fixup_done: >> 702 sethi %hi(init_thread_union), %g6 >> 703 or %g6, %lo(init_thread_union), %g6 >> 704 ldx [%g6 + TI_TASK], %g4 >> 705 >> 706 wr %g0, ASI_P, %asi >> 707 mov 1, %g1 >> 708 sllx %g1, THREAD_SHIFT, %g1 >> 709 sub %g1, (STACKFRAME_SZ + STACK_BIAS + TRACEREG_SZ), %g1 >> 710 add %g6, %g1, %sp >> 711 >> 712 /* Set per-cpu pointer initially to zero, this makes >> 713 * the boot-cpu use the in-kernel-image per-cpu areas >> 714 * before setup_per_cpu_area() is invoked. >> 715 */ >> 716 clr %g5 >> 717 >> 718 wrpr %g0, 0, %wstate >> 719 wrpr %g0, 0x0, %tl >> 720 >> 721 /* Clear the bss */ >> 722 sethi %hi(__bss_start), %o0 >> 723 or %o0, %lo(__bss_start), %o0 >> 724 sethi %hi(_end), %o1 >> 725 or %o1, %lo(_end), %o1 >> 726 call __bzero >> 727 sub %o1, %o0, %o1 >> 728 >> 729 call prom_init >> 730 mov %l7, %o0 ! 
OpenPROM cif handler >> 731 >> 732 /* To create a one-register-window buffer between the kernel's >> 733 * initial stack and the last stack frame we use from the firmware, >> 734 * do the rest of the boot from a C helper function. >> 735 */ >> 736 call start_early_boot >> 737 nop >> 738 /* Not reached... */ 602 739 603 #ifdef CONFIG_RELOCATABLE << 604 /* Relocate code for where we are now << 605 mr r3,r26 << 606 bl relocate << 607 #endif << 608 << 609 /* Restore parameters */ << 610 mr r3,r31 << 611 mr r4,r30 << 612 mr r5,r29 << 613 mr r6,r28 << 614 mr r7,r27 << 615 << 616 /* Do all of the interaction with OF c << 617 mr r8,r26 << 618 bl CFUNC(prom_init) << 619 #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */ << 620 << 621 /* We never return. We also hit that t << 622 * from OF while CONFIG_PPC_OF_BOOT_TR << 623 trap << 624 .previous 740 .previous 625 741 626 __after_prom_start: !! 742 /* This is meant to allow the sharing of this code between 627 #ifdef CONFIG_RELOCATABLE !! 743 * boot processor invocation (via setup_tba() below) and 628 /* process relocations for the final a !! 744 * secondary processor startup (via trampoline.S). The 629 lwz r7,(FIXED_SYMBOL_ABS_ADDR(__ru !! 745 * former does use this code, the latter does not yet due 630 cmplwi cr0,r7,1 /* flagged to !! 746 * to some complexities. That should be fixed up at some 631 mr r25,r26 /* then use cu !! 747 * point. 632 beq 1f !! 748 * 633 LOAD_REG_IMMEDIATE(r25, PAGE_OFFSET) / !! 749 * There used to be enormous complexity wrt. transferring 634 1: mr r3,r25 !! 750 * over from the firmware's trap table to the Linux kernel's. 635 bl relocate !! 751 * For example, there was a chicken & egg problem wrt. building 636 #if defined(CONFIG_PPC_BOOK3E_64) !! 752 * the OBP page tables, yet needing to be on the Linux kernel 637 /* IVPR needs to be set after relocati !! 753 * trap table (to translate PAGE_OFFSET addresses) in order to 638 bl init_core_book3e !! 754 * do that. 639 #endif !! 755 * 640 #endif !! 756 * We now handle OBP tlb misses differently, via linear lookups 641 !! 757 * into the prom_trans[] array. So that specific problem no 642 /* !! 758 * longer exists. Yet, unfortunately there are still some issues 643 * We need to run with _stext at physical addr !! 759 * preventing trampoline.S from using this code... ho hum. 644 * This will leave some code in the first 256B !! 760 */ 645 * real memory, which are reserved for softwar !! 761 .globl setup_trap_table 646 * !! 762 setup_trap_table: 647 * Note: This process overwrites the OF except !! 763 save %sp, -192, %sp 648 */ !! 764 649 LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET) !! 765 /* Force interrupts to be disabled. */ 650 mr r4,r26 /* Loa !! 766 rdpr %pstate, %l0 651 cmpld r3,r4 /* Che !! 767 andn %l0, PSTATE_IE, %o1 652 beq 9f /* If !! 768 wrpr %o1, 0x0, %pstate 653 li r6,0x100 /* Sta !! 769 rdpr %pil, %l1 654 /* byt !! 770 wrpr %g0, PIL_NORMAL_MAX, %pil 655 !! 771 656 #ifdef CONFIG_RELOCATABLE !! 772 /* Make the firmware call to jump over to the Linux trap table. */ 657 /* !! 773 sethi %hi(is_sun4v), %o0 658 * Check if the kernel has to be running as re !! 774 lduw [%o0 + %lo(is_sun4v)], %o0 659 * variable __run_at_load, if it is set the ke !! 775 brz,pt %o0, 1f 660 * kernel, otherwise it will be moved to PHYSI !! 776 nop 661 */ !! 777 662 lwz r7,(FIXED_SYMBOL_ABS_ADDR(__ru !! 778 TRAP_LOAD_TRAP_BLOCK(%g2, %g3) 663 cmplwi cr0,r7,1 !! 779 add %g2, TRAP_PER_CPU_FAULT_INFO, %g2 664 bne 3f !! 
780 stxa %g2, [%g0] ASI_SCRATCHPAD 665 << 666 #ifdef CONFIG_PPC_BOOK3E_64 << 667 LOAD_REG_ADDR(r5, __end_interrupts) << 668 LOAD_REG_ADDR(r11, _stext) << 669 sub r5,r5,r11 << 670 #else << 671 /* just copy interrupts */ << 672 LOAD_REG_IMMEDIATE_SYM(r5, r11, FIXED_ << 673 #endif << 674 b 5f << 675 3: << 676 #endif << 677 /* # bytes of memory to copy */ << 678 lis r5,(ABS_ADDR(copy_to_here, tex << 679 addi r5,r5,(ABS_ADDR(copy_to_here, << 680 << 681 bl copy_and_flush /* cop << 682 /* thi << 683 /* exe << 684 /* Jump to the copy of this code that << 685 addis r8,r3,(ABS_ADDR(4f, text))@ha << 686 addi r12,r8,(ABS_ADDR(4f, text))@l << 687 mtctr r12 << 688 bctr << 689 << 690 .balign 8 << 691 p_end: .8byte _end - copy_to_here << 692 781 693 4: !! 782 /* Compute physical address: 694 /* !! 783 * 695 * Now copy the rest of the kernel up !! 784 * paddr = kern_base + (mmfsa_vaddr - KERNBASE) 696 * _end - copy_to_here to the copy lim << 697 */ 785 */ 698 addis r8,r26,(ABS_ADDR(p_end, text)) !! 786 sethi %hi(KERNBASE), %g3 699 ld r8,(ABS_ADDR(p_end, text))@l(r !! 787 sub %g2, %g3, %g2 700 add r5,r5,r8 !! 788 sethi %hi(kern_base), %g3 701 5: bl copy_and_flush /* cop !! 789 ldx [%g3 + %lo(kern_base)], %g3 >> 790 add %g2, %g3, %o1 >> 791 sethi %hi(sparc64_ttable_tl0), %o0 >> 792 >> 793 set prom_set_trap_table_name, %g2 >> 794 stx %g2, [%sp + 2047 + 128 + 0x00] >> 795 mov 2, %g2 >> 796 stx %g2, [%sp + 2047 + 128 + 0x08] >> 797 mov 0, %g2 >> 798 stx %g2, [%sp + 2047 + 128 + 0x10] >> 799 stx %o0, [%sp + 2047 + 128 + 0x18] >> 800 stx %o1, [%sp + 2047 + 128 + 0x20] >> 801 sethi %hi(p1275buf), %g2 >> 802 or %g2, %lo(p1275buf), %g2 >> 803 ldx [%g2 + 0x08], %o1 >> 804 call %o1 >> 805 add %sp, (2047 + 128), %o0 >> 806 >> 807 ba,a,pt %xcc, 2f >> 808 >> 809 1: sethi %hi(sparc64_ttable_tl0), %o0 >> 810 set prom_set_trap_table_name, %g2 >> 811 stx %g2, [%sp + 2047 + 128 + 0x00] >> 812 mov 1, %g2 >> 813 stx %g2, [%sp + 2047 + 128 + 0x08] >> 814 mov 0, %g2 >> 815 stx %g2, [%sp + 2047 + 128 + 0x10] >> 816 stx %o0, [%sp + 2047 + 128 + 0x18] >> 817 sethi %hi(p1275buf), %g2 >> 818 or %g2, %lo(p1275buf), %g2 >> 819 ldx [%g2 + 0x08], %o1 >> 820 call %o1 >> 821 add %sp, (2047 + 128), %o0 >> 822 >> 823 /* Start using proper page size encodings in ctx register. */ >> 824 2: sethi %hi(sparc64_kern_pri_context), %g3 >> 825 ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2 >> 826 >> 827 mov PRIMARY_CONTEXT, %g1 >> 828 >> 829 661: stxa %g2, [%g1] ASI_DMMU >> 830 .section .sun4v_1insn_patch, "ax" >> 831 .word 661b >> 832 stxa %g2, [%g1] ASI_MMU >> 833 .previous 702 834 703 9: b start_here_multiplatform !! 835 membar #Sync 704 836 705 /* !! 837 BRANCH_IF_SUN4V(o2, 1f) 706 * Copy routine used to copy the kernel to sta << 707 * and flush and invalidate the caches as need << 708 * r3 = dest addr, r4 = source addr, r5 = copy << 709 * on exit, r3, r4, r5 are unchanged, r6 is up << 710 * << 711 * Note: this routine *only* clobbers r0, r6 a << 712 */ << 713 _GLOBAL(copy_and_flush) << 714 addi r5,r5,-8 << 715 addi r6,r6,-8 << 716 4: li r0,8 /* Use << 717 /* den << 718 /* siz << 719 /* ext << 720 /* but << 721 /* Can << 722 /* fro << 723 /* mov << 724 << 725 mtctr r0 /* put << 726 3: addi r6,r6,8 /* cop << 727 ldx r0,r6,r4 << 728 stdx r0,r6,r3 << 729 bdnz 3b << 730 dcbst r6,r3 /* wri << 731 sync << 732 icbi r6,r3 /* flu << 733 cmpld 0,r6,r5 << 734 blt 4b << 735 sync << 736 addi r5,r5,8 << 737 addi r6,r6,8 << 738 isync << 739 blr << 740 838 741 _ASM_NOKPROBE_SYMBOL(copy_and_flush); /* Calle !! 
839 /* Kill PROM timer */ >> 840 sethi %hi(0x80000000), %o2 >> 841 sllx %o2, 32, %o2 >> 842 wr %o2, 0, %tick_cmpr 742 843 743 .align 8 !! 844 BRANCH_IF_ANY_CHEETAH(o2, o3, 1f) 744 copy_to_here: << 745 845 746 #ifdef CONFIG_SMP !! 846 ba,a,pt %xcc, 2f 747 #ifdef CONFIG_PPC_PMAC << 748 /* << 749 * On PowerMac, secondary processors starts fr << 750 * is temporarily turned into a call to one of << 751 */ << 752 .section ".text"; << 753 .align 2 ; << 754 847 755 .globl __secondary_start_pmac_0 !! 848 /* Disable STICK_INT interrupts. */ 756 __secondary_start_pmac_0: << 757 /* NB the entries for cpus 0, 1, 2 mus << 758 li r24,0 << 759 b 1f << 760 li r24,1 << 761 b 1f << 762 li r24,2 << 763 b 1f << 764 li r24,3 << 765 1: 849 1: 766 !! 850 sethi %hi(0x80000000), %o2 767 _GLOBAL(pmac_secondary_start) !! 851 sllx %o2, 32, %o2 768 /* turn on 64-bit mode */ !! 852 wr %o2, %asr25 769 bl enable_64b_mode << 770 << 771 li r0,0 << 772 mfspr r3,SPRN_HID4 << 773 rldimi r3,r0,40,23 /* clear bit 2 << 774 sync << 775 mtspr SPRN_HID4,r3 << 776 isync << 777 sync << 778 slbia << 779 << 780 /* Branch to our PAGE_OFFSET address * << 781 bcl 20,31,$+4 << 782 1: mflr r11 << 783 addi r11,r11,(2f - 1b) << 784 tovirt(r11, r11) << 785 mtctr r11 << 786 bctr << 787 2: << 788 bl relative_toc << 789 853 790 /* Copy some CPU settings from CPU 0 * !! 854 2: 791 bl __restore_cpu_ppc970 !! 855 wrpr %g0, %g0, %wstate 792 << 793 /* pSeries do that early though I don' << 794 mfmsr r3 << 795 ori r3,r3,MSR_RI << 796 mtmsrd r3 /* RI << 797 << 798 /* Set up a paca value for this proces << 799 LOAD_REG_ADDR(r4,paca_ptrs) /* Loa << 800 ld r4,0(r4) /* Get << 801 sldi r5,r24,3 /* get << 802 ldx r13,r5,r4 /* r13 << 803 SET_PACA(r13) /* Sav << 804 << 805 /* Mark interrupts soft and hard disab << 806 * in the PACA when doing hotplug) << 807 */ << 808 li r0,IRQS_DISABLED << 809 stb r0,PACAIRQSOFTMASK(r13) << 810 li r0,PACA_IRQ_HARD_DIS << 811 stb r0,PACAIRQHAPPENED(r13) << 812 << 813 /* Create a temp kernel stack for use << 814 ld r1,PACAEMERGSP(r13) << 815 subi r1,r1,STACK_FRAME_MIN_SIZE << 816 << 817 b __secondary_start << 818 << 819 #endif /* CONFIG_PPC_PMAC */ << 820 << 821 /* << 822 * This function is called after the master CP << 823 * secondary processors. The execution enviro << 824 * The paca for this processor has the followi << 825 * this point: << 826 * 1. Processor number << 827 * 2. Segment table pointer (virtual address << 828 * On entry the following are set: << 829 * r1 = stack pointer (real addr of t << 830 * r24 = cpu# (in Linux terms) << 831 * r13 = paca virtual address << 832 * SPRG_PACA = paca virtual address << 833 */ << 834 .section ".text"; << 835 .align 2 ; << 836 << 837 .globl __secondary_start << 838 __secondary_start: << 839 /* Set thread priority to MEDIUM */ << 840 HMT_MEDIUM << 841 << 842 /* << 843 * Do early setup for this CPU, in par << 844 * can turn it on below. This is a cal << 845 * running on the emergency stack. << 846 */ << 847 bl CFUNC(early_setup_secondary) << 848 856 849 /* !! 857 call init_irqwork_curcpu 850 * The primary has initialized our ker !! 858 nop 851 * it and put it in r1. We must *not* << 852 * below, because it may not be inside << 853 */ << 854 ld r1, PACAKSAVE(r13) << 855 859 856 /* Clear backchain so we get nice back !! 860 /* Now we can restore interrupt state. */ 857 li r7,0 !! 861 wrpr %l0, 0, %pstate 858 mtlr r7 !! 
862 wrpr %l1, 0x0, %pil >> 863 >> 864 ret >> 865 restore >> 866 >> 867 .globl setup_tba >> 868 setup_tba: >> 869 save %sp, -192, %sp >> 870 >> 871 /* The boot processor is the only cpu which invokes this >> 872 * routine, the other cpus set things up via trampoline.S. >> 873 * So save the OBP trap table address here. >> 874 */ >> 875 rdpr %tba, %g7 >> 876 sethi %hi(prom_tba), %o1 >> 877 or %o1, %lo(prom_tba), %o1 >> 878 stx %g7, [%o1] >> 879 >> 880 call setup_trap_table >> 881 nop >> 882 >> 883 ret >> 884 restore >> 885 sparc64_boot_end: >> 886 >> 887 #include "etrap_64.S" >> 888 #include "rtrap_64.S" >> 889 #include "winfixup.S" >> 890 #include "fpu_traps.S" >> 891 #include "ivec.S" >> 892 #include "getsetcc.S" >> 893 #include "utrap.S" >> 894 #include "spiterrs.S" >> 895 #include "cherrs.S" >> 896 #include "misctrap.S" >> 897 #include "syscalls.S" >> 898 #include "helpers.S" >> 899 #include "sun4v_tlb_miss.S" >> 900 #include "sun4v_mcd.S" >> 901 #include "sun4v_ivec.S" >> 902 #include "ktlb.S" >> 903 #include "tsb.S" 859 904 860 /* Mark interrupts soft and hard disab << 861 * in the PACA when doing hotplug) << 862 */ << 863 li r7,IRQS_DISABLED << 864 stb r7,PACAIRQSOFTMASK(r13) << 865 li r0,PACA_IRQ_HARD_DIS << 866 stb r0,PACAIRQHAPPENED(r13) << 867 << 868 /* enable MMU and jump to start_second << 869 LOAD_REG_ADDR(r3, start_secondary_prol << 870 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) << 871 << 872 mtspr SPRN_SRR0,r3 << 873 mtspr SPRN_SRR1,r4 << 874 RFI_TO_KERNEL << 875 b . /* prevent speculative << 876 << 877 /* << 878 * Running with relocation on at this point. << 879 * zero the stack back-chain pointer and get t << 880 * before going into C code. << 881 */ << 882 start_secondary_prolog: << 883 LOAD_PACA_TOC() << 884 li r3,0 << 885 std r3,0(r1) /* Zer << 886 bl CFUNC(start_secondary) << 887 b . << 888 /* 905 /* 889 * Reset stack pointer and call start_secondar !! 906 * The following skip makes sure the trap table in ttable.S is aligned 890 * to continue with online operation when woke !! 907 * on a 32K boundary as required by the v9 specs for TBA register. 891 * from cede in cpu offline. << 892 */ << 893 _GLOBAL(start_secondary_resume) << 894 ld r1,PACAKSAVE(r13) /* Rel << 895 li r3,0 << 896 std r3,0(r1) /* Zer << 897 bl CFUNC(start_secondary) << 898 b . << 899 #endif << 900 << 901 /* << 902 * This subroutine clobbers r11 and r12 << 903 */ << 904 SYM_FUNC_START_LOCAL(enable_64b_mode) << 905 mfmsr r11 /* gra << 906 #ifdef CONFIG_PPC_BOOK3E_64 << 907 oris r11,r11,0x8000 /* CM << 908 mtmsr r11 << 909 #else /* CONFIG_PPC_BOOK3E_64 */ << 910 LOAD_REG_IMMEDIATE(r12, MSR_64BIT) << 911 or r11,r11,r12 << 912 mtmsrd r11 << 913 isync << 914 #endif << 915 blr << 916 SYM_FUNC_END(enable_64b_mode) << 917 << 918 /* << 919 * This puts the TOC pointer into r2, offset b << 920 * by the toolchain). It computes the correct << 921 * are running at the moment, using position-i << 922 * 908 * 923 * Note: The compiler constructs pointers usin !! 909 * We align to a 32K boundary, then we have the 32K kernel TSB, 924 * TOC in -mcmodel=medium mode. After we reloc !! 910 * the 64K kernel 4MB TSB, and then the 32K aligned trap table. 925 * the MMU is on we need our TOC to be a virtu << 926 * these pointers will be real addresses which << 927 * accessed later with the MMU on. We branch t << 928 * while still in real mode then call relative << 929 * this. << 930 */ 911 */ 931 _GLOBAL(relative_toc) !! 912 1: 932 #ifdef CONFIG_PPC_KERNEL_PCREL !! 
913 .skip 0x4000 + _start - 1b 933 tdnei r2,-1 << 934 blr << 935 #else << 936 mflr r0 << 937 bcl 20,31,$+4 << 938 0: mflr r11 << 939 ld r2,(p_toc - 0b)(r11) << 940 add r2,r2,r11 << 941 mtlr r0 << 942 blr << 943 << 944 .balign 8 << 945 p_toc: .8byte .TOC. - 0b << 946 #endif << 947 << 948 /* << 949 * This is where the main kernel code starts. << 950 */ << 951 __REF << 952 start_here_multiplatform: << 953 /* Adjust TOC for moved kernel. Could << 954 bl relative_toc << 955 << 956 /* Clear out the BSS. It may have been << 957 * already but that's irrelevant since << 958 * be detached from the kernel complet << 959 * to clear it now for kexec-style ent << 960 */ << 961 LOAD_REG_ADDR(r11,__bss_stop) << 962 LOAD_REG_ADDR(r8,__bss_start) << 963 sub r11,r11,r8 /* bss << 964 addi r11,r11,7 /* rou << 965 srdi. r11,r11,3 /* shi << 966 beq 4f << 967 addi r8,r8,-8 << 968 li r0,0 << 969 mtctr r11 /* zer << 970 3: stdu r0,8(r8) << 971 bdnz 3b << 972 4: << 973 << 974 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL << 975 /* Setup OPAL entry */ << 976 LOAD_REG_ADDR(r11, opal) << 977 std r28,0(r11); << 978 std r29,8(r11); << 979 #endif << 980 << 981 #ifndef CONFIG_PPC_BOOK3E_64 << 982 mfmsr r6 << 983 ori r6,r6,MSR_RI << 984 mtmsrd r6 /* RI << 985 #endif << 986 << 987 #ifdef CONFIG_RELOCATABLE << 988 /* Save the physical address we're run << 989 LOAD_REG_ADDR(r4, kernstart_addr) << 990 clrldi r0,r25,2 << 991 std r0,0(r4) << 992 #endif << 993 << 994 /* set up a stack pointer */ << 995 LOAD_REG_ADDR(r3,init_thread_union) << 996 LOAD_REG_IMMEDIATE(r1,THREAD_SIZE) << 997 add r1,r3,r1 << 998 li r0,0 << 999 stdu r0,-STACK_FRAME_MIN_SIZE(r1) << 1000 << 1001 /* << 1002 * Do very early kernel initializatio << 1003 * and SLB setup before we turn on re << 1004 */ << 1005 << 1006 #ifdef CONFIG_KASAN << 1007 bl CFUNC(kasan_early_init) << 1008 #endif << 1009 /* Restore parameters passed from pro << 1010 mr r3,r31 << 1011 LOAD_REG_ADDR(r12, DOTSYM(early_setup << 1012 mtctr r12 << 1013 bctrl /* also sets r13 and << 1014 << 1015 LOAD_REG_ADDR(r3, start_here_common) << 1016 ld r4,PACAKMSR(r13) << 1017 mtspr SPRN_SRR0,r3 << 1018 mtspr SPRN_SRR1,r4 << 1019 RFI_TO_KERNEL << 1020 b . /* prevent speculativ << 1021 << 1022 /* This is where all platforms conver << 1023 << 1024 start_here_common: << 1025 /* relocation is on at this point */ << 1026 std r1,PACAKSAVE(r13) << 1027 914 1028 /* Load the TOC (virtual address) */ !! 915 ! 0x0000000000408000 1029 LOAD_PACA_TOC() << 1030 916 1031 /* Mark interrupts soft and hard disa !! 917 .globl swapper_tsb 1032 * in the PACA when doing hotplug) !! 918 swapper_tsb: 1033 */ !! 919 .skip (32 * 1024) 1034 li r0,IRQS_DISABLED !! 920 1035 stb r0,PACAIRQSOFTMASK(r13) !! 921 .globl swapper_4m_tsb 1036 li r0,PACA_IRQ_HARD_DIS !! 922 swapper_4m_tsb: 1037 stb r0,PACAIRQHAPPENED(r13) !! 923 .skip (64 * 1024) 1038 !! 924 1039 /* Generic kernel entry */ !! 925 ! 0x0000000000420000 1040 bl CFUNC(start_kernel) !! 926 1041 !! 927 /* Some care needs to be exercised if you try to move the 1042 /* Not reached */ !! 928 * location of the trap table relative to other things. For 1043 0: trap !! 929 * one thing there are br* instructions in some of the 1044 EMIT_BUG_ENTRY 0b, __FILE__, __LINE__ !! 930 * trap table entires which branch back to code in ktlb.S 1045 .previous !! 931 * Those instructions can only handle a signed 16-bit >> 932 * displacement. >> 933 * >> 934 * There is a binutils bug (bugzilla #4558) which causes >> 935 * the relocation overflow checks for such instructions to >> 936 * not be done correctly. 
So binutils will not notice the >> 937 * error and will instead write junk into the relocation and >> 938 * you'll have an unbootable kernel. >> 939 */ >> 940 #include "ttable_64.S" >> 941 >> 942 ! 0x0000000000428000 >> 943 >> 944 #include "hvcalls.S" >> 945 #include "systbls_64.S" >> 946 >> 947 .data >> 948 .align 8 >> 949 .globl prom_tba, tlb_type >> 950 prom_tba: .xword 0 >> 951 tlb_type: .word 0 /* Must NOT end up in BSS */ >> 952 EXPORT_SYMBOL(tlb_type) >> 953 .section ".fixup",#alloc,#execinstr >> 954 >> 955 ENTRY(__retl_efault) >> 956 retl >> 957 mov -EFAULT, %o0 >> 958 ENDPROC(__retl_efault) >> 959 >> 960 ENTRY(__retl_o1) >> 961 retl >> 962 mov %o1, %o0 >> 963 ENDPROC(__retl_o1) >> 964 >> 965 ENTRY(__retl_o1_asi) >> 966 wr %o5, 0x0, %asi >> 967 retl >> 968 mov %o1, %o0 >> 969 ENDPROC(__retl_o1_asi)
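
Editor's note: the "HdrS" block near the top of this file is the boot header that SILO and other boot loaders parse, which is why the comment above it insists the fields stay upward compatible and that the HdrS version be bumped on any change. As a rough illustration only, here is a packed C view of that layout; the struct name and field names below are invented for the sketch (only root_flags, root_dev, ram_flags and the sparc_ramdisk_* names correspond to labels actually defined in the assembly), and it simply mirrors the order of the directives after the two entry instructions.

#include <stdint.h>

/*
 * Hypothetical C view of the boot header emitted at the top of head_64.S.
 * Offsets are from _start: the first eight bytes are the two entry
 * instructions ("b sparc64_boot"; "flushw"), then the header fields follow
 * back-to-back in the same order as the .ascii/.word/.half/.xword directives.
 */
struct sparc64_boot_header {
	uint32_t boot_insns[2];      /* b sparc64_boot; flushw         */
	char     magic[4];           /* .ascii "HdrS"                  */
	uint32_t linux_version;      /* .word  LINUX_VERSION_CODE      */
	uint16_t hdrs_version;       /* .half  0x0301                  */
	uint16_t root_flags;         /* .half  1                       */
	uint16_t root_dev;           /* .half  0                       */
	uint16_t ram_flags;          /* .half  0                       */
	uint32_t ramdisk_image;      /* sparc_ramdisk_image  (.word)   */
	uint32_t ramdisk_size;       /* sparc_ramdisk_size   (.word)   */
	uint64_t reboot_command_ptr; /* .xword reboot_command          */
	uint64_t bootstr_info_ptr;   /* .xword bootstr_info            */
	uint64_t ramdisk_image64;    /* sparc_ramdisk_image64 (.xword) */
	uint32_t kernel_end;         /* .word  _end                    */
} __attribute__((packed));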
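
Editor's note: most of sparc64_boot and setup_trap_table is hand-rolled IEEE 1275 client-interface calls. Each one stores a service-name pointer, an argument count, a return count, and then the argument and return cells into the 64-bit slots at %sp + 2047 + 128 + 0x00, 0x08, 0x10, ..., and jumps through the PROM cif handler saved in %l7 with %o0 pointing at that array. The C sketch below mirrors the finddevice("/chosen") sequence purely to illustrate the calling convention; p1275_cell_t, cif_handler and prom_finddevice_sketch are made-up names for the sketch, not the kernel's PROM library API.

#include <stdint.h>

typedef uint64_t p1275_cell_t;	/* each slot in the argument array is one 64-bit cell */

/*
 * Sketch of one client-interface call as the assembly performs it:
 * cells[0..2] name the service and give the arg/ret counts, the following
 * cells hold the arguments and then the return slots, and the firmware
 * entry point is called with a pointer to cells[0] (the "call %l7" /
 * "add %sp, (2047 + 128), %o0" pair in head_64.S).
 */
static uint64_t prom_finddevice_sketch(uint64_t (*cif_handler)(p1275_cell_t *),
				       const char *path)
{
	p1275_cell_t cells[5];

	cells[0] = (p1275_cell_t)(uintptr_t)"finddevice"; /* service,  +0x00 */
	cells[1] = 1;                                     /* num_args, +0x08 */
	cells[2] = 1;                                     /* num_rets, +0x10 */
	cells[3] = (p1275_cell_t)(uintptr_t)path;         /* arg1,     +0x18 */
	cells[4] = 0;                                     /* ret1,     +0x20 */

	cif_handler(cells);                               /* call %l7        */

	return cells[4];	/* e.g. the /chosen node handle is read back here */
}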