/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf Electronics
 * Written by Ralf Baechle and Andreas Busse
 * Copyright (C) 1994 - 99, 2003, 06 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Modified for DECStation and hence R3000 support by Paul M. Antoine
 * Further modifications by David S. Miller and Harald Koerfgen
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/threads.h>

#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>

#include <kernel-entry-init.h>
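
/*
 * <kernel-entry-init.h> is picked up from the platform's mach-* include
 * directory; it provides the kernel_entry_setup and smp_slave_setup
 * macros used below (the generic version leaves them empty).
 */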

/*
 * For the moment disable interrupts, mark the kernel mode and
 * set ST0_KX so that the CPU does not spit fire when using
 * 64-bit addresses.  A full initialization of the CPU's status
 * register is done later in per_cpu_trap_init().
 */
	.macro	setup_c0_status set clr
	.set	push
	mfc0	t0, CP0_STATUS
	or	t0, ST0_CU0|\set|0x1f|\clr
	xor	t0, 0x1f|\clr
	mtc0	t0, CP0_STATUS
	.set	noreorder
	sll	zero,3				# ehb
	.set	pop
	.endm

	.macro	setup_c0_status_pri
#ifdef CONFIG_64BIT
	setup_c0_status ST0_KX 0
#else
	setup_c0_status 0 0
#endif
	.endm

	.macro	setup_c0_status_sec
#ifdef CONFIG_64BIT
	setup_c0_status ST0_KX ST0_BEV
#else
	setup_c0_status 0 ST0_BEV
#endif
	.endm
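
/*
 * The or/xor pair above forces the low five Status bits (IE, EXL, ERL
 * and the KSU field) and the \clr bits to zero while turning on CU0 and
 * the \set bits.  For example, the 64-bit boot-CPU variant
 * "setup_c0_status ST0_KX 0" expands to roughly:
 *
 *	mfc0	t0, CP0_STATUS
 *	or	t0, ST0_CU0|ST0_KX|0x1f
 *	xor	t0, 0x1f
 *	mtc0	t0, CP0_STATUS
 *
 * followed by the "sll zero,3" execution-hazard barrier.
 */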

#ifndef CONFIG_NO_EXCEPT_FILL
/*
 * Reserved space for exception handlers.
 * Necessary for machines which link their kernels at KSEG0.
 */
	.fill	0x400
#endif

EXPORT(_stext)

#ifdef CONFIG_BOOT_RAW
/*
 * Give us a fighting chance of running if execution begins at the
 * kernel load address.  This is needed because this platform does
 * not have an ELF loader yet.
 */
FEXPORT(__kernel_entry)
	j	kernel_entry
#endif /* CONFIG_BOOT_RAW */

	__REF

NESTED(kernel_entry, 16, sp)			# kernel entry point

	kernel_entry_setup			# cpu specific setup

	setup_c0_status_pri

	/* We might not get launched at the address the kernel is linked to,
	   so we jump there.  */
	PTR_LA	t0, 0f
	jr	t0
0:

#ifdef CONFIG_USE_OF
#if defined(CONFIG_MIPS_RAW_APPENDED_DTB) || \
    defined(CONFIG_MIPS_ELF_APPENDED_DTB)

	PTR_LA	t2, __appended_dtb

#ifdef CONFIG_CPU_BIG_ENDIAN
	li	t1, 0xd00dfeed
#else /* !CONFIG_CPU_BIG_ENDIAN */
	li	t1, 0xedfe0dd0
#endif /* !CONFIG_CPU_BIG_ENDIAN */
	lw	t0, (t2)
	beq	t0, t1, dtb_found
#endif /* CONFIG_MIPS_RAW_APPENDED_DTB || CONFIG_MIPS_ELF_APPENDED_DTB */
	li	t1, -2
	move	t2, a1
	beq	a0, t1, dtb_found

#ifdef CONFIG_BUILTIN_DTB
	PTR_LA	t2, __dtb_start
	PTR_LA	t1, __dtb_end
	bne	t1, t2, dtb_found
#endif /* CONFIG_BUILTIN_DTB */

	li	t2, 0
dtb_found:
#endif /* CONFIG_USE_OF */
	PTR_LA		t0, __bss_start		# clear .bss
	LONG_S		zero, (t0)
	PTR_LA		t1, __bss_stop - LONGSIZE
1:
	PTR_ADDIU	t0, LONGSIZE
	LONG_S		zero, (t0)
	bne		t0, t1, 1b

	LONG_S		a0, fw_arg0		# firmware arguments
	LONG_S		a1, fw_arg1
	LONG_S		a2, fw_arg2
	LONG_S		a3, fw_arg3

#ifdef CONFIG_USE_OF
	LONG_S		t2, fw_passed_dtb
#endif

	MTC0		zero, CP0_CONTEXT	# clear context register
#ifdef CONFIG_64BIT
	MTC0		zero, CP0_XCONTEXT
#endif
	PTR_LA		$28, init_thread_union
	/* Set the SP after an empty pt_regs.  */
	PTR_LI		sp, _THREAD_SIZE - 32 - PT_SIZE
	PTR_ADDU	sp, $28
	back_to_back_c0_hazard
	set_saved_sp	sp, t0, t1
	PTR_SUBU	sp, 4 * SZREG			# init stack pointer

#ifdef CONFIG_RELOCATABLE
	/* Copy kernel and apply the relocations */
	jal		relocate_kernel

	/* Repoint the sp into the new kernel image */
	PTR_LI		sp, _THREAD_SIZE - 32 - PT_SIZE
	PTR_ADDU	sp, $28
	set_saved_sp	sp, t0, t1
	PTR_SUBU	sp, 4 * SZREG			# init stack pointer

	/*
	 * relocate_kernel returns the entry point either
	 * in the relocated kernel or the original if for
	 * some reason relocation failed - jump there now
	 * with instruction hazard barrier because of the
	 * newly sync'd icache.
	 */
	jr.hb		v0
#else	/* !CONFIG_RELOCATABLE */
	j		start_kernel
#endif /* !CONFIG_RELOCATABLE */
END(kernel_entry)
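
/*
 * Secondary CPUs do not come in through kernel_entry.  The platform's
 * SMP launch code (the MIPS CPS support, for example) hands each of
 * them a stack and a gp register and then jumps to smp_bootstrap below,
 * along the lines of:
 *
 *	PTR_LA	sp, <per-cpu boot stack>	# provided by the platform
 *	PTR_LA	gp, <per-cpu thread pointer>
 *	PTR_LA	t9, smp_bootstrap
 *	jr	t9
 *
 * setup_c0_status_sec additionally clears ST0_BEV so the secondary CPU
 * uses the normal exception vectors from then on.
 */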

#ifdef CONFIG_SMP
/*
 * SMP slave cpus entry point.	Board specific code for bootstrap calls this
 * function after setting up the stack and gp registers.
 */
NESTED(smp_bootstrap, 16, sp)
	smp_slave_setup
	setup_c0_status_sec
	j	start_secondary
	END(smp_bootstrap)
#endif /* CONFIG_SMP */