/* ==== First file in the comparison: PA-RISC kernel entry code (head.S) ==== */

/* This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Hewlett Packard (Paul Bame)
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 *
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */

#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>

#include <asm/assembly.h>

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

	.level	1.1

	__INITDATA
ENTRY(boot_args)
	.word 0 /* arg0 */
	.word 0 /* arg1 */
	.word 0 /* arg2 */
	.word 0 /* arg3 */
END(boot_args)

	__HEAD

	.align	4
	.import init_task,data
	.import init_stack,data
	.import fault_vector_20,code	/* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
	.import fault_vector_11,code	/* IVA parisc 1.1 32 bit */
	.import	$global$		/* forward declaration */
#endif /*!CONFIG_64BIT*/
ENTRY(parisc_kernel_start)
	.proc
	.callinfo

	/* Make sure sr4-sr7 are set to zero for the kernel address space */
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

	/* Clear BSS (shouldn't the boot loader do this?) */

	.import __bss_start,data
	.import __bss_stop,data

	load32		PA(__bss_start),%r3
	load32		PA(__bss_stop),%r4
$bss_loop:
	cmpb,<<,n	%r3,%r4,$bss_loop
	stw,ma		%r0,4(%r3)

	/* Save away the arguments the boot loader passed in (32 bit args) */
	load32		PA(boot_args),%r1
	stw,ma		%arg0,4(%r1)
	stw,ma		%arg1,4(%r1)
	stw,ma		%arg2,4(%r1)
	stw,ma		%arg3,4(%r1)
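	/* Rough C-style sketch of the two loops above (illustrative only,
	 * not taken from the original source):
	 *
	 *	unsigned int *p, *args = (unsigned int *)PA(boot_args);
	 *	for (p = PA(__bss_start); p < PA(__bss_stop); )
	 *		*p++ = 0;			// clear BSS word by word
	 *	args[0] = arg0; args[1] = arg1;		// stash the four boot
	 *	args[2] = arg2; args[3] = arg3;		// loader arguments
	 */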
#if defined(CONFIG_PA20)
	/* check for 64-bit capable CPU as required by the current kernel */
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$cpu_ok

	load32		PA(msg1),%arg0
	ldi		msg1_end-msg1,%arg1
$iodc_panic:
	copy		%arg0, %r10
	copy		%arg1, %r11
	load32		PA(init_stack),%sp
#define MEM_CONS 0x3A0
	ldw		MEM_CONS+32(%r0),%arg0
	ldi		ENTRY_IO_COUT,%arg1
	ldw		MEM_CONS+36(%r0),%arg2
	ldw		MEM_CONS+8(%r0),%arg3
	load32		PA(__bss_start),%r1
	stw		%r1,-52(%sp)
	stw		%r0,-56(%sp)
	stw		%r10,-60(%sp)
	stw		%r11,-64(%sp)
	stw		%r0,-68(%sp)
	load32		PA(.iodc_panic_ret), %rp
	ldw		MEM_CONS+40(%r0),%r1
	bv,n		(%r1)
.iodc_panic_ret:
	b .				/* wait endless with ... */
	or		%r10,%r10,%r10	/* qemu idle sleep */
msg1:	.ascii "Can't boot kernel which was built for PA8X00 CPUs on this machine.\r\n"
msg1_end:

$cpu_ok:
#endif

	.level	PA_ASM_LEVEL

	/* Initialize startup VM. Just map first 16/32 MB of memory */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#if CONFIG_PGTABLE_LEVELS == 3
	/* Set pmd in pgd */
	load32		PA(pmd0),%r5
	shrd		%r5,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
	/* 2-level page table, so pmd == pgd */
	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif

	/* Fill in pmd with enough pte directories */
	load32		PA(pg0),%r1
	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

	ldi		ASM_PT_INITIAL,%r1

1:
	stw		%r3,0(%r4)
	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
	addib,>		-1,%r1,1b
#if CONFIG_PGTABLE_LEVELS == 3
	ldo		ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
	ldo		ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif


	/* Now initialize the PTEs themselves.  We use RWX for
	 * everything ... it will get remapped correctly later */
	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
	load32		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	load32		PA(pg0),%r1

$pgt_fill_loop:
	STREGM		%r3,ASM_PTE_ENTRY_SIZE(%r1)
	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
	addib,>		-1,%r11,$pgt_fill_loop
	nop
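	/* Summary of the page-table setup above (illustrative, not part of
	 * the original comments): swapper_pg_dir (via pmd0 on 3-level
	 * configurations) is pointed at ASM_PT_INITIAL page-table pages
	 * starting at pg0, and their PTEs map the first
	 * (1 << KERNEL_INITIAL_ORDER) bytes of physical memory, beginning
	 * at physical address 0, with _PAGE_KERNEL_RWX protections.  As
	 * the comment above says, this early mapping is replaced with the
	 * proper one later in the boot. */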
	/* Load the return address...er...crash 'n burn */
	copy		%r0,%r2

	/* And the RFI Target address too */
	load32		start_parisc,%r11

	/* And the initial task pointer */
	load32		init_task,%r6
	mtctl		%r6,%cr30

	/* And the stack pointer too */
	load32		init_stack,%sp
	tophys_r1	%sp

#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
	.import _mcount,data
	/* initialize mcount FPTR */
	/* Get the global data pointer */
	loadgp
	load32		PA(_mcount), %r10
	std		%dp,0x18(%r10)
#endif

#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
#ifdef CONFIG_64BIT
	/* Get PDCE_PROC for monarch CPU. */
	ldw		MEM_PDC_LO(%r0),%r3
	ldw		MEM_PDC_HI(%r0),%r10
	depd		%r10, 31, 32, %r3
#endif


#ifdef CONFIG_SMP
	/* Set the smp rendezvous address into page zero.
	** It would be safer to do this in init_smp_config() but
	** it's just way easier to deal with here because
	** of 64-bit function ptrs and the address is local to this file.
	*/
	load32		PA(smp_slave_stext),%r10
	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */

	/* FALLTHROUGH */
	.procend

#ifdef CONFIG_HOTPLUG_CPU
	/* common_stext is far away in another section */
	load32		PA(common_stext), %rp
	bv,n		(%rp)

	/* common_stext and smp_slave_stext need to be in the text section */
	.text
#endif

	/*
	** Code Common to both Monarch and Slave processors.
	** Entry:
	**
	**  1.1:
	**	%r11 must contain RFI target address
	**	%r25/%r26 args to pass to target function
	**	%r2  in case rfi target decides to return
	**
	**  2.0w:
	**	%r3  PDCE_PROC address
	**	%r11 RFI target address
	**
	** Caller must init: SR4-7, %sp, %r10, %r11, %r25/%r26
	*/
common_stext:
	.proc
	.callinfo
#else
	/* Clear PDC entry point - we won't use it */
	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/

#ifdef CONFIG_64BIT
	mfctl		%cr30,%r6
	tophys_r1	%r6

	/* Save the rfi target address */
	STREG		%r11, TASK_PT_GR11(%r6)
	/* Switch to wide mode; Superdome doesn't support narrow PDC
	** calls.
	*/
1:	mfia		%rp		/* clear upper part of pcoq */
	ldo		2f-1b(%rp),%rp
	depdi		0,31,32,%rp
	bv		(%rp)
	ssm		PSW_SM_W,%r0

	/* Set Wide mode as the "Default" (eg for traps)
	** First trap occurs *right* after (or part of) rfi for slave CPUs.
	** Someday, palo might not do this for the Monarch either.
	*/
2:

	ldo		PDC_PSW(%r0),%arg0
	ldo		PDC_PSW_SET_DEFAULTS(%r0),%arg1
	ldo		PDC_PSW_WIDE_BIT(%r0),%arg2
	load32		PA(stext_pdc_ret), %rp
	bv		(%r3)
	copy		%r0,%arg3

stext_pdc_ret:
	LDREG		TASK_PT_GR11(%r6), %r11
	tovirt_r1	%r6
	mtctl		%r6,%cr30
#endif

#ifndef CONFIG_64BIT
	/* clear all BTLBs */
	ldi		PDC_BLOCK_TLB,%arg0
	load32		PA(stext_pdc_btlb_ret), %rp
	ldw		MEM_PDC_LO(%r0),%r3
	bv		(%r3)
	ldi		PDC_BTLB_PURGE_ALL,%arg1
stext_pdc_btlb_ret:
#endif

	/* PARANOID: clear user scratch/user space SR's */
	mtsp	%r0,%sr0
	mtsp	%r0,%sr1
	mtsp	%r0,%sr2
	mtsp	%r0,%sr3

	/* Initialize Protection Registers */
	mtctl	%r0,%cr8
	mtctl	%r0,%cr9
	mtctl	%r0,%cr12
	mtctl	%r0,%cr13

	/* Initialize the global data pointer */
	loadgp

	/* Set up our interrupt table.  HPMCs might not work after this!
	 *
	 * We need to install the correct iva for PA1.1 or PA2.0. The
	 * following short sequence of instructions can determine this
	 * (without being illegal on a PA1.1 machine).
	 */
#ifndef CONFIG_64BIT
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$is_pa20
	ldil		L%PA(fault_vector_11),%r10
	b		$install_iva
	ldo		R%PA(fault_vector_11)(%r10),%r10

$is_pa20:
	.level		PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
	load32		PA(fault_vector_20),%r10

$install_iva:
	mtctl		%r10,%cr14

	b		aligned_rfi	/* Prepare to RFI! */
	nop
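	/* Why the %cr11 probe above works (explanatory note, based on the
	 * PA-RISC architecture rather than on the original comments):
	 * %cr11 is the shift-amount register (SAR), which is 5 bits wide
	 * on PA1.1 but 6 bits wide on PA2.0.  Writing 32 and reading it
	 * back therefore yields 0 on a PA1.1 machine and 32 on PA2.0,
	 * which selects fault_vector_11 or fault_vector_20 as the
	 * interrupt vector installed into %cr14 (iva). */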
	.align 128
aligned_rfi:
	pcxt_ssm_bug

	copy		%r3, %arg0	/* PDCE_PROC for smp_callin() */

	rsm		PSW_SM_QUIET,%r0
	/* Don't need NOPs, have 8 compliant insn before the rfi */

	mtctl		%r0,%cr17	/* Clear IIASQ tail */
	mtctl		%r0,%cr17	/* Clear IIASQ head */

	/* Load RFI target into PC queue */
	mtctl		%r11,%cr18	/* IIAOQ head */
	ldo		4(%r11),%r11
	mtctl		%r11,%cr18	/* IIAOQ tail */

	load32		KERNEL_PSW,%r10
	mtctl		%r10,%ipsw

	tovirt_r1	%sp

	/* Jump through hyperspace to Virt Mode */
	rfi
	nop

	.procend
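	/* Illustrative note (not from the original source): the rfi above
	 * resumes at whatever address was loaded into the IIAOQ queue --
	 * start_parisc() for the monarch CPU, smp_callin() for slave CPUs
	 * arriving via common_stext -- now running in virtual mode with
	 * KERNEL_PSW as the new PSW. */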
#ifdef CONFIG_SMP

	.import smp_init_current_idle_task,data
	.import	smp_callin,code

#ifndef CONFIG_64BIT
smp_callin_rtn:
	.proc
	.callinfo
	break	1,1		/* Break if returned from smp_callin() */
	nop
	nop
	.procend
#endif /*!CONFIG_64BIT*/

/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, registers values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
	.proc
	.callinfo

	/*
	** Initialize Space registers
	*/
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

#ifdef CONFIG_64BIT
	/*
	 * Enable Wide mode early, in case the idle
	 * task in smp_init_current_idle_task needs 64-bit addressing.
	 */
1:	mfia		%rp		/* clear upper part of pcoq */
	ldo		2f-1b(%rp),%rp
	depdi		0,31,32,%rp
	bv		(%rp)
	ssm		PSW_SM_W,%r0
2:
#endif

	/* Initialize the SP - monarch sets up smp_init_current_idle_task */
	load32		PA(smp_init_current_idle_task),%r6
	LDREG		0(%r6),%r6
	mtctl		%r6,%cr30
	tophys_r1	%r6
	LDREG		TASK_STACK(%r6),%sp
	tophys_r1	%sp
	ldo		FRAME_SIZE(%sp),%sp

	/* point CPU to kernel page tables */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef CONFIG_64BIT
	/* Setup PDCE_PROC entry */
	copy		%arg0,%r3
#else
	/* Load RFI *return* address in case smp_callin bails */
	load32		smp_callin_rtn,%r2
#endif

	/* Load RFI target address.  */
	load32		smp_callin,%r11

	/* ok...common code can handle the rest */
	b		common_stext
	nop

	.procend
#endif /* CONFIG_SMP */

#ifndef CONFIG_64BIT
	.section .data..ro_after_init

	.align	4
	.export	$global$,data

	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
#endif /*!CONFIG_64BIT*/


/* ==== Second file in the comparison: MIPS kernel entry code (head.S, older tree) ==== */

/* This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf Electronics
 * Written by Ralf Baechle and Andreas Busse
 * Copyright (C) 1995 - 1999 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Modified for DECStation and hence R3000 support by Paul M. Antoine
 * Further modifications by David S. Miller and Harald Koerfgen
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/current.h>
#include <asm/offset.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/regdef.h>
#include <asm/cachectl.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>

	.text
	/*
	 * Reserved space for exception handlers.
	 * Necessary for machines which link their kernels at KSEG0.
	 */
	.fill	0x400

	/* The following two symbols are used for kernel profiling. */
	EXPORT(stext)
	EXPORT(_stext)

	__INIT

	/* Cache Error */
	LEAF(except_vec2_generic)
	.set	noreorder
	.set	noat
	.set	mips0
	/*
	 * This is a very bad place to be.  Our cache error
	 * detection has triggered.  If we have write-back data
	 * in the cache, we may not be able to recover.  As a
	 * first-order desperate measure, turn off KSEG0 caching.
	 */
	mfc0	k0,CP0_CONFIG
	li	k1,~CONF_CM_CMASK
	and	k0,k0,k1
	ori	k0,k0,CONF_CM_UNCACHED
	mtc0	k0,CP0_CONFIG
	/* Give it a few cycles to sink in... */
	nop
	nop
	nop

	j	cache_parity_error
	nop
	END(except_vec2_generic)

	.set	at

	/*
	 * Special interrupt vector for embedded MIPS.  This is a
	 * dedicated interrupt vector which reduces interrupt processing
	 * overhead.  The jump instruction will be inserted here at
	 * initialization time.  This handler may only be 8 bytes in
	 * size!
	 */
	NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	nop
	END(except_vec4)

	/*
	 * EJTAG debug exception handler.
	 * The EJTAG debug exception entry point is 0xbfc00480, which
	 * normally is in the boot PROM, so the boot PROM must do an
	 * unconditional jump to this vector.
	 */
	NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
	nop
	END(except_vec_ejtag_debug)

	__FINIT

	/*
	 * EJTAG debug exception handler.
	 */
	NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	noat
	.set	noreorder
	mtc0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return

	la	k0, ejtag_debug_buffer
	sw	k1, 0(k0)
	SAVE_ALL
	jal	ejtag_exception_handler
	move	a0, sp
	RESTORE_ALL
	la	k0, ejtag_debug_buffer
	lw	k1, 0(k0)

ejtag_return:
	mfc0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	mips0
	nop
	.set	at
	END(ejtag_debug_handler)

	__INIT

	/*
	 * NMI debug exception handler for MIPS reference boards.
	 * The NMI debug exception entry point is 0xbfc00000, which
	 * normally is in the boot PROM, so the boot PROM must do an
	 * unconditional jump to this vector.
	 */
	NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
	nop
	END(except_vec_nmi)

	__FINIT

	NESTED(nmi_handler, PT_SIZE, sp)
	.set	noat
	.set	noreorder
	.set	mips3
	SAVE_ALL
	jal	nmi_exception_handler
	move	a0, sp
	RESTORE_ALL
	eret
	.set	at
	.set	mips0
	END(nmi_handler)

	__INIT

	/*
	 * Kernel entry point
	 */
	NESTED(kernel_entry, 16, sp)
	.set	noreorder

	/*
	 * The firmware/bootloader passes argc/argp/envp
	 * to us as arguments.  But clear bss first because
	 * the romvec and other important info is stored there
	 * by prom_init().
	 */
	la	t0, _edata
	sw	zero, (t0)
	la	t1, (_end - 4)
1:
	addiu	t0, 4
	bne	t0, t1, 1b
	sw	zero, (t0)

	/*
	 * Stack for kernel and init, current variable
	 */
	la	$28, init_task_union
	addiu	t0, $28, KERNEL_STACK_SIZE-32
	subu	sp, t0, 4*SZREG
	sw	t0, kernelsp

	jal	init_arch
	nop
	END(kernel_entry)
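	/*
	 * Rough C-style sketch of the BSS clear and stack setup above
	 * (illustrative only, not from the original source):
	 *
	 *	unsigned int *p;
	 *	for (p = (unsigned int *)&_edata; p < (unsigned int *)&_end; p++)
	 *		*p = 0;				// zero .bss word by word
	 *	gp       = &init_task_union;		// current task / stack area
	 *	kernelsp = (long)gp + KERNEL_STACK_SIZE - 32;
	 *	sp       = kernelsp - 4 * SZREG;	// leave headroom below the top
	 */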
#ifdef CONFIG_SMP

	/*
	 * SMP slave cpus entry point.  Board specific code for bootstrap calls this
	 * function after setting up the stack and gp registers.
	 */
	LEAF(smp_bootstrap)
	.set	push
	.set	noreorder
	mtc0	zero, CP0_WIRED
	CLI
	mfc0	t0, CP0_STATUS
	li	t1, ~(ST0_CU1|ST0_CU2|ST0_CU3|ST0_KX|ST0_SX|ST0_UX)
	and	t0, t1
	or	t0, (ST0_CU0);
	jal	start_secondary
	mtc0	t0, CP0_STATUS
	.set	pop
	END(smp_bootstrap)
#endif

	__FINIT

	/*
	 * This buffer is reserved for the use of the EJTAG debug
	 * handler.
	 */
	.data
	EXPORT(ejtag_debug_buffer)
	.fill	4

	.comm	kernelsp,    NR_CPUS * 8, 8
	.comm	pgd_current, NR_CPUS * 8, 8

	.macro	page name, order=0
	.globl	\name
\name:	.size	\name, (_PAGE_SIZE << \order)
	.org	. + (_PAGE_SIZE << \order)
	.type	\name, @object
	.endm

	.data
	.align	12

	page	swapper_pg_dir, _PGD_ORDER
	page	empty_bad_page, 0
	page	empty_bad_page_table, 0
	page	invalid_pte_table, 0
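	/*
	 * For reference (illustrative expansion, not present in the file):
	 * "page swapper_pg_dir, _PGD_ORDER" above expands roughly to
	 *
	 *	.globl	swapper_pg_dir
	 * swapper_pg_dir:
	 *	.size	swapper_pg_dir, (_PAGE_SIZE << _PGD_ORDER)
	 *	.org	. + (_PAGE_SIZE << _PGD_ORDER)
	 *	.type	swapper_pg_dir, @object
	 *
	 * i.e. it reserves (_PAGE_SIZE << _PGD_ORDER) zero-filled bytes in
	 * .data for a page-aligned object, and likewise for the other
	 * page directives above.
	 */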