/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/kernel/head.S
 *
 * initial boot stuff.. At this point, the bootloader has already
 * switched into OSF/1 PAL-code, and loaded us at the correct address
 * (START_ADDR).  So there isn't much left for us to do: just set up
 * the kernel global pointer and jump to the kernel entry-point.
 */

#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/pal.h>
#include <asm/setup.h>
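
/*
 * A quick summary of the conventions the code below relies on (a sketch,
 * not an exhaustive list; see the Alpha/OSF calling standard for details):
 * $26 = ra (return address), $27 = pv (procedure value), $29 = gp,
 * $30 = sp, and $8 holds the current thread pointer in the Alpha kernel.
 *
 * "br $27,1f" deposits the address of the next instruction (label 1) into
 * $27, so the following "ldgp $29,0($27)" can derive the kernel GP from a
 * known code address before any C code runs.  The initial stack is carved
 * out of init_thread_union: sp is pointed 0x4000 bytes (the thread-union
 * size) above it, minus room for a struct pt_regs.
 */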

__HEAD
	.globl _stext
	.set noreorder
	.globl	__start
	.ent	__start
_stext:
__start:
	.prologue 0
	br	$27,1f
1:	ldgp	$29,0($27)
	/* We need to get current_task_info loaded up...  */
	lda	$8,init_thread_union
	/* ... and find our stack ... */
	lda	$30,0x4000 - SIZEOF_PT_REGS($8)
	/* ... and then we can start the kernel.  */
	jsr	$26,start_kernel
	call_pal PAL_halt
	.end __start

#ifdef CONFIG_SMP
	.align 3
	.globl	__smp_callin
	.ent	__smp_callin
	/* On entry here from SRM console, the HWPCB of the per-cpu
	   slot for this processor has been loaded.  We've arranged
	   for the UNIQUE value for this process to contain the PCBB
	   of the target idle task.  */
__smp_callin:
	.prologue 1
	ldgp	$29,0($27)	# First order of business, load the GP.

	call_pal PAL_rduniq	# Grab the target PCBB.
	mov	$0,$16		# Install it.
	call_pal PAL_swpctx

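	/* The new HWPCB supplies the idle task's kernel stack, so masking
	   the low 14 bits off sp should land on the base of that task's
	   16 KB thread union -- the value the kernel keeps in $8.  */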
	lda	$8,0x3fff	# Find "current".
	bic	$30,$8,$8

	jsr	$26,smp_callin
	call_pal PAL_halt
	.end __smp_callin
#endif /* CONFIG_SMP */

#
# The following two functions are needed for supporting SRM PALcode
# on the PC164 (at least), since that PALcode manages the interrupt
# masking, and we cannot duplicate the effort without causing problems
#

	.align 3
	.globl	cserve_ena
	.ent	cserve_ena
cserve_ena:
	.prologue 0
	bis	$16,$16,$17
	lda	$16,52($31)
	call_pal PAL_cserve
	ret	($26)
	.end	cserve_ena

	.align 3
	.globl	cserve_dis
	.ent	cserve_dis
cserve_dis:
	.prologue 0
	bis	$16,$16,$17
	lda	$16,53($31)
	call_pal PAL_cserve
	ret	($26)
	.end	cserve_dis

#
# It is handy, on occasion, to make halt actually just loop.
# Putting it here means we dont have to recompile the whole
# kernel.
#

	.align 3
	.globl	halt
	.ent	halt
halt:
	.prologue 0
	call_pal PAL_halt
	.end	halt
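
#
# A minimal usage sketch (prototypes assumed from the register usage above,
# not quoted from a header): platform interrupt code calls the two PALcode
# wrappers from C roughly as
#
#	extern void cserve_ena(unsigned long x);	# PAL_cserve, $16 = 52
#	extern void cserve_dis(unsigned long x);	# PAL_cserve, $16 = 53
#
# so the SRM PALcode, which owns the interrupt masking, performs the actual
# enable/disable; the C argument arrives in $16 and is handed on in $17.
#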