1 /* SPDX-License-Identifier: GPL-2.0-or-later * !! 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 2 /* 3 * OpenRISC head.S !! 3 * arch/alpha/kernel/head.S 4 * 4 * 5 * Linux architectural port borrowing liberall !! 5 * initial boot stuff.. At this point, the bootloader has already 6 * others. All original copyrights apply as p !! 6 * switched into OSF/1 PAL-code, and loaded us at the correct address 7 * declaration. !! 7 * (START_ADDR). So there isn't much left for us to do: just set up 8 * !! 8 * the kernel global pointer and jump to the kernel entry-point. 9 * Modifications for the OpenRISC architecture << 10 * Copyright (C) 2003 Matjaz Breskvar <phoenix@ << 11 * Copyright (C) 2010-2011 Jonas Bonn <jonas@so << 12 */ 9 */ 13 10 14 #include <linux/linkage.h> << 15 #include <linux/threads.h> << 16 #include <linux/errno.h> << 17 #include <linux/init.h> 11 #include <linux/init.h> 18 #include <linux/serial_reg.h> << 19 #include <linux/pgtable.h> << 20 #include <asm/processor.h> << 21 #include <asm/page.h> << 22 #include <asm/mmu.h> << 23 #include <asm/thread_info.h> << 24 #include <asm/cache.h> << 25 #include <asm/spr_defs.h> << 26 #include <asm/asm-offsets.h> 12 #include <asm/asm-offsets.h> 27 #include <linux/of_fdt.h> !! 13 #include <asm/pal.h> 28 !! 14 #include <asm/setup.h> 29 #define tophys(rd,rs) << 30 l.movhi rd,hi(-KERNELBASE) << 31 l.add rd,rd,rs << 32 << 33 #define CLEAR_GPR(gpr) << 34 l.movhi gpr,0x0 << 35 << 36 #define LOAD_SYMBOL_2_GPR(gpr,symbol) << 37 l.movhi gpr,hi(symbol) << 38 l.ori gpr,gpr,lo(symbol) << 39 << 40 << 41 #define UART_BASE_ADD 0x90000000 << 42 << 43 #define EXCEPTION_SR (SPR_SR_DME | SPR_SR_IME << 44 #define SYSCALL_SR (SPR_SR_DME | SPR_SR_IME | << 45 << 46 /* =========================================== << 47 << 48 #define SPR_SHADOW_GPR(x) ((x) + SPR_GPR << 49 << 50 /* << 51 * emergency_print temporary stores << 52 */ << 53 #ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS << 54 #define EMERGENCY_PRINT_STORE_GPR4 l.mtsp << 55 #define EMERGENCY_PRINT_LOAD_GPR4 l.mfsp << 56 << 57 #define EMERGENCY_PRINT_STORE_GPR5 l.mtsp << 58 #define EMERGENCY_PRINT_LOAD_GPR5 l.mfsp << 59 << 60 #define EMERGENCY_PRINT_STORE_GPR6 l.mtsp << 61 #define EMERGENCY_PRINT_LOAD_GPR6 l.mfsp << 62 << 63 #define EMERGENCY_PRINT_STORE_GPR7 l.mtsp << 64 #define EMERGENCY_PRINT_LOAD_GPR7 l.mfsp << 65 << 66 #define EMERGENCY_PRINT_STORE_GPR8 l.mtsp << 67 #define EMERGENCY_PRINT_LOAD_GPR8 l.mfsp << 68 << 69 #define EMERGENCY_PRINT_STORE_GPR9 l.mtsp << 70 #define EMERGENCY_PRINT_LOAD_GPR9 l.mfsp << 71 << 72 #else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */ << 73 #define EMERGENCY_PRINT_STORE_GPR4 l.sw << 74 #define EMERGENCY_PRINT_LOAD_GPR4 l.lwz << 75 << 76 #define EMERGENCY_PRINT_STORE_GPR5 l.sw << 77 #define EMERGENCY_PRINT_LOAD_GPR5 l.lwz << 78 << 79 #define EMERGENCY_PRINT_STORE_GPR6 l.sw << 80 #define EMERGENCY_PRINT_LOAD_GPR6 l.lwz << 81 << 82 #define EMERGENCY_PRINT_STORE_GPR7 l.sw << 83 #define EMERGENCY_PRINT_LOAD_GPR7 l.lwz << 84 << 85 #define EMERGENCY_PRINT_STORE_GPR8 l.sw << 86 #define EMERGENCY_PRINT_LOAD_GPR8 l.lwz << 87 << 88 #define EMERGENCY_PRINT_STORE_GPR9 l.sw << 89 #define EMERGENCY_PRINT_LOAD_GPR9 l.lwz << 90 << 91 #endif << 92 << 93 /* << 94 * TLB miss handlers temorary stores << 95 */ << 96 #ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS << 97 #define EXCEPTION_STORE_GPR2 l.mtsp << 98 #define EXCEPTION_LOAD_GPR2 l.mfsp << 99 << 100 #define EXCEPTION_STORE_GPR3 l.mtsp << 101 #define EXCEPTION_LOAD_GPR3 l.mfsp << 102 << 103 #define EXCEPTION_STORE_GPR4 l.mtsp << 104 #define EXCEPTION_LOAD_GPR4 
l.mfsp << 105 << 106 #define EXCEPTION_STORE_GPR5 l.mtsp << 107 #define EXCEPTION_LOAD_GPR5 l.mfsp << 108 << 109 #define EXCEPTION_STORE_GPR6 l.mtsp << 110 #define EXCEPTION_LOAD_GPR6 l.mfsp << 111 << 112 #else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */ << 113 #define EXCEPTION_STORE_GPR2 l.sw << 114 #define EXCEPTION_LOAD_GPR2 l.lwz << 115 << 116 #define EXCEPTION_STORE_GPR3 l.sw << 117 #define EXCEPTION_LOAD_GPR3 l.lwz << 118 << 119 #define EXCEPTION_STORE_GPR4 l.sw << 120 #define EXCEPTION_LOAD_GPR4 l.lwz << 121 << 122 #define EXCEPTION_STORE_GPR5 l.sw << 123 #define EXCEPTION_LOAD_GPR5 l.lwz << 124 << 125 #define EXCEPTION_STORE_GPR6 l.sw << 126 #define EXCEPTION_LOAD_GPR6 l.lwz << 127 << 128 #endif << 129 15 130 /* !! 16 __HEAD 131 * EXCEPTION_HANDLE temporary stores !! 17 .globl _stext 132 */ !! 18 .set noreorder 133 !! 19 .globl __start 134 #ifdef CONFIG_OPENRISC_HAVE_SHADOW_GPRS !! 20 .ent __start 135 #define EXCEPTION_T_STORE_GPR30 l.mtsp !! 21 _stext: 136 #define EXCEPTION_T_LOAD_GPR30(reg) l.mfsp !! 22 __start: 137 !! 23 .prologue 0 138 #define EXCEPTION_T_STORE_GPR10 l.mtsp !! 24 br $27,1f 139 #define EXCEPTION_T_LOAD_GPR10(reg) l.mfsp !! 25 1: ldgp $29,0($27) 140 !! 26 /* We need to get current_task_info loaded up... */ 141 #define EXCEPTION_T_STORE_SP l.mtsp !! 27 lda $8,init_thread_union 142 #define EXCEPTION_T_LOAD_SP(reg) l.mfsp !! 28 /* ... and find our stack ... */ 143 !! 29 lda $30,0x4000 - SIZEOF_PT_REGS($8) 144 #else /* !CONFIG_OPENRISC_HAVE_SHADOW_GPRS */ !! 30 /* ... and then we can start the kernel. */ 145 #define EXCEPTION_T_STORE_GPR30 l.sw !! 31 jsr $26,start_kernel 146 #define EXCEPTION_T_LOAD_GPR30(reg) l.lwz !! 32 call_pal PAL_halt 147 !! 33 .end __start 148 #define EXCEPTION_T_STORE_GPR10 l.sw << 149 #define EXCEPTION_T_LOAD_GPR10(reg) l.lwz << 150 << 151 #define EXCEPTION_T_STORE_SP l.sw << 152 #define EXCEPTION_T_LOAD_SP(reg) l.lwz << 153 #endif << 154 << 155 /* =========================================== << 156 34 157 #ifdef CONFIG_SMP 35 #ifdef CONFIG_SMP 158 #define GET_CURRENT_PGD(reg,t1) !! 36 .align 3 159 LOAD_SYMBOL_2_GPR(reg,current_pgd) !! 37 .globl __smp_callin 160 l.mfspr t1,r0,SPR_COREID !! 38 .ent __smp_callin 161 l.slli t1,t1,2 !! 39 /* On entry here from SRM console, the HWPCB of the per-cpu 162 l.add reg,reg,t1 !! 40 slot for this processor has been loaded. We've arranged 163 tophys (t1,reg) !! 41 for the UNIQUE value for this process to contain the PCBB 164 l.lwz reg,0(t1) !! 42 of the target idle task. */ 165 #else !! 43 __smp_callin: 166 #define GET_CURRENT_PGD(reg,t1) !! 44 .prologue 1 167 LOAD_SYMBOL_2_GPR(reg,current_pgd) !! 45 ldgp $29,0($27) # First order of business, load the GP. 168 tophys (t1,reg) !! 46 169 l.lwz reg,0(t1) !! 47 call_pal PAL_rduniq # Grab the target PCBB. 170 #endif !! 48 mov $0,$16 # Install it. 171 !! 49 call_pal PAL_swpctx 172 /* Load r10 from current_thread_info_set - clo !! 50 173 #ifdef CONFIG_SMP !! 51 lda $8,0x3fff # Find "current". 174 #define GET_CURRENT_THREAD_INFO !! 52 bic $30,$8,$8 175 LOAD_SYMBOL_2_GPR(r1,current_thread_in !! 53 176 tophys (r30,r1) !! 54 jsr $26,smp_callin 177 l.mfspr r10,r0,SPR_COREID !! 55 call_pal PAL_halt 178 l.slli r10,r10,2 !! 56 .end __smp_callin 179 l.add r30,r30,r10 !! 57 #endif /* CONFIG_SMP */ 180 /* r10: current_thread_info */ !! 58 181 l.lwz r10,0(r30) !! 59 # 182 #else !! 60 # The following two functions are needed for supporting SRM PALcode 183 #define GET_CURRENT_THREAD_INFO !! 
61 # on the PC164 (at least), since that PALcode manages the interrupt 184 LOAD_SYMBOL_2_GPR(r1,current_thread_in !! 62 # masking, and we cannot duplicate the effort without causing problems 185 tophys (r30,r1) !! 63 # 186 /* r10: current_thread_info */ !! 64 187 l.lwz r10,0(r30) !! 65 .align 3 188 #endif !! 66 .globl cserve_ena 189 !! 67 .ent cserve_ena 190 /* !! 68 cserve_ena: 191 * DSCR: this is a common hook for handling ex !! 69 .prologue 0 192 * the needed registers, set up stack an !! 70 bis $16,$16,$17 193 * then jump to the handler while enabli !! 71 lda $16,52($31) 194 * !! 72 call_pal PAL_cserve 195 * PRMS: handler - a function to jump t !! 73 ret ($26) 196 * remaining registers to !! 74 .end cserve_ena 197 * appropriate arch-indep !! 75 198 * and finaly jump to ret !! 76 .align 3 199 * !! 77 .globl cserve_dis 200 * PREQ: unchanged state from the time excepti !! 78 .ent cserve_dis 201 * !! 79 cserve_dis: 202 * POST: SAVED the following registers origina !! 80 .prologue 0 203 * to the new created exception fr !! 81 bis $16,$16,$17 204 * !! 82 lda $16,53($31) 205 * r1 - ksp pointing to the new (e !! 83 call_pal PAL_cserve 206 * r4 - EEAR exception EA !! 84 ret ($26) 207 * r10 - current pointing to current_th !! 85 .end cserve_dis 208 * r12 - syscall 0, since we didn't com !! 86 209 * r30 - handler address of the handler !! 87 # 210 * !! 88 # It is handy, on occasion, to make halt actually just loop. 211 * handler has to save remaining registe !! 89 # Putting it here means we dont have to recompile the whole 212 * ksp frame *before* tainting them! !! 90 # kernel. 213 * !! 91 # 214 * NOTE: this function is not reentrant per se !! 92 215 * by processor disabling all exceptions !! 93 .align 3 216 * accours. !! 94 .globl halt 217 * !! 95 .ent halt 218 * OPTM: no need to make it so wasteful to ext !! 96 halt: 219 */ !! 97 .prologue 0 220 !! 98 call_pal PAL_halt 221 #define EXCEPTION_HANDLE(handler) !! 
99 .end halt 222 EXCEPTION_T_STORE_GPR30 << 223 l.mfspr r30,r0,SPR_ESR_BASE << 224 l.andi r30,r30,SPR_SR_SM << 225 l.sfeqi r30,0 << 226 EXCEPTION_T_STORE_GPR10 << 227 l.bnf 2f << 228 EXCEPTION_T_STORE_SP << 229 1: /* user_mode: */ << 230 GET_CURRENT_THREAD_INFO << 231 tophys (r30,r10) << 232 l.lwz r1,(TI_KSP)(r30) << 233 /* fall through */ << 234 2: /* kernel_mode: */ << 235 /* create new stack frame, save only n << 236 /* r1: KSP, r10: current, r4: EEAR, r3 << 237 /* r12: temp, syscall indicator */ << 238 l.addi r1,r1,-(INT_FRAME_SIZE) << 239 /* r1 is KSP, r30 is __pa(KSP) */ << 240 tophys (r30,r1) << 241 l.sw PT_GPR12(r30),r12 << 242 /* r4 use for tmp before EA */ << 243 l.mfspr r12,r0,SPR_EPCR_BASE << 244 l.sw PT_PC(r30),r12 << 245 l.mfspr r12,r0,SPR_ESR_BASE << 246 l.sw PT_SR(r30),r12 << 247 /* save r30 */ << 248 EXCEPTION_T_LOAD_GPR30(r12) << 249 l.sw PT_GPR30(r30),r12 << 250 /* save r10 as was prior to exception << 251 EXCEPTION_T_LOAD_GPR10(r12) << 252 l.sw PT_GPR10(r30),r12 << 253 /* save PT_SP as was prior to exceptio << 254 EXCEPTION_T_LOAD_SP(r12) << 255 l.sw PT_SP(r30),r12 << 256 /* save exception r4, set r4 = EA */ << 257 l.sw PT_GPR4(r30),r4 << 258 l.mfspr r4,r0,SPR_EEAR_BASE << 259 /* r12 == 1 if we come from syscall */ << 260 CLEAR_GPR(r12) << 261 /* ----- turn on MMU ----- */ << 262 /* Carry DSX into exception SR */ << 263 l.mfspr r30,r0,SPR_SR << 264 l.andi r30,r30,SPR_SR_DSX << 265 l.ori r30,r30,(EXCEPTION_SR) << 266 l.mtspr r0,r30,SPR_ESR_BASE << 267 /* r30: EA address of handler */ << 268 LOAD_SYMBOL_2_GPR(r30,handler) << 269 l.mtspr r0,r30,SPR_EPCR_BASE << 270 l.rfe << 271 << 272 /* << 273 * this doesn't work << 274 * << 275 * << 276 * #ifdef CONFIG_JUMP_UPON_UNHANDLED_EXCEPTION << 277 * #define UNHANDLED_EXCEPTION(handler) << 278 * l.ori r3,r0,0x1 << 279 * l.mtspr r0,r3,SPR_SR << 280 * l.movhi r3,hi(0xf0000100) << 281 * l.ori r3,r3,lo(0xf0000100) << 282 * l.jr r3 << 283 * l.nop 1 << 284 * << 285 * #endif << 286 */ << 287 << 288 /* DSCR: this is the same as EXCEPTION_HANDLE( << 289 * a bit more carefull (if we have a PT_ << 290 * corruption) and set them up from 'cur << 291 * << 292 */ << 293 #define UNHANDLED_EXCEPTION(handler) << 294 EXCEPTION_T_STORE_GPR30 << 295 EXCEPTION_T_STORE_GPR10 << 296 EXCEPTION_T_STORE_SP << 297 /* temporary store r3, r9 into r1, r10 << 298 l.addi r1,r3,0x0 << 299 l.addi r10,r9,0x0 << 300 LOAD_SYMBOL_2_GPR(r9,_string_unhandled << 301 tophys (r3,r9) << 302 l.jal _emergency_print << 303 l.nop << 304 l.mfspr r3,r0,SPR_NPC << 305 l.jal _emergency_print_nr << 306 l.andi r3,r3,0x1f00 << 307 LOAD_SYMBOL_2_GPR(r9,_string_epc_prefi << 308 tophys (r3,r9) << 309 l.jal _emergency_print << 310 l.nop << 311 l.jal _emergency_print_nr << 312 l.mfspr r3,r0,SPR_EPCR_BASE << 313 LOAD_SYMBOL_2_GPR(r9,_string_nl) << 314 tophys (r3,r9) << 315 l.jal _emergency_print << 316 l.nop << 317 /* end of printing */ << 318 l.addi r3,r1,0x0 << 319 l.addi r9,r10,0x0 << 320 /* extract current, ksp from current_s << 321 LOAD_SYMBOL_2_GPR(r1,_unhandled_stack_ << 322 LOAD_SYMBOL_2_GPR(r10,init_thread_unio << 323 /* create new stack frame, save only n << 324 /* r1: KSP, r10: current, r31: __pa(KS << 325 /* r12: temp, syscall indicator, r13 t << 326 l.addi r1,r1,-(INT_FRAME_SIZE) << 327 /* r1 is KSP, r30 is __pa(KSP) */ << 328 tophys (r30,r1) << 329 l.sw PT_GPR12(r30),r12 << 330 l.mfspr r12,r0,SPR_EPCR_BASE << 331 l.sw PT_PC(r30),r12 << 332 l.mfspr r12,r0,SPR_ESR_BASE << 333 l.sw PT_SR(r30),r12 << 334 /* save r31 */ << 335 EXCEPTION_T_LOAD_GPR30(r12) << 336 l.sw 
PT_GPR30(r30),r12 << 337 /* save r10 as was prior to exception << 338 EXCEPTION_T_LOAD_GPR10(r12) << 339 l.sw PT_GPR10(r30),r12 << 340 /* save PT_SP as was prior to exceptio << 341 EXCEPTION_T_LOAD_SP(r12) << 342 l.sw PT_SP(r30),r12 << 343 l.sw PT_GPR13(r30),r13 << 344 /* --> */ << 345 /* save exception r4, set r4 = EA */ << 346 l.sw PT_GPR4(r30),r4 << 347 l.mfspr r4,r0,SPR_EEAR_BASE << 348 /* r12 == 1 if we come from syscall */ << 349 CLEAR_GPR(r12) << 350 /* ----- play a MMU trick ----- */ << 351 l.ori r30,r0,(EXCEPTION_SR) << 352 l.mtspr r0,r30,SPR_ESR_BASE << 353 /* r31: EA address of handler */ << 354 LOAD_SYMBOL_2_GPR(r30,handler) << 355 l.mtspr r0,r30,SPR_EPCR_BASE << 356 l.rfe << 357 << 358 /* =========================================== << 359 << 360 /* ---[ 0x100: RESET exception ]-------------- << 361 .org 0x100 << 362 /* Jump to .init code at _start which << 363 * and will be discarded after boot. << 364 */ << 365 LOAD_SYMBOL_2_GPR(r15, _start) << 366 tophys (r13,r15) << 367 l.jr r13 << 368 l.nop << 369 << 370 /* ---[ 0x200: BUS exception ]---------------- << 371 .org 0x200 << 372 _dispatch_bus_fault: << 373 EXCEPTION_HANDLE(_bus_fault_handler) << 374 << 375 /* ---[ 0x300: Data Page Fault exception ]---- << 376 .org 0x300 << 377 _dispatch_do_dpage_fault: << 378 // totaly disable timer interrupt << 379 // l.mtspr r0,r0,SPR_TTMR << 380 // DEBUG_TLB_PROBE(0x300) << 381 // EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x300 << 382 EXCEPTION_HANDLE(_data_page_fault_hand << 383 << 384 /* ---[ 0x400: Insn Page Fault exception ]---- << 385 .org 0x400 << 386 _dispatch_do_ipage_fault: << 387 // totaly disable timer interrupt << 388 // l.mtspr r0,r0,SPR_TTMR << 389 // DEBUG_TLB_PROBE(0x400) << 390 // EXCEPTION_DEBUG_VALUE_ER_ENABLED(0x400 << 391 EXCEPTION_HANDLE(_insn_page_fault_hand << 392 << 393 /* ---[ 0x500: Timer exception ]-------------- << 394 .org 0x500 << 395 EXCEPTION_HANDLE(_timer_handler) << 396 << 397 /* ---[ 0x600: Alignment exception ]---------- << 398 .org 0x600 << 399 EXCEPTION_HANDLE(_alignment_handler) << 400 << 401 /* ---[ 0x700: Illegal insn exception ]------- << 402 .org 0x700 << 403 EXCEPTION_HANDLE(_illegal_instruction_ << 404 << 405 /* ---[ 0x800: External interrupt exception ]- << 406 .org 0x800 << 407 EXCEPTION_HANDLE(_external_irq_handler << 408 << 409 /* ---[ 0x900: DTLB miss exception ]---------- << 410 .org 0x900 << 411 l.j boot_dtlb_miss_handler << 412 l.nop << 413 << 414 /* ---[ 0xa00: ITLB miss exception ]---------- << 415 .org 0xa00 << 416 l.j boot_itlb_miss_handler << 417 l.nop << 418 << 419 /* ---[ 0xb00: Range exception ]-------------- << 420 .org 0xb00 << 421 UNHANDLED_EXCEPTION(_vector_0xb00) << 422 << 423 /* ---[ 0xc00: Syscall exception ]------------ << 424 .org 0xc00 << 425 EXCEPTION_HANDLE(_sys_call_handler) << 426 << 427 /* ---[ 0xd00: Floating point exception ]----- << 428 .org 0xd00 << 429 EXCEPTION_HANDLE(_fpe_trap_handler) << 430 << 431 /* ---[ 0xe00: Trap exception ]--------------- << 432 .org 0xe00 << 433 // UNHANDLED_EXCEPTION(_vector_0xe00) << 434 EXCEPTION_HANDLE(_trap_handler) << 435 << 436 /* ---[ 0xf00: Reserved exception ]----------- << 437 .org 0xf00 << 438 UNHANDLED_EXCEPTION(_vector_0xf00) << 439 << 440 /* ---[ 0x1000: Reserved exception ]---------- << 441 .org 0x1000 << 442 UNHANDLED_EXCEPTION(_vector_0x1000) << 443 << 444 /* ---[ 0x1100: Reserved exception ]---------- << 445 .org 0x1100 << 446 UNHANDLED_EXCEPTION(_vector_0x1100) << 447 << 448 /* ---[ 0x1200: Reserved exception ]---------- << 449 .org 0x1200 << 450 
UNHANDLED_EXCEPTION(_vector_0x1200) << 451 << 452 /* ---[ 0x1300: Reserved exception ]---------- << 453 .org 0x1300 << 454 UNHANDLED_EXCEPTION(_vector_0x1300) << 455 << 456 /* ---[ 0x1400: Reserved exception ]---------- << 457 .org 0x1400 << 458 UNHANDLED_EXCEPTION(_vector_0x1400) << 459 << 460 /* ---[ 0x1500: Reserved exception ]---------- << 461 .org 0x1500 << 462 UNHANDLED_EXCEPTION(_vector_0x1500) << 463 << 464 /* ---[ 0x1600: Reserved exception ]---------- << 465 .org 0x1600 << 466 UNHANDLED_EXCEPTION(_vector_0x1600) << 467 << 468 /* ---[ 0x1700: Reserved exception ]---------- << 469 .org 0x1700 << 470 UNHANDLED_EXCEPTION(_vector_0x1700) << 471 << 472 /* ---[ 0x1800: Reserved exception ]---------- << 473 .org 0x1800 << 474 UNHANDLED_EXCEPTION(_vector_0x1800) << 475 << 476 /* ---[ 0x1900: Reserved exception ]---------- << 477 .org 0x1900 << 478 UNHANDLED_EXCEPTION(_vector_0x1900) << 479 << 480 /* ---[ 0x1a00: Reserved exception ]---------- << 481 .org 0x1a00 << 482 UNHANDLED_EXCEPTION(_vector_0x1a00) << 483 << 484 /* ---[ 0x1b00: Reserved exception ]---------- << 485 .org 0x1b00 << 486 UNHANDLED_EXCEPTION(_vector_0x1b00) << 487 << 488 /* ---[ 0x1c00: Reserved exception ]---------- << 489 .org 0x1c00 << 490 UNHANDLED_EXCEPTION(_vector_0x1c00) << 491 << 492 /* ---[ 0x1d00: Reserved exception ]---------- << 493 .org 0x1d00 << 494 UNHANDLED_EXCEPTION(_vector_0x1d00) << 495 << 496 /* ---[ 0x1e00: Reserved exception ]---------- << 497 .org 0x1e00 << 498 UNHANDLED_EXCEPTION(_vector_0x1e00) << 499 << 500 /* ---[ 0x1f00: Reserved exception ]---------- << 501 .org 0x1f00 << 502 UNHANDLED_EXCEPTION(_vector_0x1f00) << 503 << 504 .org 0x2000 << 505 /* =========================================== << 506 << 507 /* .text*/ << 508 << 509 /* This early stuff belongs in HEAD, but some << 510 * don't... */ << 511 << 512 __HEAD << 513 .global _start << 514 _start: << 515 /* Init r0 to zero as per spec */ << 516 CLEAR_GPR(r0) << 517 << 518 /* save kernel parameters */ << 519 l.or r25,r0,r3 /* pointer to << 520 << 521 /* << 522 * ensure a deterministic start << 523 */ << 524 << 525 l.ori r3,r0,0x1 << 526 l.mtspr r0,r3,SPR_SR << 527 << 528 /* << 529 * Start the TTCR as early as possible << 530 * measurements of boot time from the << 531 * important is that the TTCR does not << 532 * random_init(). 
<< 533 */ << 534 l.movhi r3,hi(SPR_TTMR_CR) << 535 l.mtspr r0,r3,SPR_TTMR << 536 << 537 CLEAR_GPR(r1) << 538 CLEAR_GPR(r2) << 539 CLEAR_GPR(r3) << 540 CLEAR_GPR(r4) << 541 CLEAR_GPR(r5) << 542 CLEAR_GPR(r6) << 543 CLEAR_GPR(r7) << 544 CLEAR_GPR(r8) << 545 CLEAR_GPR(r9) << 546 CLEAR_GPR(r10) << 547 CLEAR_GPR(r11) << 548 CLEAR_GPR(r12) << 549 CLEAR_GPR(r13) << 550 CLEAR_GPR(r14) << 551 CLEAR_GPR(r15) << 552 CLEAR_GPR(r16) << 553 CLEAR_GPR(r17) << 554 CLEAR_GPR(r18) << 555 CLEAR_GPR(r19) << 556 CLEAR_GPR(r20) << 557 CLEAR_GPR(r21) << 558 CLEAR_GPR(r22) << 559 CLEAR_GPR(r23) << 560 CLEAR_GPR(r24) << 561 CLEAR_GPR(r26) << 562 CLEAR_GPR(r27) << 563 CLEAR_GPR(r28) << 564 CLEAR_GPR(r29) << 565 CLEAR_GPR(r30) << 566 CLEAR_GPR(r31) << 567 << 568 #ifdef CONFIG_SMP << 569 l.mfspr r26,r0,SPR_COREID << 570 l.sfeq r26,r0 << 571 l.bnf secondary_wait << 572 l.nop << 573 #endif << 574 /* << 575 * set up initial ksp and current << 576 */ << 577 /* setup kernel stack */ << 578 LOAD_SYMBOL_2_GPR(r1,init_thread_union << 579 LOAD_SYMBOL_2_GPR(r10,init_thread_unio << 580 tophys (r31,r10) << 581 l.sw TI_KSP(r31), r1 << 582 << 583 l.ori r4,r0,0x0 << 584 << 585 << 586 /* << 587 * .data contains initialized data, << 588 * .bss contains uninitialized data - << 589 */ << 590 clear_bss: << 591 LOAD_SYMBOL_2_GPR(r24, __bss_start) << 592 LOAD_SYMBOL_2_GPR(r26, _end) << 593 tophys(r28,r24) << 594 tophys(r30,r26) << 595 CLEAR_GPR(r24) << 596 CLEAR_GPR(r26) << 597 1: << 598 l.sw (0)(r28),r0 << 599 l.sfltu r28,r30 << 600 l.bf 1b << 601 l.addi r28,r28,4 << 602 << 603 enable_ic: << 604 l.jal _ic_enable << 605 l.nop << 606 << 607 enable_dc: << 608 l.jal _dc_enable << 609 l.nop << 610 << 611 flush_tlb: << 612 l.jal _flush_tlb << 613 l.nop << 614 << 615 /* The MMU needs to be enabled before or1k_ear << 616 << 617 enable_mmu: << 618 /* << 619 * enable dmmu & immu << 620 * SR[5] = 0, SR[6] = 0, 6th and 7th b << 621 */ << 622 l.mfspr r30,r0,SPR_SR << 623 l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME << 624 l.ori r28,r28,lo(SPR_SR_DME | SPR_SR << 625 l.or r30,r30,r28 << 626 l.mtspr r0,r30,SPR_SR << 627 l.nop << 628 l.nop << 629 l.nop << 630 l.nop << 631 l.nop << 632 l.nop << 633 l.nop << 634 l.nop << 635 l.nop << 636 l.nop << 637 l.nop << 638 l.nop << 639 l.nop << 640 l.nop << 641 l.nop << 642 l.nop << 643 << 644 // reset the simulation counters << 645 l.nop 5 << 646 << 647 /* check fdt header magic word */ << 648 l.lwz r3,0(r25) /* load magic << 649 l.movhi r4,hi(OF_DT_HEADER) << 650 l.ori r4,r4,lo(OF_DT_HEADER) << 651 l.sfeq r3,r4 << 652 l.bf _fdt_found << 653 l.nop << 654 /* magic number mismatch, set fdt poin << 655 l.or r25,r0,r0 << 656 _fdt_found: << 657 /* pass fdt pointer to or1k_early_setu << 658 l.or r3,r0,r25 << 659 LOAD_SYMBOL_2_GPR(r24, or1k_early_setu << 660 l.jalr r24 << 661 l.nop << 662 << 663 clear_regs: << 664 /* << 665 * clear all GPRS to increase determin << 666 */ << 667 CLEAR_GPR(r2) << 668 CLEAR_GPR(r3) << 669 CLEAR_GPR(r4) << 670 CLEAR_GPR(r5) << 671 CLEAR_GPR(r6) << 672 CLEAR_GPR(r7) << 673 CLEAR_GPR(r8) << 674 CLEAR_GPR(r9) << 675 CLEAR_GPR(r11) << 676 CLEAR_GPR(r12) << 677 CLEAR_GPR(r13) << 678 CLEAR_GPR(r14) << 679 CLEAR_GPR(r15) << 680 CLEAR_GPR(r16) << 681 CLEAR_GPR(r17) << 682 CLEAR_GPR(r18) << 683 CLEAR_GPR(r19) << 684 CLEAR_GPR(r20) << 685 CLEAR_GPR(r21) << 686 CLEAR_GPR(r22) << 687 CLEAR_GPR(r23) << 688 CLEAR_GPR(r24) << 689 CLEAR_GPR(r25) << 690 CLEAR_GPR(r26) << 691 CLEAR_GPR(r27) << 692 CLEAR_GPR(r28) << 693 CLEAR_GPR(r29) << 694 CLEAR_GPR(r30) << 695 CLEAR_GPR(r31) << 696 << 697 jump_start_kernel: << 698 
/* << 699 * jump to kernel entry (start_kernel) << 700 */ << 701 LOAD_SYMBOL_2_GPR(r30, start_kernel) << 702 l.jr r30 << 703 l.nop << 704 << 705 _flush_tlb: << 706 /* << 707 * I N V A L I D A T E T L B e n << 708 */ << 709 LOAD_SYMBOL_2_GPR(r5,SPR_DTLBMR_BASE(0 << 710 LOAD_SYMBOL_2_GPR(r6,SPR_ITLBMR_BASE(0 << 711 l.addi r7,r0,128 /* Maximum number of << 712 1: << 713 l.mtspr r5,r0,0x0 << 714 l.mtspr r6,r0,0x0 << 715 << 716 l.addi r5,r5,1 << 717 l.addi r6,r6,1 << 718 l.sfeq r7,r0 << 719 l.bnf 1b << 720 l.addi r7,r7,-1 << 721 << 722 l.jr r9 << 723 l.nop << 724 << 725 #ifdef CONFIG_SMP << 726 secondary_wait: << 727 /* Doze the cpu until we are asked to << 728 /* If we dont have power management sk << 729 l.mfspr r25,r0,SPR_UPR << 730 l.andi r25,r25,SPR_UPR_PMP << 731 l.sfeq r25,r0 << 732 l.bf secondary_check_release << 733 l.nop << 734 << 735 /* Setup special secondary exception h << 736 LOAD_SYMBOL_2_GPR(r3, _secondary_evbar << 737 tophys(r25,r3) << 738 l.mtspr r0,r25,SPR_EVBAR << 739 << 740 /* Enable Interrupts */ << 741 l.mfspr r25,r0,SPR_SR << 742 l.ori r25,r25,SPR_SR_IEE << 743 l.mtspr r0,r25,SPR_SR << 744 << 745 /* Unmask interrupts interrupts */ << 746 l.mfspr r25,r0,SPR_PICMR << 747 l.ori r25,r25,0xffff << 748 l.mtspr r0,r25,SPR_PICMR << 749 << 750 /* Doze */ << 751 l.mfspr r25,r0,SPR_PMR << 752 LOAD_SYMBOL_2_GPR(r3, SPR_PMR_DME) << 753 l.or r25,r25,r3 << 754 l.mtspr r0,r25,SPR_PMR << 755 << 756 /* Wakeup - Restore exception handler << 757 l.mtspr r0,r0,SPR_EVBAR << 758 << 759 secondary_check_release: << 760 /* << 761 * Check if we actually got the releas << 762 * sleep. << 763 */ << 764 l.mfspr r25,r0,SPR_COREID << 765 LOAD_SYMBOL_2_GPR(r3, secondary_releas << 766 tophys(r4, r3) << 767 l.lwz r3,0(r4) << 768 l.sfeq r25,r3 << 769 l.bnf secondary_wait << 770 l.nop << 771 /* fall through to secondary_init */ << 772 << 773 secondary_init: << 774 /* << 775 * set up initial ksp and current << 776 */ << 777 LOAD_SYMBOL_2_GPR(r10, secondary_threa << 778 tophys (r30,r10) << 779 l.lwz r10,0(r30) << 780 l.addi r1,r10,THREAD_SIZE << 781 tophys (r30,r10) << 782 l.sw TI_KSP(r30),r1 << 783 << 784 l.jal _ic_enable << 785 l.nop << 786 << 787 l.jal _dc_enable << 788 l.nop << 789 << 790 l.jal _flush_tlb << 791 l.nop << 792 << 793 /* << 794 * enable dmmu & immu << 795 */ << 796 l.mfspr r30,r0,SPR_SR << 797 l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME << 798 l.ori r28,r28,lo(SPR_SR_DME | SPR_SR << 799 l.or r30,r30,r28 << 800 /* << 801 * This is a bit tricky, we need to sw << 802 * to virtual addresses on the fly. << 803 * To do that, we first set up ESR wit << 804 * Then EPCR is set to secondary_start << 805 * "jump" to that. 
<< 806 */ << 807 l.mtspr r0,r30,SPR_ESR_BASE << 808 LOAD_SYMBOL_2_GPR(r30, secondary_start << 809 l.mtspr r0,r30,SPR_EPCR_BASE << 810 l.rfe << 811 << 812 secondary_start: << 813 LOAD_SYMBOL_2_GPR(r30, secondary_start << 814 l.jr r30 << 815 l.nop << 816 << 817 #endif << 818 << 819 /* ========================================[ c << 820 << 821 /* alignment here so we don't change m << 822 * memory controller defined << 823 */ << 824 .align 0x2000 << 825 << 826 _ic_enable: << 827 /* Check if IC present and skip enabli << 828 l.mfspr r24,r0,SPR_UPR << 829 l.andi r26,r24,SPR_UPR_ICP << 830 l.sfeq r26,r0 << 831 l.bf 9f << 832 l.nop << 833 << 834 /* Disable IC */ << 835 l.mfspr r6,r0,SPR_SR << 836 l.addi r5,r0,-1 << 837 l.xori r5,r5,SPR_SR_ICE << 838 l.and r5,r6,r5 << 839 l.mtspr r0,r5,SPR_SR << 840 << 841 /* Establish cache block size << 842 If BS=0, 16; << 843 If BS=1, 32; << 844 r14 contain block size << 845 */ << 846 l.mfspr r24,r0,SPR_ICCFGR << 847 l.andi r26,r24,SPR_ICCFGR_CBS << 848 l.srli r28,r26,7 << 849 l.ori r30,r0,16 << 850 l.sll r14,r30,r28 << 851 << 852 /* Establish number of cache sets << 853 r16 contains number of cache sets << 854 r28 contains log(# of cache sets) << 855 */ << 856 l.andi r26,r24,SPR_ICCFGR_NCS << 857 l.srli r28,r26,3 << 858 l.ori r30,r0,1 << 859 l.sll r16,r30,r28 << 860 << 861 /* Invalidate IC */ << 862 l.addi r6,r0,0 << 863 l.sll r5,r14,r28 << 864 // l.mul r5,r14,r16 << 865 // l.trap 1 << 866 // l.addi r5,r0,IC_SIZE << 867 1: << 868 l.mtspr r0,r6,SPR_ICBIR << 869 l.sfne r6,r5 << 870 l.bf 1b << 871 l.add r6,r6,r14 << 872 // l.addi r6,r6,IC_LINE << 873 << 874 /* Enable IC */ << 875 l.mfspr r6,r0,SPR_SR << 876 l.ori r6,r6,SPR_SR_ICE << 877 l.mtspr r0,r6,SPR_SR << 878 l.nop << 879 l.nop << 880 l.nop << 881 l.nop << 882 l.nop << 883 l.nop << 884 l.nop << 885 l.nop << 886 l.nop << 887 l.nop << 888 9: << 889 l.jr r9 << 890 l.nop << 891 << 892 _dc_enable: << 893 /* Check if DC present and skip enabli << 894 l.mfspr r24,r0,SPR_UPR << 895 l.andi r26,r24,SPR_UPR_DCP << 896 l.sfeq r26,r0 << 897 l.bf 9f << 898 l.nop << 899 << 900 /* Disable DC */ << 901 l.mfspr r6,r0,SPR_SR << 902 l.addi r5,r0,-1 << 903 l.xori r5,r5,SPR_SR_DCE << 904 l.and r5,r6,r5 << 905 l.mtspr r0,r5,SPR_SR << 906 << 907 /* Establish cache block size << 908 If BS=0, 16; << 909 If BS=1, 32; << 910 r14 contain block size << 911 */ << 912 l.mfspr r24,r0,SPR_DCCFGR << 913 l.andi r26,r24,SPR_DCCFGR_CBS << 914 l.srli r28,r26,7 << 915 l.ori r30,r0,16 << 916 l.sll r14,r30,r28 << 917 << 918 /* Establish number of cache sets << 919 r16 contains number of cache sets << 920 r28 contains log(# of cache sets) << 921 */ << 922 l.andi r26,r24,SPR_DCCFGR_NCS << 923 l.srli r28,r26,3 << 924 l.ori r30,r0,1 << 925 l.sll r16,r30,r28 << 926 << 927 /* Invalidate DC */ << 928 l.addi r6,r0,0 << 929 l.sll r5,r14,r28 << 930 1: << 931 l.mtspr r0,r6,SPR_DCBIR << 932 l.sfne r6,r5 << 933 l.bf 1b << 934 l.add r6,r6,r14 << 935 << 936 /* Enable DC */ << 937 l.mfspr r6,r0,SPR_SR << 938 l.ori r6,r6,SPR_SR_DCE << 939 l.mtspr r0,r6,SPR_SR << 940 9: << 941 l.jr r9 << 942 l.nop << 943 << 944 /* =========================================== << 945 << 946 #define DTLB_UP_CONVERT_MASK 0x3fa << 947 #define ITLB_UP_CONVERT_MASK 0x3a << 948 << 949 /* for SMP we'd have (this is a bit subtle, CC << 950 * for SMP, but since we have _PAGE_PRESENT bi << 951 * we can just modify the mask) << 952 */ << 953 #define DTLB_SMP_CONVERT_MASK 0x3fb << 954 #define ITLB_SMP_CONVERT_MASK 0x3b << 955 << 956 /* ---[ boot dtlb miss handler ]-------------- << 957 << 958 
boot_dtlb_miss_handler: << 959 << 960 /* mask for DTLB_MR register: - (0) sets V (va << 961 * - (31-12) sets b << 962 */ << 963 #define DTLB_MR_MASK 0xfffff001 << 964 << 965 /* mask for DTLB_TR register: - (2) sets CI (c << 966 * - (4) sets A (ac << 967 * - (5) sets D (di << 968 * - (8) sets SRE ( << 969 * - (9) sets SWE ( << 970 * - (31-12) sets b << 971 */ << 972 #define DTLB_TR_MASK 0xfffff332 << 973 << 974 /* These are for masking out the VPN/PPN value << 975 * it's not the same as the PFN */ << 976 #define VPN_MASK 0xfffff000 << 977 #define PPN_MASK 0xfffff000 << 978 << 979 << 980 EXCEPTION_STORE_GPR6 << 981 << 982 #if 0 << 983 l.mfspr r6,r0,SPR_ESR_BASE // << 984 l.andi r6,r6,SPR_SR_SM // << 985 l.sfeqi r6,0 // << 986 l.bf exit_with_no_dtranslation // << 987 l.nop << 988 #endif << 989 << 990 /* this could be optimized by moving s << 991 * non r6 registers here, and jumping << 992 * if not in supervisor mode << 993 */ << 994 << 995 EXCEPTION_STORE_GPR2 << 996 EXCEPTION_STORE_GPR3 << 997 EXCEPTION_STORE_GPR4 << 998 EXCEPTION_STORE_GPR5 << 999 << 1000 l.mfspr r4,r0,SPR_EEAR_BASE // << 1001 << 1002 immediate_translation: << 1003 CLEAR_GPR(r6) << 1004 << 1005 l.srli r3,r4,0xd // << 1006 << 1007 l.mfspr r6, r0, SPR_DMMUCFGR << 1008 l.andi r6, r6, SPR_DMMUCFGR_NTS << 1009 l.srli r6, r6, SPR_DMMUCFGR_NTS_OFF << 1010 l.ori r5, r0, 0x1 << 1011 l.sll r5, r5, r6 // r5 = numbe << 1012 l.addi r6, r5, -1 // r6 = nsets << 1013 l.and r2, r3, r6 // r2 <- r3 % << 1014 << 1015 l.or r6,r6,r4 // << 1016 l.ori r6,r6,~(VPN_MASK) // << 1017 l.movhi r5,hi(DTLB_MR_MASK) // << 1018 l.ori r5,r5,lo(DTLB_MR_MASK) // << 1019 l.and r5,r5,r6 // << 1020 l.mtspr r2,r5,SPR_DTLBMR_BASE(0) // << 1021 << 1022 /* set up DTLB with no translation fo << 1023 LOAD_SYMBOL_2_GPR(r6,0xbfffffff) << 1024 l.sfgeu r6,r4 // << 1025 l.bf 1f // << 1026 l.and r3,r4,r4 // << 1027 << 1028 tophys(r3,r4) // << 1029 1: << 1030 l.ori r3,r3,~(PPN_MASK) // << 1031 l.movhi r5,hi(DTLB_TR_MASK) // << 1032 l.ori r5,r5,lo(DTLB_TR_MASK) // << 1033 l.and r5,r5,r3 // << 1034 l.mtspr r2,r5,SPR_DTLBTR_BASE(0) // << 1035 << 1036 EXCEPTION_LOAD_GPR6 << 1037 EXCEPTION_LOAD_GPR5 << 1038 EXCEPTION_LOAD_GPR4 << 1039 EXCEPTION_LOAD_GPR3 << 1040 EXCEPTION_LOAD_GPR2 << 1041 << 1042 l.rfe // << 1043 << 1044 exit_with_no_dtranslation: << 1045 /* EA out of memory or not in supervi << 1046 EXCEPTION_LOAD_GPR6 << 1047 EXCEPTION_LOAD_GPR4 << 1048 l.j _dispatch_bus_fault << 1049 << 1050 /* ---[ boot itlb miss handler ]------------- << 1051 << 1052 boot_itlb_miss_handler: << 1053 << 1054 /* mask for ITLB_MR register: - sets V (valid << 1055 * - sets bits bel << 1056 */ << 1057 #define ITLB_MR_MASK 0xfffff001 << 1058 << 1059 /* mask for ITLB_TR register: - sets A (acces << 1060 * - sets SXE (sup << 1061 * - sets bits bel << 1062 */ << 1063 #define ITLB_TR_MASK 0xfffff050 << 1064 << 1065 /* << 1066 #define VPN_MASK 0xffffe000 << 1067 #define PPN_MASK 0xffffe000 << 1068 */ << 1069 << 1070 << 1071 << 1072 EXCEPTION_STORE_GPR2 << 1073 EXCEPTION_STORE_GPR3 << 1074 EXCEPTION_STORE_GPR4 << 1075 EXCEPTION_STORE_GPR5 << 1076 EXCEPTION_STORE_GPR6 << 1077 << 1078 #if 0 << 1079 l.mfspr r6,r0,SPR_ESR_BASE // << 1080 l.andi r6,r6,SPR_SR_SM // << 1081 l.sfeqi r6,0 // << 1082 l.bf exit_with_no_itranslation << 1083 l.nop << 1084 #endif << 1085 << 1086 << 1087 l.mfspr r4,r0,SPR_EEAR_BASE // << 1088 << 1089 earlyearly: << 1090 CLEAR_GPR(r6) << 1091 << 1092 l.srli r3,r4,0xd // << 1093 << 1094 l.mfspr r6, r0, SPR_IMMUCFGR << 1095 l.andi r6, r6, SPR_IMMUCFGR_NTS << 1096 l.srli r6, r6, 
SPR_IMMUCFGR_NTS_OFF << 1097 l.ori r5, r0, 0x1 << 1098 l.sll r5, r5, r6 // r5 = numbe << 1099 l.addi r6, r5, -1 // r6 = nsets << 1100 l.and r2, r3, r6 // r2 <- r3 % << 1101 << 1102 l.or r6,r6,r4 // << 1103 l.ori r6,r6,~(VPN_MASK) // << 1104 l.movhi r5,hi(ITLB_MR_MASK) // << 1105 l.ori r5,r5,lo(ITLB_MR_MASK) // << 1106 l.and r5,r5,r6 // << 1107 l.mtspr r2,r5,SPR_ITLBMR_BASE(0) // << 1108 << 1109 /* << 1110 * set up ITLB with no translation fo << 1111 * << 1112 * we need this for head.S mapping (E << 1113 * which run with mmu enabled into en << 1114 * << 1115 */ << 1116 LOAD_SYMBOL_2_GPR(r6,0x0fffffff) << 1117 l.sfgeu r6,r4 // << 1118 l.bf 1f // << 1119 l.and r3,r4,r4 // << 1120 << 1121 tophys(r3,r4) // << 1122 1: << 1123 l.ori r3,r3,~(PPN_MASK) // << 1124 l.movhi r5,hi(ITLB_TR_MASK) // << 1125 l.ori r5,r5,lo(ITLB_TR_MASK) // << 1126 l.and r5,r5,r3 // << 1127 l.mtspr r2,r5,SPR_ITLBTR_BASE(0) // << 1128 << 1129 EXCEPTION_LOAD_GPR6 << 1130 EXCEPTION_LOAD_GPR5 << 1131 EXCEPTION_LOAD_GPR4 << 1132 EXCEPTION_LOAD_GPR3 << 1133 EXCEPTION_LOAD_GPR2 << 1134 << 1135 l.rfe // << 1136 << 1137 exit_with_no_itranslation: << 1138 EXCEPTION_LOAD_GPR4 << 1139 EXCEPTION_LOAD_GPR6 << 1140 l.j _dispatch_bus_fault << 1141 l.nop << 1142 << 1143 /* ========================================== << 1144 /* << 1145 * Stuff below here shouldn't go into .head s << 1146 * can be moved to entry.S ??? << 1147 */ << 1148 << 1149 /* ========================================== << 1150 << 1151 /* << 1152 * Comments: << 1153 * Exception handlers are entered with MMU << 1154 * needs to use physical addressing << 1155 * << 1156 */ << 1157 << 1158 .text << 1159 ENTRY(dtlb_miss_handler) << 1160 EXCEPTION_STORE_GPR2 << 1161 EXCEPTION_STORE_GPR3 << 1162 EXCEPTION_STORE_GPR4 << 1163 /* << 1164 * get EA of the miss << 1165 */ << 1166 l.mfspr r2,r0,SPR_EEAR_BASE << 1167 /* << 1168 * pmd = (pmd_t *)(current_pgd + pgd_ << 1169 */ << 1170 GET_CURRENT_PGD(r3,r4) // r3 << 1171 l.srli r4,r2,0x18 // >> << 1172 l.slli r4,r4,0x2 // to << 1173 l.add r3,r4,r3 // r4 << 1174 /* << 1175 * if (pmd_none(*pmd)) << 1176 * goto pmd_none: << 1177 */ << 1178 tophys (r4,r3) << 1179 l.lwz r3,0x0(r4) // ge << 1180 l.sfne r3,r0 << 1181 l.bnf d_pmd_none << 1182 l.addi r3,r0,0xffffe000 // PA << 1183 << 1184 d_pmd_good: << 1185 /* << 1186 * pte = *pte_offset(pmd, daddr); << 1187 */ << 1188 l.lwz r4,0x0(r4) // ge << 1189 l.and r4,r4,r3 // & << 1190 l.srli r2,r2,0xd // >> << 1191 l.andi r3,r2,0x7ff // (1 << 1192 l.slli r3,r3,0x2 // to << 1193 l.add r3,r3,r4 << 1194 l.lwz r3,0x0(r3) // th << 1195 /* << 1196 * if (!pte_present(pte)) << 1197 */ << 1198 l.andi r4,r3,0x1 << 1199 l.sfne r4,r0 // is << 1200 l.bnf d_pte_not_present << 1201 l.addi r4,r0,0xffffe3fa // PA << 1202 /* << 1203 * fill DTLB TR register << 1204 */ << 1205 l.and r4,r3,r4 // ap << 1206 // Determine number of DMMU sets << 1207 l.mfspr r2, r0, SPR_DMMUCFGR << 1208 l.andi r2, r2, SPR_DMMUCFGR_NTS << 1209 l.srli r2, r2, SPR_DMMUCFGR_NTS_OFF << 1210 l.ori r3, r0, 0x1 << 1211 l.sll r3, r3, r2 // r3 = numbe << 1212 l.addi r2, r3, -1 // r2 = nsets << 1213 l.mfspr r3, r0, SPR_EEAR_BASE << 1214 l.srli r3, r3, 0xd // >> PAGE_SH << 1215 l.and r2, r3, r2 // calc offse << 1216 << 1217 l.mtspr r2,r4,SPR_DTLBTR_BASE(0) << 1218 /* << 1219 * fill DTLB MR register << 1220 */ << 1221 l.slli r3, r3, 0xd /* << << 1222 l.ori r4,r3,0x1 // se << 1223 l.mtspr r2,r4,SPR_DTLBMR_BASE(0) << 1224 << 1225 EXCEPTION_LOAD_GPR2 << 1226 EXCEPTION_LOAD_GPR3 << 1227 EXCEPTION_LOAD_GPR4 << 1228 l.rfe << 1229 d_pmd_none: << 1230 
d_pte_not_present: << 1231 EXCEPTION_LOAD_GPR2 << 1232 EXCEPTION_LOAD_GPR3 << 1233 EXCEPTION_LOAD_GPR4 << 1234 EXCEPTION_HANDLE(_dtlb_miss_page_faul << 1235 << 1236 /* ========================================== << 1237 ENTRY(itlb_miss_handler) << 1238 EXCEPTION_STORE_GPR2 << 1239 EXCEPTION_STORE_GPR3 << 1240 EXCEPTION_STORE_GPR4 << 1241 /* << 1242 * get EA of the miss << 1243 */ << 1244 l.mfspr r2,r0,SPR_EEAR_BASE << 1245 << 1246 /* << 1247 * pmd = (pmd_t *)(current_pgd + pgd_ << 1248 * << 1249 */ << 1250 GET_CURRENT_PGD(r3,r4) // r3 << 1251 l.srli r4,r2,0x18 // >> << 1252 l.slli r4,r4,0x2 // to << 1253 l.add r3,r4,r3 // r4 << 1254 /* << 1255 * if (pmd_none(*pmd)) << 1256 * goto pmd_none: << 1257 */ << 1258 tophys (r4,r3) << 1259 l.lwz r3,0x0(r4) // ge << 1260 l.sfne r3,r0 << 1261 l.bnf i_pmd_none << 1262 l.addi r3,r0,0xffffe000 // PA << 1263 << 1264 i_pmd_good: << 1265 /* << 1266 * pte = *pte_offset(pmd, iaddr); << 1267 * << 1268 */ << 1269 l.lwz r4,0x0(r4) // ge << 1270 l.and r4,r4,r3 // & << 1271 l.srli r2,r2,0xd // >> << 1272 l.andi r3,r2,0x7ff // (1 << 1273 l.slli r3,r3,0x2 // to << 1274 l.add r3,r3,r4 << 1275 l.lwz r3,0x0(r3) // th << 1276 /* << 1277 * if (!pte_present(pte)) << 1278 * << 1279 */ << 1280 l.andi r4,r3,0x1 << 1281 l.sfne r4,r0 // is << 1282 l.bnf i_pte_not_present << 1283 l.addi r4,r0,0xffffe03a // PA << 1284 /* << 1285 * fill ITLB TR register << 1286 */ << 1287 l.and r4,r3,r4 // ap << 1288 l.andi r3,r3,0x7c0 // _P << 1289 l.sfeq r3,r0 << 1290 l.bf itlb_tr_fill //_workaround << 1291 // Determine number of IMMU sets << 1292 l.mfspr r2, r0, SPR_IMMUCFGR << 1293 l.andi r2, r2, SPR_IMMUCFGR_NTS << 1294 l.srli r2, r2, SPR_IMMUCFGR_NTS_OFF << 1295 l.ori r3, r0, 0x1 << 1296 l.sll r3, r3, r2 // r3 = numbe << 1297 l.addi r2, r3, -1 // r2 = nsets << 1298 l.mfspr r3, r0, SPR_EEAR_BASE << 1299 l.srli r3, r3, 0xd // >> PAGE_SH << 1300 l.and r2, r3, r2 // calc offse << 1301 << 1302 /* << 1303 * __PHX__ :: fixme << 1304 * we should not just blindly set executable << 1305 * but it does help with ping. the clean way << 1306 * (and fix it) why stack doesn't have execut << 1307 */ << 1308 << 1309 itlb_tr_fill_workaround: << 1310 l.ori r4,r4,0xc0 // | << 1311 itlb_tr_fill: << 1312 l.mtspr r2,r4,SPR_ITLBTR_BASE(0) << 1313 /* << 1314 * fill DTLB MR register << 1315 */ << 1316 l.slli r3, r3, 0xd /* << << 1317 l.ori r4,r3,0x1 // se << 1318 l.mtspr r2,r4,SPR_ITLBMR_BASE(0) << 1319 << 1320 EXCEPTION_LOAD_GPR2 << 1321 EXCEPTION_LOAD_GPR3 << 1322 EXCEPTION_LOAD_GPR4 << 1323 l.rfe << 1324 << 1325 i_pmd_none: << 1326 i_pte_not_present: << 1327 EXCEPTION_LOAD_GPR2 << 1328 EXCEPTION_LOAD_GPR3 << 1329 EXCEPTION_LOAD_GPR4 << 1330 EXCEPTION_HANDLE(_itlb_miss_page_faul << 1331 << 1332 /* ========================================== << 1333 << 1334 << 1335 /* ========================================== << 1336 << 1337 /* << 1338 * DESC: Prints ASCII character stored in r7 << 1339 * << 1340 * PRMS: r7 - a 32-bit value with an ASCI << 1341 * position. 
<< 1342 * << 1343 * PREQ: The UART at UART_BASE_ADD has to be << 1344 * << 1345 * POST: internally used but restores: << 1346 * r4 - to store UART_BASE_ADD << 1347 * r5 - for loading OFF_TXFULL / TH << 1348 * r6 - for storing bitmask (SERIAL << 1349 */ << 1350 ENTRY(_emergency_putc) << 1351 EMERGENCY_PRINT_STORE_GPR4 << 1352 EMERGENCY_PRINT_STORE_GPR5 << 1353 EMERGENCY_PRINT_STORE_GPR6 << 1354 << 1355 l.movhi r4,hi(UART_BASE_ADD) << 1356 l.ori r4,r4,lo(UART_BASE_ADD) << 1357 << 1358 #if defined(CONFIG_SERIAL_LITEUART) << 1359 /* Check OFF_TXFULL status */ << 1360 1: l.lwz r5,4(r4) << 1361 l.andi r5,r5,0xff << 1362 l.sfnei r5,0 << 1363 l.bf 1b << 1364 l.nop << 1365 << 1366 /* Write character */ << 1367 l.andi r7,r7,0xff << 1368 l.sw 0(r4),r7 << 1369 #elif defined(CONFIG_SERIAL_8250) << 1370 /* Check UART LSR THRE (hold) bit */ << 1371 l.addi r6,r0,0x20 << 1372 1: l.lbz r5,5(r4) << 1373 l.andi r5,r5,0x20 << 1374 l.sfeq r5,r6 << 1375 l.bnf 1b << 1376 l.nop << 1377 << 1378 /* Write character */ << 1379 l.sb 0(r4),r7 << 1380 << 1381 /* Check UART LSR THRE|TEMT (hold, em << 1382 l.addi r6,r0,0x60 << 1383 1: l.lbz r5,5(r4) << 1384 l.andi r5,r5,0x60 << 1385 l.sfeq r5,r6 << 1386 l.bnf 1b << 1387 l.nop << 1388 #endif << 1389 EMERGENCY_PRINT_LOAD_GPR6 << 1390 EMERGENCY_PRINT_LOAD_GPR5 << 1391 EMERGENCY_PRINT_LOAD_GPR4 << 1392 l.jr r9 << 1393 l.nop << 1394 << 1395 /* << 1396 * DSCR: prints a string referenced by r3. << 1397 * << 1398 * PRMS: r3 - address of the firs << 1399 * terminated string to << 1400 * << 1401 * PREQ: UART at UART_BASE_ADD has to be init << 1402 * << 1403 * POST: caller should be aware that r3, r9 a << 1404 */ << 1405 ENTRY(_emergency_print) << 1406 EMERGENCY_PRINT_STORE_GPR7 << 1407 EMERGENCY_PRINT_STORE_GPR9 << 1408 << 1409 /* Load character to r7, check for nu << 1410 2: l.lbz r7,0(r3) << 1411 l.sfeqi r7,0x0 << 1412 l.bf 9f << 1413 l.nop << 1414 << 1415 l.jal _emergency_putc << 1416 l.nop << 1417 << 1418 /* next character */ << 1419 l.j 2b << 1420 l.addi r3,r3,0x1 << 1421 << 1422 9: << 1423 EMERGENCY_PRINT_LOAD_GPR9 << 1424 EMERGENCY_PRINT_LOAD_GPR7 << 1425 l.jr r9 << 1426 l.nop << 1427 << 1428 /* << 1429 * DSCR: prints a number in r3 in hex. << 1430 * << 1431 * PRMS: r3 - a 32-bit unsigned i << 1432 * << 1433 * PREQ: UART at UART_BASE_ADD has to be init << 1434 * << 1435 * POST: caller should be aware that r3, r9 a << 1436 */ << 1437 ENTRY(_emergency_print_nr) << 1438 EMERGENCY_PRINT_STORE_GPR7 << 1439 EMERGENCY_PRINT_STORE_GPR8 << 1440 EMERGENCY_PRINT_STORE_GPR9 << 1441 << 1442 l.addi r8,r0,32 // sh << 1443 << 1444 1: /* remove leading zeros */ << 1445 l.addi r8,r8,-0x4 << 1446 l.srl r7,r3,r8 << 1447 l.andi r7,r7,0xf << 1448 << 1449 /* don't skip the last zero if number << 1450 l.sfeqi r8,0x4 << 1451 l.bf 2f << 1452 l.nop << 1453 << 1454 l.sfeq r7,r0 << 1455 l.bf 1b << 1456 l.nop << 1457 << 1458 2: << 1459 l.srl r7,r3,r8 << 1460 << 1461 l.andi r7,r7,0xf << 1462 l.sflts r8,r0 << 1463 l.bf 9f << 1464 << 1465 /* Numbers greater than 9 translate t << 1466 l.sfgtui r7,0x9 << 1467 l.bnf 8f << 1468 l.nop << 1469 l.addi r7,r7,0x27 << 1470 << 1471 /* Convert to ascii and output charac << 1472 8: l.jal _emergency_putc << 1473 l.addi r7,r7,0x30 << 1474 << 1475 /* next character */ << 1476 l.j 2b << 1477 l.addi r8,r8,-0x4 << 1478 << 1479 9: << 1480 EMERGENCY_PRINT_LOAD_GPR9 << 1481 EMERGENCY_PRINT_LOAD_GPR8 << 1482 EMERGENCY_PRINT_LOAD_GPR7 << 1483 l.jr r9 << 1484 l.nop << 1485 << 1486 /* << 1487 * This should be used for debugging only. 
<< 1488 * It messes up the Linux early serial output << 1489 * somehow, so use it sparingly and essential << 1490 * only if you need to debug something that g << 1491 * before Linux gets the early serial going. << 1492 * << 1493 * Furthermore, you'll have to make sure you << 1494 * UART_DEVISOR correctly according to the sy << 1495 * clock rate. << 1496 * << 1497 * << 1498 */ << 1499 << 1500 << 1501 << 1502 #define SYS_CLK 20000000 << 1503 //#define SYS_CLK 1843200 << 1504 #define OR32_CONSOLE_BAUD 115200 << 1505 #define UART_DIVISOR SYS_CLK/(16*OR32_C << 1506 << 1507 ENTRY(_early_uart_init) << 1508 l.movhi r3,hi(UART_BASE_ADD) << 1509 l.ori r3,r3,lo(UART_BASE_ADD) << 1510 << 1511 #if defined(CONFIG_SERIAL_8250) << 1512 l.addi r4,r0,0x7 << 1513 l.sb 0x2(r3),r4 << 1514 << 1515 l.addi r4,r0,0x0 << 1516 l.sb 0x1(r3),r4 << 1517 << 1518 l.addi r4,r0,0x3 << 1519 l.sb 0x3(r3),r4 << 1520 << 1521 l.lbz r5,3(r3) << 1522 l.ori r4,r5,0x80 << 1523 l.sb 0x3(r3),r4 << 1524 l.addi r4,r0,((UART_DIVISOR>>8) & 0x << 1525 l.sb UART_DLM(r3),r4 << 1526 l.addi r4,r0,((UART_DIVISOR) & 0x000 << 1527 l.sb UART_DLL(r3),r4 << 1528 l.sb 0x3(r3),r5 << 1529 #endif << 1530 << 1531 l.jr r9 << 1532 l.nop << 1533 << 1534 .align 0x1000 << 1535 .global _secondary_evbar << 1536 _secondary_evbar: << 1537 << 1538 .space 0x800 << 1539 /* Just disable interrupts and Return << 1540 l.ori r3,r0,SPR_SR_SM << 1541 l.mtspr r0,r3,SPR_ESR_BASE << 1542 l.rfe << 1543 << 1544 << 1545 .section .rodata << 1546 _string_unhandled_exception: << 1547 .string "\r\nRunarunaround: Unhandled << 1548 << 1549 _string_epc_prefix: << 1550 .string ": EPC=0x\0" << 1551 << 1552 _string_nl: << 1553 .string "\r\n\0" << 1554 << 1555 << 1556 /* ========================================[ << 1557 << 1558 /* << 1559 * .data section should be page aligned << 1560 * (look into arch/openrisc/kernel/vmlin << 1561 */ << 1562 .section .data,"aw" << 1563 .align 8192 << 1564 .global empty_zero_page << 1565 empty_zero_page: << 1566 .space 8192 << 1567 << 1568 .global swapper_pg_dir << 1569 swapper_pg_dir: << 1570 .space 8192 << 1571 << 1572 .global _unhandled_stack << 1573 _unhandled_stack: << 1574 .space 8192 << 1575 _unhandled_stack_top: << 1576 << 1577 /* ========================================== <<
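For readers tracing the dtlb_miss_handler assembly in the listing above, here is a rough C sketch (not taken from the kernel tree) of the same software TLB refill: index the pgd with EA >> 24, index the pte table with (EA >> 13) & 0x7ff, check the present bit, then program the DTLB match/translate registers for the selected set. The helpers tlb_write() and current_pgd_phys(), the enum constants, and the hard-coded masks are placeholders chosen to mirror the listing, not real kernel APIs.

/*
 * Illustrative sketch only -- restates the dtlb_miss_handler walk,
 * which runs with the MMU disabled, in C pseudocode.
 */
#include <stdint.h>

#define PAGE_SHIFT   13          /* 8 KiB pages; the ">> 0xd" shifts in the listing */
#define PTE_PRESENT  0x1

enum tlb_reg { DTLB_MR, DTLB_TR };          /* stand-ins for SPR_DTLBMR/TR_BASE(0)  */

extern uint32_t *current_pgd_phys(void);    /* placeholder: __pa of the current pgd */
extern void tlb_write(enum tlb_reg which, int set, uint32_t val);

/* Returns 0 when the TLB entry was filled, -1 to fall back to do_page_fault(). */
static int dtlb_miss(uint32_t eear, int nsets)
{
	uint32_t *pgd = current_pgd_phys();
	uint32_t pmd = pgd[eear >> 24];                 /* pgd index: ">> 0x18"    */
	if (pmd == 0)
		return -1;                              /* d_pmd_none              */

	uint32_t *pte_base = (uint32_t *)(pmd & 0xffffe000);
	uint32_t pte = pte_base[(eear >> PAGE_SHIFT) & 0x7ff];
	if (!(pte & PTE_PRESENT))
		return -1;                              /* d_pte_not_present       */

	int set = (eear >> PAGE_SHIFT) & (nsets - 1);
	tlb_write(DTLB_TR, set, pte & 0xffffe3fa);      /* PPN + UP permission mask */
	tlb_write(DTLB_MR, set, (eear & ~0x1fffu) | 1); /* VPN + valid bit          */
	return 0;
}

The itlb_miss_handler in the same listing performs the identical walk against SPR_ITLBMR/ITLBTR, with the extra "executable bit" workaround noted in its comments.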