/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
 *
 * Reset/NMI/re-entry vectors for BMIPS processors
 */


#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
#include <asm/hazards.h>
#include <asm/bmips.h>

	.macro	BARRIER
	.set	mips32
	_ssnop
	_ssnop
	_ssnop
	.set	mips0
	.endm

/***********************************************************************
 * Alternate CPU1 startup vector for BMIPS4350
 *
 * On some systems the bootloader has already started CPU1 and configured
 * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is
 * triggered by the SW1 interrupt.  If that is the case we try to move
 * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380.
 ***********************************************************************/

LEAF(bmips_smp_movevec)
	la	k0, 1f
	li	k1, CKSEG1
	or	k0, k1
	jr	k0

1:
	/* clear IV, pending IPIs */
	mtc0	zero, CP0_CAUSE

	/* re-enable IRQs to wait for SW1 */
	li	k0, ST0_IE | ST0_BEV | STATUSF_IP1
	mtc0	k0, CP0_STATUS

	/* set up CPU1 CBR; move BASE to 0xa000_0000 */
	li	k0, 0xff400000
	mtc0	k0, $22, 6
	/* set up relocation vector address based on thread ID */
	mfc0	k1, $22, 3
	srl	k1, 16
	andi	k1, 0x8000
	or	k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
	or	k0, k1
	li	k1, 0xa0080000
	sw	k1, 0(k0)

	/* wait here for SW1 interrupt from bmips_boot_secondary() */
	wait

	la	k0, bmips_reset_nmi_vec
	li	k1, CKSEG1
	or	k0, k1
	jr	k0
END(bmips_smp_movevec)

/***********************************************************************
 * Reset/NMI vector
 * For BMIPS processors that can relocate their exception vectors, this
 * entire function gets copied to 0x8000_0000.
 ***********************************************************************/

NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
	.set	push
	.set	noat
	.align	4

#ifdef CONFIG_SMP
	/* if the NMI bit is clear, assume this is a CPU1 reset instead */
	li	k1, (1 << 19)
	mfc0	k0, CP0_STATUS
	and	k0, k1
	beqz	k0, soft_reset

#if defined(CONFIG_CPU_BMIPS5000)
	mfc0	k0, CP0_PRID
	li	k1, PRID_IMP_BMIPS5000
	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
	andi	k0, PRID_IMP_BMIPS5000
	bne	k0, k1, 1f

	/* if we're not on core 0, this must be the SMP boot signal */
	li	k1, (3 << 25)
	mfc0	k0, $22
	and	k0, k1
	bnez	k0, bmips_smp_entry
1:
#endif /* CONFIG_CPU_BMIPS5000 */
#endif /* CONFIG_SMP */

	/* nope, it's just a regular NMI */
	SAVE_ALL
	move	a0, sp

	/* clear EXL, ERL, BEV so that TLB refills still work */
	mfc0	k0, CP0_STATUS
	li	k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE
	or	k0, k1
	xor	k0, k1
	mtc0	k0, CP0_STATUS
	BARRIER

	/* jump to the NMI handler function */
	la	k0, nmi_handler
	jr	k0

	RESTORE_ALL
	.set	arch=r4000
	eret

#ifdef CONFIG_SMP
soft_reset:

#if defined(CONFIG_CPU_BMIPS5000)
	mfc0	k0, CP0_PRID
	andi	k0, 0xff00
	li	k1, PRID_IMP_BMIPS5200
	bne	k0, k1, bmips_smp_entry

	/* if running on TP 1, jump to bmips_smp_entry */
	mfc0	k0, $22
	li	k1, (1 << 24)
	and	k1, k0
	bnez	k1, bmips_smp_entry
	nop

	/*
	 * running on TP0, can not be core 0 (the boot core).
	 * Check for soft reset.  Indicates a warm boot
	 */
	mfc0	k0, $12
	li	k1, (1 << 20)
	and	k0, k1
	beqz	k0, bmips_smp_entry

	/*
	 * Warm boot.
	 * Cache init is only done on TP0
	 */
	la	k0, bmips_5xxx_init
	jalr	k0
	nop

	b	bmips_smp_entry
	nop
#endif

/***********************************************************************
 * CPU1 reset vector (used for the initial boot only)
 * This is still part of bmips_reset_nmi_vec().
 ***********************************************************************/

bmips_smp_entry:

	/* set up CP0 STATUS; enable FPU */
	li	k0, 0x30000000
	mtc0	k0, CP0_STATUS
	BARRIER

	/* set local CP0 CONFIG to make kseg0 cacheable, write-back */
	mfc0	k0, CP0_CONFIG
	ori	k0, 0x07
	xori	k0, 0x04
	mtc0	k0, CP0_CONFIG

	mfc0	k0, CP0_PRID
	andi	k0, 0xff00
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
	li	k1, PRID_IMP_BMIPS43XX
	bne	k0, k1, 2f

	/* initialize CPU1's local I-cache */
	li	k0, 0x80000000
	li	k1, 0x80010000
	mtc0	zero, $28
	mtc0	zero, $28, 1
	BARRIER

1:	cache	Index_Store_Tag_I, 0(k0)
	addiu	k0, 16
	bne	k0, k1, 1b

	b	3f
2:
#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */
#if defined(CONFIG_CPU_BMIPS5000)
	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
	li	k1, PRID_IMP_BMIPS5000
	andi	k0, PRID_IMP_BMIPS5000
	bne	k0, k1, 3f

	/* set exception vector base */
	la	k0, ebase
	lw	k0, 0(k0)
	mtc0	k0, $15, 1
	BARRIER
#endif /* CONFIG_CPU_BMIPS5000 */
3:
	/* jump back to kseg0 in case we need to remap the kseg1 area */
	la	k0, 1f
	jr	k0
1:
	la	k0, bmips_enable_xks01
	jalr	k0

	/* use temporary stack to set up upper memory TLB */
	li	sp, BMIPS_WARM_RESTART_VEC
	la	k0, plat_wired_tlb_setup
	jalr	k0

	/* switch to permanent stack and continue booting */

	.global bmips_secondary_reentry
bmips_secondary_reentry:
	la	k0, bmips_smp_boot_sp
	lw	sp, 0(k0)
	la	k0, bmips_smp_boot_gp
	lw	gp, 0(k0)
	la	k0, start_secondary
	jr	k0

#endif /* CONFIG_SMP */

	.align	4
	.global bmips_reset_nmi_vec_end
bmips_reset_nmi_vec_end:

END(bmips_reset_nmi_vec)

	.set	pop

/***********************************************************************
 * CPU1 warm restart vector (used for second and subsequent boots).
 * Also used for S2 standby recovery (PM).
 * This entire function gets copied to (BMIPS_WARM_RESTART_VEC)
 ***********************************************************************/

LEAF(bmips_smp_int_vec)

	.align	4
	mfc0	k0, CP0_STATUS
	ori	k0, 0x01
	xori	k0, 0x01
	mtc0	k0, CP0_STATUS
	eret

	.align	4
	.global bmips_smp_int_vec_end
bmips_smp_int_vec_end:

END(bmips_smp_int_vec)

/***********************************************************************
 * XKS01 support
 * Certain CPUs support extending kseg0 to 1024MB.
 ***********************************************************************/

LEAF(bmips_enable_xks01)

#if defined(CONFIG_XKS01)
	mfc0	t0, CP0_PRID
	andi	t2, t0, 0xff00
#if defined(CONFIG_CPU_BMIPS4380)
	li	t1, PRID_IMP_BMIPS43XX
	bne	t2, t1, 1f

	andi	t0, 0xff
	addiu	t1, t0, -PRID_REV_BMIPS4380_HI
	bgtz	t1, 2f
	addiu	t0, -PRID_REV_BMIPS4380_LO
	bltz	t0, 2f

	mfc0	t0, $22, 3
	li	t1, 0x1ff0
	li	t2, (1 << 12) | (1 << 9)
	or	t0, t1
	xor	t0, t1
	or	t0, t2
	mtc0	t0, $22, 3
	BARRIER
	b	2f
1:
#endif /* CONFIG_CPU_BMIPS4380 */
#if defined(CONFIG_CPU_BMIPS5000)
	li	t1, PRID_IMP_BMIPS5000
	/* mask with PRID_IMP_BMIPS5000 to cover both variants */
	andi	t2, PRID_IMP_BMIPS5000
	bne	t2, t1, 2f

	mfc0	t0, $22, 5
	li	t1, 0x01ff
	li	t2, (1 << 8) | (1 << 5)
	or	t0, t1
	xor	t0, t1
	or	t0, t2
	mtc0	t0, $22, 5
	BARRIER
#endif /* CONFIG_CPU_BMIPS5000 */
2:
#endif /* defined(CONFIG_XKS01) */

	jr	ra

END(bmips_enable_xks01)