/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/init.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/smp-cps.h>

/* Offsets into the CM Global Configuration Register block */
#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

/* Offsets into the Cluster Power Controller local register block */
#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

	.extern mips_cm_base

.set noreorder

#ifdef CONFIG_64BIT
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif

#ifdef CONFIG_MIPS_CPS_NS16550

/* Dump the named exception to the NS16550 UART before spinning */
#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	 nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */

	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	 nop
	.endm

	/*
	 * Set dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
	 * branch to nomt.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	 nop
	.endm


LEAF(mips_cps_core_boot)
	/* Save CCA and GCR base */
	move	s0, a0
	move	s1, a1

	/* We don't know how to do coherence setup on earlier ISA */
#if MIPS_ISA_REV > 0
	/* Skip cache & coherence setup if we're already coherent */
	lw	s7, GCR_CL_COHERENCE_OFS(s1)
	bnez	s7, 1f
	 nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	 nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(s1)
	ehb
#endif /* MIPS_ISA_REV > 0 */

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7
	xori	t0, 0x7
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	 nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	 nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	 nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	 nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	 move	a0, v0

	/* Off we go! */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	 nop
	END(mips_cps_core_boot)

	__INIT
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	 nop
	END(excep_tlbfill)

LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	 nop
	END(excep_xtlbfill)

LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	 nop
	END(excep_cache)

LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	 nop
	END(excep_genex)

LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	 nop
	END(excep_intex)

LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	 nop
	END(excep_ejtag)
	__FINIT

LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	 nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1

	/* If there's only 1, we're done */
	beqz	t0, 2f
	 nop

	/* Loop through each VPE within this core */
	li	ta1, 1

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	 nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	 nop
	END(mips_cps_core_init)

/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *          struct vpe_boot_config in v1, VPE ID in t9
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this cores struct core_boot_config */
	lw	t0, GCR_CL_ID_OFS(s1)
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	 nop
	END(mips_cps_get_bootcfg)

LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	PTR_LA	t1, mips_gcr_base
	PTR_L	t1, 0(t1)
	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1
	 nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	 nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	 nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	PTR_ADDU t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	 nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	 nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	 nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	 nop

2:

#endif /* CONFIG_MIPS_MT_SMP */

	/* Return */
5:	jr	ra
	 nop
	END(mips_cps_boot_vpes)

#if MIPS_ISA_REV > 0
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect I-cache size */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	 nop
icache_done:

	/* Detect D-cache line size */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	 PTR_ADD a0, a0, t0
dcache_done:

	jr	ra
	 nop
	END(mips_cps_cache_init)
#endif /* MIPS_ISA_REV > 0 */

#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)

	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)
	sll	$1, $1, LONGLOG
	PTR_LA	\dest, __per_cpu_offset
	PTR_ADDU $1, $1, \dest
	lw	$1, 0($1)
	PTR_LA	\dest, cps_cpu_state
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm

LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	 nop
	END(mips_cps_pm_save)

LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)

#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
/*
 * Linux® is a registered trademark of Linus Torvalds in the United States and
 * other countries.
 * TOMOYO® is a registered trademark of NTT DATA CORPORATION.
 */