/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/init.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/smp-cps.h>

#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

	.extern mips_cm_base

	.set	noreorder

#ifdef CONFIG_64BIT
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif

#ifdef CONFIG_MIPS_CPS_NS16550

#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */

	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	nop
	.endm
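
	/*
	 * Both has_mt above and has_vp below walk the Config register chain:
	 * bit 31 of each ConfigN (the M bit) indicates whether ConfigN+1 is
	 * present, so the bgez tests bail out to \nomt as soon as a required
	 * Config register turns out not to be implemented.
	 */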

	/*
	 * Set dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
	 * branch to nomt.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	nop
	.endm


LEAF(mips_cps_core_boot)
	/* Save CCA and GCR base */
	move	s0, a0
	move	s1, a1

	/* We don't know how to do coherence setup on earlier ISA */
#if MIPS_ISA_REV > 0
	/* Skip cache & coherence setup if we're already coherent */
	lw	s7, GCR_CL_COHERENCE_OFS(s1)
	bnez	s7, 1f
	nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(s1)
	ehb
#endif /* MIPS_ISA_REV > 0 */

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7
	xori	t0, 0x7
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	move	a0, v0

	/* Off we go! */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	nop
	END(mips_cps_core_boot)
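
/*
 * Minimal exception stubs intended for use while a core is still running
 * from its boot exception vector, before the kernel proper has installed
 * its own handlers. With CONFIG_MIPS_CPS_NS16550 each stub first dumps
 * state over the early UART via mips_cps_bev_dump(), then spins; the EJTAG
 * stub instead chains to ejtag_debug_handler.
 */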

	__INIT
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	nop
	END(excep_tlbfill)

LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	nop
	END(excep_xtlbfill)

LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	nop
	END(excep_cache)

LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	nop
	END(excep_genex)

LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	nop
	END(excep_intex)

LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	nop
	END(excep_ejtag)
	__FINIT
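
/*
 * mips_cps_core_init: one-time per-core MT ASE setup. With CONFIG_MIPS_MT_SMP
 * on an MT-capable core, enter VPE configuration state and leave every VPE
 * other than VPE 0 bound 1:1 to a TC, marked non-active, non-allocatable and
 * halted, ready to be started later by mips_cps_boot_vpes(). Without MT (or
 * without CONFIG_MIPS_MT_SMP) this is effectively a no-op.
 */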

LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1

	/* If there's only 1, we're done */
	beqz	t0, 2f
	nop

	/* Loop through each VPE within this core */
	li	ta1, 1

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	nop
	END(mips_cps_core_init)
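
/*
 * Note that mips_cps_get_bootcfg() below is not a general-purpose helper:
 * it expects s1 to still hold the GCR base saved at the top of
 * mips_cps_core_boot(), which it uses to read this core's ID from GCR_CL_ID.
 */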

/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *          struct vpe_boot_config in v1, VPE ID in t9
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this cores struct core_boot_config */
	lw	t0, GCR_CL_ID_OFS(s1)
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	nop
	END(mips_cps_get_bootcfg)
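
/*
 * mips_cps_boot_vpes: start any sibling VPs/VPEs within this core that
 * should be online, and stop or halt the calling one if it should be
 * offline. Entered with a0 = pointer to this core's struct core_boot_config
 * and a1 = the caller's VPE ID (see the call site in mips_cps_core_boot).
 */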

LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	PTR_LA	t1, mips_gcr_base
	PTR_L	t1, 0(t1)
	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1
	nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	PTR_ADDU t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	nop

2:

#endif /* CONFIG_MIPS_MT_SMP */

	/* Return */
5:	jr	ra
	nop
	END(mips_cps_boot_vpes)
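
/*
 * mips_cps_cache_init: initialise the L1 caches by writing zero tags to
 * every index, with line size, sets per way and associativity decoded from
 * Config1. It is called from mips_cps_core_boot() before the core joins the
 * coherent domain, and uses only cache index operations - no loads or
 * stores to memory.
 */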

#if MIPS_ISA_REV > 0
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	li	t1, 2
	sllv	t0, t1, t0

	/* Detect I-cache size */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	nop
icache_done:

	/* Detect D-cache line size */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	PTR_ADD	a0, a0, t0
dcache_done:

	jr	ra
	nop
	END(mips_cps_cache_init)
#endif /* MIPS_ISA_REV > 0 */
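
/*
 * The remainder of this file provides the low-level suspend/resume helpers
 * for the CPS power-management code. The psstate macro below computes this
 * CPU's state area as cps_cpu_state + __per_cpu_offset[cpu], using $1 (at)
 * as scratch - hence the .set noat.
 */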

#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)

	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)
	sll	$1, $1, LONGLOG
	PTR_LA	\dest, __per_cpu_offset
	PTR_ADDU $1, $1, \dest
	lw	$1, 0($1)
	PTR_LA	\dest, cps_cpu_state
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm
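
/*
 * Note that mips_cps_pm_save does not return to its caller in the usual
 * way: after saving state it jumps to the address in v0, which the power
 * management entry code is presumably expected to have set up beforehand.
 */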

LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	nop
	END(mips_cps_pm_save)

LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)

#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */