/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/init.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/smp-cps.h>

#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

	.extern	mips_cm_base

	.set	noreorder

#ifdef CONFIG_64BIT
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif

#ifdef CONFIG_MIPS_CPS_NS16550

#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */

/*
 * Set dest to non-zero if the core supports the MT ASE, else zero. If
 * MT is not supported then branch to nomt.
 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	nop
	.endm

/*
 * Set dest to non-zero if the core supports MIPSr6 multithreading
 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
 * branch to nomt.
 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	nop
	.endm
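
/*
 * Note on the two macros above: each Config register sets bit 31 (the M
 * bit) when the next Config register in the sequence is implemented, so
 * the bgez instructions fall through only while the chain continues and
 * branch to \nomt as soon as a required Config register is absent. A core
 * without Config3 cannot implement the MT ASE, and one without Config5
 * cannot implement VPs.
 */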

LEAF(mips_cps_core_boot)
	/* Save CCA and GCR base */
	move	s0, a0
	move	s1, a1

	/* We don't know how to do coherence setup on earlier ISA revisions */
#if MIPS_ISA_REV > 0
	/* Skip cache & coherence setup if we're already coherent */
	lw	s7, GCR_CL_COHERENCE_OFS(s1)
	bnez	s7, 1f
	nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(s1)
	ehb
#endif /* MIPS_ISA_REV > 0 */

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7
	xori	t0, 0x7
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	nop
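
	/*
	 * The PTR_LA above yields the link-time address of the label, which
	 * lies in (c)kseg0, so the jr lands on the cached alias of this code
	 * and execution continues using the CCA that was just programmed.
	 */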

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	move	a0, v0

	/* Off we go! */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	nop
	END(mips_cps_core_boot)

	__INIT
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	nop
	END(excep_tlbfill)

LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	nop
	END(excep_xtlbfill)

LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	nop
	END(excep_cache)

LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	nop
	END(excep_genex)

LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	nop
	END(excep_intex)

LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	nop
	END(excep_ejtag)
	__FINIT

LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	nop
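
	/*
	 * Several of the TC/VPE configuration fields written below are only
	 * writable while the core is in the VPE configuration state, i.e.
	 * while MVPControl.VPC is set, so that bit is set here and cleared
	 * again once configuration is complete.
	 */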

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1

	/* If there's only 1, we're done */
	beqz	t0, 2f
	nop

	/* Loop through each VPE within this core */
	li	ta1, 1

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	nop
	END(mips_cps_core_init)
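
/*
 * The COREBOOTCFG_* and VPEBOOTCFG_* offsets used below are emitted by
 * asm-offsets from struct core_boot_config and struct vpe_boot_config
 * (see asm/smp-cps.h); the CPS SMP code populates those structures before
 * a core is brought out of reset.
 */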

/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *	    struct vpe_boot_config in v1, VPE ID in t9
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this core's struct core_boot_config */
	lw	t0, GCR_CL_ID_OFS(s1)
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPE's ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPE's struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	nop
	END(mips_cps_get_bootcfg)
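
/*
 * mips_cps_boot_vpes() - start the other VPs/VPEs within this core
 *
 * Expects a pointer to this core's struct core_boot_config in a0 and the
 * ID of the calling VPE in a1, matching the way mips_cps_core_boot above
 * invokes it.
 */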

LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	PTR_LA	t1, mips_gcr_base
	PTR_L	t1, 0(t1)
	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1
	nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	nop

	/* Calculate a pointer to the VPE's struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	PTR_ADDU t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG
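
	/*
	 * Note: the mttc0/mttgpr instructions used here write to the TC
	 * selected by VPEControl.TargTC, i.e. they configure the target VPE
	 * rather than the one executing this code.
	 */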

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	nop

2:

#endif /* CONFIG_MIPS_MT_SMP */

	/* Return */
5:	jr	ra
	nop
	END(mips_cps_boot_vpes)
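
/*
 * mips_cps_cache_init below derives the primary cache geometry from
 * Config1: for the common encodings the line size is 2 << IL/DL bytes,
 * sets per way is 64 << IS/DS and associativity is IA/DA + 1 ways. As an
 * example, IL=4, IS=2, IA=3 describes a 4-way 32KB I-cache with 32-byte
 * lines, so 32KB worth of indexed Index_Store_Tag operations follow.
 */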

#if MIPS_ISA_REV > 0
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	li	t1, 2
	sllv	t0, t1, t0

	/* Detect I-cache size */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	nop
icache_done:

	/* Detect D-cache line size */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	PTR_ADD	a0, a0, t0
dcache_done:

	jr	ra
	nop
	END(mips_cps_cache_init)
#endif /* MIPS_ISA_REV > 0 */

#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)

	/* Calculate a pointer to this CPU's struct mips_static_suspend_state */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)
	sll	$1, $1, LONGLOG
	PTR_LA	\dest, __per_cpu_offset
	PTR_ADDU $1, $1, \dest
	lw	$1, 0($1)
	PTR_LA	\dest, cps_cpu_state
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm
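
	/*
	 * In other words, psstate computes &per_cpu(cps_cpu_state, cpu): the
	 * CPU number is read from thread_info, scaled to index the
	 * __per_cpu_offset table, and the resulting offset is added to the
	 * address of cps_cpu_state.
	 */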

LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	nop
	END(mips_cps_pm_save)

LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)

#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */