/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/init.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/smp-cps.h>

#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

	.extern mips_cm_base

	.set noreorder

#ifdef CONFIG_64BIT
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif

#ifdef CONFIG_MIPS_CPS_NS16550

#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	 nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */

	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	 nop
	.endm

	/*
	 * Set dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
	 * branch to nomt.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	 nop
	.endm

LEAF(mips_cps_core_boot)
	/* Save CCA and GCR base */
	move	s0, a0
	move	s1, a1

	/* We don't know how to do coherence setup on earlier ISA revisions */
#if MIPS_ISA_REV > 0
	/* Skip cache & coherence setup if we're already coherent */
	lw	s7, GCR_CL_COHERENCE_OFS(s1)
	bnez	s7, 1f
	 nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	 nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(s1)
	ehb
#endif /* MIPS_ISA_REV > 0 */

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7
	xori	t0, 0x7
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	 nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	 nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	 nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	 nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	 move	a0, v0

	/* Off we go! */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	 nop
	END(mips_cps_core_boot)

	__INIT
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	 nop
	END(excep_tlbfill)

LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	 nop
	END(excep_xtlbfill)

LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	 nop
	END(excep_cache)

LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	 nop
	END(excep_genex)

LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	 nop
	END(excep_intex)

LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	 nop
	END(excep_ejtag)
	__FINIT

LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	 nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1

	/* If there's only 1, we're done */
	beqz	t0, 2f
	 nop

	/* Loop through each VPE within this core */
	li	ta1, 1

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	 nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	 nop
	END(mips_cps_core_init)

/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *	    struct vpe_boot_config in v1, VPE ID in t9
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this cores struct core_boot_config */
	lw	t0, GCR_CL_ID_OFS(s1)
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	 nop
	END(mips_cps_get_bootcfg)

LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	PTR_LA	t1, mips_gcr_base
	PTR_L	t1, 0(t1)
	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1
	 nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	 nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	 nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	PTR_ADDU t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	 nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	 nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	 nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	 nop

2:

#endif /* CONFIG_MIPS_MT_SMP */

	/* Return */
5:	jr	ra
	 nop
	END(mips_cps_boot_vpes)

#if MIPS_ISA_REV > 0
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect I-cache size */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	 nop
icache_done:

	/* Detect D-cache line size */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	 PTR_ADD a0, a0, t0
dcache_done:

	jr	ra
	 nop
	END(mips_cps_cache_init)
#endif /* MIPS_ISA_REV > 0 */

#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)

	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)
	sll	$1, $1, LONGLOG
	PTR_LA	\dest, __per_cpu_offset
	PTR_ADDU $1, $1, \dest
	lw	$1, 0($1)
	PTR_LA	\dest, cps_cpu_state
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm

LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	 nop
	END(mips_cps_pm_save)

LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)

#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */