/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/init.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/smp-cps.h>

#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

.extern mips_cm_base

.set noreorder

#ifdef CONFIG_64BIT
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif

#ifdef CONFIG_MIPS_CPS_NS16550

#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */

	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	nop
	.endm

	/*
	 * Set dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
	 * branch to nomt.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	nop
	.endm

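/*
 * mips_cps_core_boot() - boot & configure this core.
 *
 * Entered with the Kseg0 CCA to use in a0 and the base address of the CM
 * GCRs in a1; both are preserved in s0 & s1 respectively and used
 * throughout core bring-up.
 */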
LEAF(mips_cps_core_boot)
	/* Save CCA and GCR base */
	move	s0, a0
	move	s1, a1

	/* We don't know how to do coherence setup on earlier ISA revs */
#if MIPS_ISA_REV > 0
	/* Skip cache & coherence setup if we're already coherent */
	lw	s7, GCR_CL_COHERENCE_OFS(s1)
	bnez	s7, 1f
	nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(s1)
	ehb
#endif /* MIPS_ISA_REV > 0 */

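	/*
	 * Note on the sequence below: or-ing Config with 0x7 then xor-ing it
	 * with 0x7 clears the Config.K0 (Kseg0 CCA) field regardless of its
	 * current value, after which the desired CCA saved in s0 is or-ed in.
	 */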
	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7
	xori	t0, 0x7
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	move	a0, v0

	/* Off we go! */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	nop
	END(mips_cps_core_boot)

	__INIT
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	nop
	END(excep_tlbfill)

LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	nop
	END(excep_xtlbfill)

LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	nop
	END(excep_cache)

LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	nop
	END(excep_genex)

LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	nop
	END(excep_intex)

LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	nop
	END(excep_ejtag)
	__FINIT

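/*
 * mips_cps_core_init() - perform core-level initialisation. When the MT ASE
 * is implemented this binds each TC to its own VPE (1:1) and leaves every
 * TC/VPE other than the first halted, non-active & non-allocatable, ready to
 * be started later by mips_cps_boot_vpes.
 */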
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1

	/* If there's only 1, we're done */
	beqz	t0, 2f
	nop

	/* Loop through each VPE within this core */
	li	ta1, 1

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	nop
	END(mips_cps_core_init)

/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *          struct vpe_boot_config in v1, VPE ID in t9
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this cores struct core_boot_config */
	lw	t0, GCR_CL_ID_OFS(s1)
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	nop
	END(mips_cps_get_bootcfg)

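/*
 * mips_cps_boot_vpes() - start the VPEs/VPs within this core that should be
 * online & deactivate this one if it should be offline. Called with a
 * pointer to this core's struct core_boot_config in a0 and the caller's VPE
 * ID in a1.
 */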
LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	PTR_LA	t1, mips_gcr_base
	PTR_L	t1, 0(t1)
	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1
	nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	PTR_ADDU t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	nop

2:

#endif /* CONFIG_MIPS_MT_SMP */

	/* Return */
5:	jr	ra
	nop
	END(mips_cps_boot_vpes)

#if MIPS_ISA_REV > 0
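/*
 * mips_cps_cache_init() - initialise the primary caches. With TagLo & TagHi
 * cleared, the Index_Store_Tag cache ops below write zero (invalid) tags to
 * every line of the L1 I-cache & D-cache, whose line size, sets per way and
 * associativity are read from Config1.
 */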
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	li	t1, 2
	sllv	t0, t1, t0

	/* Detect I-cache size */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	nop
icache_done:

	/* Detect D-cache line size */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	PTR_ADD	a0, a0, t0
dcache_done:

	jr	ra
	nop
	END(mips_cps_cache_init)
#endif /* MIPS_ISA_REV > 0 */

#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)

	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)
	sll	$1, $1, LONGLOG
	PTR_LA	\dest, __per_cpu_offset
	PTR_ADDU $1, $1, \dest
	lw	$1, 0($1)
	PTR_LA	\dest, cps_cpu_state
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm

LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	nop
	END(mips_cps_pm_save)

LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)

#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */