/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/init.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/smp-cps.h>

#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

	.extern mips_cm_base

	.set	noreorder

#ifdef CONFIG_64BIT
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif

#ifdef CONFIG_MIPS_CPS_NS16550

#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	 nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */
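	/*
	 * The has_mt & has_vp macros below probe the Config register chain:
	 * bit 31 (the M bit) of each Config register indicates whether the
	 * next Config register is implemented, so the bgez sign tests branch
	 * to \nomt as soon as the chain ends before the MT/VP feature bit
	 * can be read.
	 */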
	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	 nop
	.endm

	/*
	 * Set dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
	 * branch to nomt.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	 nop
	.endm

LEAF(mips_cps_core_boot)
	/* Save CCA and GCR base */
	move	s0, a0
	move	s1, a1

	/* We don't know how to do coherence setup on earlier ISA revisions */
#if MIPS_ISA_REV > 0
	/* Skip cache & coherence setup if we're already coherent */
	lw	s7, GCR_CL_COHERENCE_OFS(s1)
	bnez	s7, 1f
	 nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	 nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(s1)
	ehb
#endif /* MIPS_ISA_REV > 0 */

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7
	xori	t0, 0x7
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	 nop
	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	 nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	 nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	 nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	 move	a0, v0

	/* Off we go! */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	 nop
	END(mips_cps_core_boot)

	__INIT
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	 nop
	END(excep_tlbfill)

LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	 nop
	END(excep_xtlbfill)

LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	 nop
	END(excep_cache)

LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	 nop
	END(excep_genex)

LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	 nop
	END(excep_intex)

LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	 nop
	END(excep_ejtag)
	__FINIT

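	/*
	 * mips_cps_core_init: one-time, core-level MT setup. When the MT ASE
	 * is present, every TC other than TC0 is bound 1:1 to a VPE and left
	 * halted & non-active, ready for mips_cps_boot_vpes to start it later.
	 */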
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	 nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1

	/* If there's only 1, we're done */
	beqz	t0, 2f
	 nop

	/* Loop through each VPE within this core */
	li	ta1, 1

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	 nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	 nop
	END(mips_cps_core_init)

/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *          struct vpe_boot_config in v1, VPE ID in t9
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this cores struct core_boot_config */
	lw	t0, GCR_CL_ID_OFS(s1)
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	 nop
	END(mips_cps_get_bootcfg)

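	/*
	 * mips_cps_boot_vpes: start the other VPs/VPEs of this core that are
	 * marked online in the core's boot configuration, and take this VPE
	 * offline if it shouldn't be running. Called with a0 = pointer to
	 * this core's struct core_boot_config and a1 = the caller's VPE ID.
	 */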
LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	PTR_LA	t1, mips_gcr_base
	PTR_L	t1, 0(t1)
	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1
	 nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	 nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	 nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	PTR_ADDU t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG
	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	 nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	 nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	 nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	 nop

2:

#endif /* CONFIG_MIPS_MT_SMP */

	/* Return */
5:	jr	ra
	 nop
	END(mips_cps_boot_vpes)

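	/*
	 * mips_cps_cache_init: bring the L1 caches to a known state by writing
	 * zeroed tags to every index. Cache geometry is decoded from Config1
	 * (line size = 2 << IL bytes, associativity = IA + 1), and the loops
	 * below step a CKSEG0 address through the cache one line at a time
	 * using Index Store Tag.
	 */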
#if MIPS_ISA_REV > 0
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect I-cache size */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	 nop
icache_done:

	/* Detect D-cache line size */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	 PTR_ADD a0, a0, t0
dcache_done:

	jr	ra
	 nop
	END(mips_cps_cache_init)
#endif /* MIPS_ISA_REV > 0 */

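	/*
	 * The suspend/resume helpers below are built only when CPS power
	 * management is enabled. psstate resolves this CPU's copy of the
	 * per-CPU cps_cpu_state variable by adding the CPU's __per_cpu_offset
	 * entry to the variable's address.
	 */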
#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)

	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)
	sll	$1, $1, LONGLOG
	PTR_LA	\dest, __per_cpu_offset
	PTR_ADDU $1, $1, \dest
	lw	$1, 0($1)
	PTR_LA	\dest, cps_cpu_state
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm

LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	 nop
	END(mips_cps_pm_save)

LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)

#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */