/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/init.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/smp-cps.h>

#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

.extern mips_cm_base

.set noreorder

#ifdef CONFIG_64BIT
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif

#ifdef CONFIG_MIPS_CPS_NS16550

#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	 nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */

	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	 nop
	.endm

	/*
	 * Set dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
	 * branch to nomt.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	 nop
	.endm

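	/*
	 * Boot code for a core or VPE brought up via the CPS SMP code. The
	 * caller provides the Kseg0 CCA to use in a0 and the CM GCR base
	 * address in a1; both are stashed in callee-saved registers below so
	 * that they survive the jal calls made before they are consumed.
	 */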
LEAF(mips_cps_core_boot)
	/* Save CCA and GCR base */
	move	s0, a0
	move	s1, a1

	/* We don't know how to do coherence setup on earlier ISA */
#if MIPS_ISA_REV > 0
	/* Skip cache & coherence setup if we're already coherent */
	lw	s7, GCR_CL_COHERENCE_OFS(s1)
	bnez	s7, 1f
	 nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	 nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(s1)
	ehb
#endif /* MIPS_ISA_REV > 0 */

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7
	xori	t0, 0x7
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	 nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	 nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	 nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	 nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	 move	a0, v0

	/* Off we go! */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	 nop
	END(mips_cps_core_boot)

	__INIT
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	 nop
	END(excep_tlbfill)

LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	 nop
	END(excep_xtlbfill)

LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	 nop
	END(excep_cache)

LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	 nop
	END(excep_genex)

LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	 nop
	END(excep_intex)

LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	 nop
	END(excep_ejtag)
	__FINIT

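	/*
	 * Core-level MT setup: when the core implements the MT ASE, bind each
	 * additional TC 1:1 to a VPE and leave it halted, non-active and
	 * non-allocatable so that mips_cps_boot_vpes() can start it later.
	 * Without CONFIG_MIPS_MT_SMP this simply returns.
	 */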
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1
	 nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1

	/* If there's only 1, we're done */
	beqz	t0, 2f
	 nop

	/* Loop through each VPE within this core */
	li	ta1, 1

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	 nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	 nop
	END(mips_cps_core_init)

/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *          struct vpe_boot_config in v1, VPE ID in t9
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this core's struct core_boot_config */
	lw	t0, GCR_CL_ID_OFS(s1)
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPE's ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPE's struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	 nop
	END(mips_cps_get_bootcfg)

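	/*
	 * Start any VPEs of this core which should be running and stop this
	 * VPE if it should be offline. Expects a pointer to the core's struct
	 * core_boot_config in a0 and this VPE's ID in a1.
	 */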
LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	PTR_LA	t1, mips_gcr_base
	PTR_L	t1, 0(t1)
	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1
	 nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	 nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	 nop

	/* Calculate a pointer to the VPE's struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	PTR_ADDU t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	 nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	 nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	 nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	 nop

2:

#endif /* CONFIG_MIPS_MT_SMP */

	/* Return */
5:	jr	ra
	 nop
	END(mips_cps_boot_vpes)

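	/*
	 * Initialise the L1 caches by storing zeroed tags to every index.
	 * Line size, sets per way and associativity are decoded from Config1;
	 * the whole routine is guarded by MIPS_ISA_REV > 0.
	 */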
#if MIPS_ISA_REV > 0
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect I-cache size */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	 nop
icache_done:

	/* Detect D-cache line size */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	 PTR_ADD a0, a0, t0
dcache_done:

	jr	ra
	 nop
	END(mips_cps_cache_init)
#endif /* MIPS_ISA_REV > 0 */

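	/*
	 * CPU state save/restore helpers used by the CPS power management
	 * code. Only built when both CONFIG_MIPS_CPS_PM and CONFIG_CPU_PM are
	 * enabled.
	 */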
#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)

	/* Calculate a pointer to this CPU's struct mips_static_suspend_state */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)
	sll	$1, $1, LONGLOG
	PTR_LA	\dest, __per_cpu_offset
	PTR_ADDU $1, $1, \dest
	lw	$1, 0($1)
	PTR_LA	\dest, cps_cpu_state
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm

LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1
	SUSPEND_SAVE_STATIC
	jr	v0
	 nop
	END(mips_cps_pm_save)

LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)

#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */