/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *  Copyright (C) 2005 ARM Ltd.
 *
 *  This is the "shell" of the ARMv7 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include <asm/hardware/cache-b15-rac.h>

#include "proc-macros.S"

.arch armv7-a

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
.globl icache_size
	.data
	.align	2
icache_size:
	.long	64
	.text
#endif
/*
 * The secondary kernel init calls v7_flush_dcache_all before it enables
 * the L1; however, the L1 comes out of reset in an undefined state, so
 * the clean + invalidate performed by v7_flush_dcache_all causes a bunch
 * of cache lines with uninitialized data and uninitialized tags to get
 * written out to memory, which does really unpleasant things to the main
 * processor.  We fix this by performing an invalidate, rather than a
 * clean + invalidate, before jumping into the kernel.
 *
 * This function needs to be called for both secondary cores startup and
 * primary core resume procedures.
 */
ENTRY(v7_invalidate_l1)
	mov	r0, #0
	mcr	p15, 2, r0, c0, c0, 0	@ select L1 data cache in CSSELR
	isb
	mrc	p15, 1, r0, c0, c0, 0	@ read cache geometry from CCSIDR

	movw	r3, #0x3ff
	and	r3, r3, r0, lsr #3	@ 'Associativity' in CCSIDR[12:3]
	clz	r1, r3			@ WayShift
	mov	r2, #1
	mov	r3, r3, lsl r1		@ NumWays-1 shifted into bits [31:...]
	movs	r1, r2, lsl r1		@ #1 shifted left by same amount
	moveq	r1, #1			@ r1 needs value > 0 even if only 1 way

	and	r2, r0, #0x7
	add	r2, r2, #4		@ SetShift

1:	movw	ip, #0x7fff
	and	r0, ip, r0, lsr #13	@ 'NumSets' in CCSIDR[27:13]

2:	mov	ip, r0, lsl r2		@ current Set << SetShift
	orr	ip, ip, r3		@ or in current Way (already << WayShift)
	mcr	p15, 0, ip, c7, c6, 2	@ invalidate D line by set/way (DCISW)
	subs	r0, r0, #1		@ Set--
	bpl	2b
	subs	r3, r3, r1		@ Way--
	bcc	3f
	mrc	p15, 1, r0, c0, c0, 0	@ re-read cache geometry from CCSIDR
	b	1b
3:	dsb	st
	isb
	ret	lr
ENDPROC(v7_invalidate_l1)

/*
 *	v7_flush_icache_all()
 *
 *	Flush the whole I-cache.
 *
 *	Registers:
 *	r0 - set to 0
 */
SYM_TYPED_FUNC_START(v7_flush_icache_all)
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
	ret	lr
SYM_FUNC_END(v7_flush_icache_all)

/*
 *	v7_flush_dcache_louis()
 *
 *	Flush the D-cache up to the Level of Unification Inner Shareable
 *
 *	Corrupted registers: r0-r6, r9-r10
 */
ENTRY(v7_flush_dcache_louis)
	dmb					@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr, r0 = clidr
ALT_SMP(mov	r3, r0, lsr #20)		@ move LoUIS into position
ALT_UP(	mov	r3, r0, lsr #26)		@ move LoUU into position
	ands	r3, r3, #7 << 1			@ extract LoU*2 field from clidr
	bne	start_flush_levels		@ LoU != 0, start flushing
#ifdef CONFIG_ARM_ERRATA_643719
ALT_SMP(mrc	p15, 0, r2, c0, c0, 0)		@ read main ID register
ALT_UP(	ret	lr)				@ LoUU is zero, so nothing to do
	movw	r1, #:lower16:(0x410fc090 >> 4)	@ ID of ARM Cortex A9 r0p?
	movt	r1, #:upper16:(0x410fc090 >> 4)
	teq	r1, r2, lsr #4			@ test for errata-affected core, and if so...
	moveq	r3, #1 << 1			@ ...fix the LoUIS value
	beq	start_flush_levels		@ start flushing cache levels
#endif
	ret	lr
ENDPROC(v7_flush_dcache_louis)
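/*
 * Illustrative sketch, not part of the build: how the CLIDR fields used
 * above decode in C.  v7_flush_dcache_louis extracts LoUIS (CLIDR[23:21])
 * on SMP and LoUU (CLIDR[29:27]) on UP; v7_flush_dcache_all below extracts
 * LoC (CLIDR[26:24]).  Each shift is chosen so the field lands pre-doubled,
 * matching the level*2 value that CSSELR and set/way operands expect.  The
 * helper names are hypothetical, not kernel API.
 *
 *	static inline unsigned int clidr_louis_x2(unsigned int clidr)
 *	{
 *		return (clidr >> 20) & (7 << 1);	// LoUIS * 2
 *	}
 *
 *	static inline unsigned int clidr_loc_x2(unsigned int clidr)
 *	{
 *		return (clidr >> 23) & (7 << 1);	// LoC * 2
 *	}
 */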
/*
 *	v7_flush_dcache_all()
 *
 *	Flush the whole D-cache.
 *
 *	Corrupted registers: r0-r6, r9-r10
 */
ENTRY(v7_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
	mov	r3, r0, lsr #23			@ move LoC into position
	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
	beq	finished			@ if loc is 0, then no need to clean
start_flush_levels:
	mov	r10, #0				@ start clean at cache level 0
flush_levels:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask of the bits for current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPTION
	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
#endif
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	isb					@ isb to sync the new cssr&csidr
	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
#ifdef CONFIG_PREEMPTION
	restore_irqs_notrace r9
#endif
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	movw	r4, #0x3ff
	ands	r4, r4, r1, lsr #3		@ find maximum way number (NumWays - 1)
	clz	r5, r4				@ find bit position of way size increment
	movw	r6, #0x7fff
	and	r1, r6, r1, lsr #13		@ extract maximum set number (NumSets - 1)
	mov	r6, #1
	movne	r4, r4, lsl r5			@ # of ways shifted into bits [31:...]
	movne	r6, r6, lsl r5			@ 1 shifted left by same amount
loop1:
	mov	r9, r1				@ create working copy of max index
loop2:
	mov	r5, r9, lsl r2			@ factor set number into r5
	orr	r5, r5, r4			@ factor way number into r5
	orr	r5, r5, r10			@ factor cache level into r5
	mcr	p15, 0, r5, c7, c14, 2		@ clean & invalidate by set/way
	subs	r9, r9, #1			@ decrement the index
	bge	loop2
	subs	r4, r4, r6			@ decrement the way
	bcs	loop1
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
#ifdef CONFIG_ARM_ERRATA_814220
	dsb
#endif
	bgt	flush_levels
finished:
	mov	r10, #0				@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
	dsb	st
	isb
	ret	lr
ENDPROC(v7_flush_dcache_all)

/*
 *	v7_flush_kern_cache_all()
 *
 *	Flush the entire cache system.
 *	The data cache flush is now achieved using atomic clean / invalidates
 *	working outwards from L1 cache. This is done using Set/Way based cache
 *	maintenance instructions.
 *	The instruction cache can still be invalidated back to the point of
 *	unification in a single instruction.
 */
SYM_TYPED_FUNC_START(v7_flush_kern_cache_all)
	stmfd	sp!, {r4-r6, r9-r10, lr}
	bl	v7_flush_dcache_all
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
	ldmfd	sp!, {r4-r6, r9-r10, lr}
	ret	lr
SYM_FUNC_END(v7_flush_kern_cache_all)

/*
 *	v7_flush_kern_cache_louis(void)
 *
 *	Flush the data cache up to Level of Unification Inner Shareable.
 *	Invalidate the I-cache to the point of unification.
 */
SYM_TYPED_FUNC_START(v7_flush_kern_cache_louis)
	stmfd	sp!, {r4-r6, r9-r10, lr}
	bl	v7_flush_dcache_louis
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
	ldmfd	sp!, {r4-r6, r9-r10, lr}
	ret	lr
SYM_FUNC_END(v7_flush_kern_cache_louis)
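/*
 * Illustrative sketch, not part of the build: the set/way operand that
 * flush_levels composes for DCCISW, written out in C.  From CCSIDR,
 * SetShift = log2(line length in bytes) = CCSIDR[2:0] + 4, and WayShift =
 * clz(NumWays - 1), which is what r2 and r5 hold above.  dccisw_op() is a
 * hypothetical name, for illustration only.
 *
 *	static inline unsigned int dccisw_op(unsigned int set, unsigned int way,
 *					     unsigned int level_x2,
 *					     unsigned int set_shift,
 *					     unsigned int way_shift)
 *	{
 *		// level_x2 is the pre-doubled level from start_flush_levels,
 *		// occupying bits [3:1] of the operand
 *		return (set << set_shift) | (way << way_shift) | level_x2;
 *	}
 */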
/*
 *	v7_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space.
 *	This is a no-op on ARMv7.
 */
SYM_TYPED_FUNC_START(v7_flush_user_cache_all)
	ret	lr
SYM_FUNC_END(v7_flush_user_cache_all)

/*
 *	v7_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *	This is a no-op on ARMv7.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
SYM_TYPED_FUNC_START(v7_flush_user_cache_range)
	ret	lr
SYM_FUNC_END(v7_flush_user_cache_range)

/*
 *	v7_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end	  - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
SYM_TYPED_FUNC_START(v7_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	v7_coherent_user_range
#endif
SYM_FUNC_END(v7_coherent_kern_range)

/*
 *	v7_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end	  - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
SYM_TYPED_FUNC_START(v7_coherent_user_range)
 UNWIND(.fnstart	)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
1:
 USER(	mcr	p15, 0, r12, c7, c11, 1	)	@ clean D line to the point of unification
	add	r12, r12, r2
	cmp	r12, r1
	blo	1b
	dsb	ishst
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
	ldr	r3, =icache_size
	ldr	r2, [r3, #0]
#else
	icache_line_size r2, r3
#endif
	sub	r3, r2, #1
	bic	r12, r0, r3
2:
 USER(	mcr	p15, 0, r12, c7, c5, 1	)	@ invalidate I line
	add	r12, r12, r2
	cmp	r12, r1
	blo	2b
	mov	r0, #0
	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
	dsb	ishst
	isb
	ret	lr

/*
 * Fault handling for the cache operation above.  If the virtual address in r0
 * isn't mapped, fail with -EFAULT.
 */
9001:
#ifdef CONFIG_ARM_ERRATA_775420
	dsb
#endif
	mov	r0, #-EFAULT
	ret	lr
 UNWIND(.fnend		)
SYM_FUNC_END(v7_coherent_user_range)
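/*
 * Illustrative sketch, not part of the build: the shape of the PoU
 * coherency sequence above in C.  All helper names here are hypothetical;
 * dccmvau()/icimvau()/bpiall() stand in for the DCCMVAU/ICIMVAU/BPIALL
 * operations.  Note the assembly re-derives the line size for the second
 * loop because the I-cache line size may differ from the D-cache line size.
 *
 *	static void coherent_range(unsigned long start, unsigned long end,
 *				   unsigned long dline, unsigned long iline)
 *	{
 *		unsigned long addr;
 *
 *		for (addr = start & ~(dline - 1); addr < end; addr += dline)
 *			dccmvau(addr);	// clean D line to the PoU
 *		dsb_ishst();		// complete cleans before I-side ops
 *		for (addr = start & ~(iline - 1); addr < end; addr += iline)
 *			icimvau(addr);	// invalidate I line to the PoU
 *		bpiall();		// invalidate branch predictor
 *		dsb_ishst();
 *		isb();
 *	}
 */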
/*
 *	v7_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the region described by addr/size
 *	is written back to memory.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
SYM_TYPED_FUNC_START(v7_flush_kern_dcache_area)
	dcache_line_size r2, r3
	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
SYM_FUNC_END(v7_flush_kern_dcache_area)

/*
 *	v7_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start   - virtual start address of region
 *	- end	  - virtual end address of region
 */
v7_dma_inv_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	tst	r0, r3
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
	addne	r0, r0, r2

	tst	r1, r3
	bic	r1, r1, r3
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D / U line
	cmp	r0, r1
1:
	mcrlo	p15, 0, r0, c7, c6, 1		@ invalidate D / U line
	addlo	r0, r0, r2
	cmplo	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7_dma_inv_range)

/*
 *	v7_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end	  - virtual end address of region
 */
v7_dma_clean_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c10, 1		@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7_dma_clean_range)

/*
 *	v7_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end	  - virtual end address of region
 */
SYM_TYPED_FUNC_START(v7_dma_flush_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
#ifdef CONFIG_ARM_ERRATA_764369
	ALT_SMP(W(dsb))
	ALT_UP(W(nop))
#endif
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
SYM_FUNC_END(v7_dma_flush_range)

/*
 *	v7_dma_map_area(start, size, dir)
 *	- start   - kernel virtual start address
 *	- size	  - size of region
 *	- dir	  - DMA direction
 */
SYM_TYPED_FUNC_START(v7_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v7_dma_inv_range
	b	v7_dma_clean_range
SYM_FUNC_END(v7_dma_map_area)

/*
 *	v7_dma_unmap_area(start, size, dir)
 *	- start   - kernel virtual start address
 *	- size	  - size of region
 *	- dir	  - DMA direction
 */
SYM_TYPED_FUNC_START(v7_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v7_dma_inv_range
	ret	lr
SYM_FUNC_END(v7_dma_unmap_area)
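/*
 * Illustrative sketch, not part of the build: the direction policy that
 * v7_dma_map_area/v7_dma_unmap_area implement, in C.  The two range
 * helpers are the local routines defined above; the C signatures are
 * simplified for illustration.
 *
 *	void v7_dma_map_area(unsigned long start, size_t size, int dir)
 *	{
 *		if (dir == DMA_FROM_DEVICE)
 *			v7_dma_inv_range(start, start + size);	// device will write
 *		else
 *			v7_dma_clean_range(start, start + size);	// device will read
 *	}
 *
 *	void v7_dma_unmap_area(unsigned long start, size_t size, int dir)
 *	{
 *		if (dir != DMA_TO_DEVICE)
 *			v7_dma_inv_range(start, start + size);	// drop lines pulled in during DMA
 *	}
 *
 * DMA_BIDIRECTIONAL therefore gets a clean on map and an invalidate on unmap.
 */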