/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v7m.S
 *
 *  Based on linux/arch/arm/mm/cache-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *  Copyright (C) 2005 ARM Ltd.
 *
 *  This is the "shell" of the ARMv7M processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include <asm/v7m.h>

#include "proc-macros.S"

.arch armv7-m

/* Generic V7M read/write macros for memory mapped cache operations */
.macro v7m_cache_read, rt, reg
	movw	\rt, #:lower16:BASEADDR_V7M_SCB + \reg
	movt	\rt, #:upper16:BASEADDR_V7M_SCB + \reg
	ldr	\rt, [\rt]
.endm

.macro v7m_cacheop, rt, tmp, op, c = al
	movw\c	\tmp, #:lower16:BASEADDR_V7M_SCB + \op
	movt\c	\tmp, #:upper16:BASEADDR_V7M_SCB + \op
	str\c	\rt, [\tmp]
.endm

.macro read_ccsidr, rt
	v7m_cache_read \rt, V7M_SCB_CCSIDR
.endm

.macro read_clidr, rt
	v7m_cache_read \rt, V7M_SCB_CLIDR
.endm

.macro write_csselr, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
.endm

/*
 * dcisw: Invalidate data cache by set/way
 */
.macro dcisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
.endm

/*
 * dccisw: Clean and invalidate data cache by set/way
 */
.macro dccisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
.endm

/*
 * dccimvac: Clean and invalidate data cache line by MVA to PoC
 */
.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dccimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
.endm
.endr

/*
 * dcimvac: Invalidate data cache line by MVA to PoC
 */
.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dcimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
.endm
.endr

/*
 * dccmvau: Clean data cache line by MVA to PoU
 */
.macro dccmvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
.endm

/*
 * dccmvac: Clean data cache line by MVA to PoC
 */
.macro dccmvac, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
.endm

/*
 * icimvau: Invalidate instruction cache by MVA to PoU
 */
.macro icimvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
.endm

/*
 * Invalidate the icache, inner shareable if SMP, invalidate BTB for UP.
 * rt data ignored by ICIALLU(IS), so can be used for the address
 */
.macro invalidate_icache, rt
	v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
	mov \rt, #0
.endm

/*
 * Invalidate the BTB, inner shareable if SMP.
 * rt data ignored by BPIALL, so it can be used for the address
 */
.macro invalidate_bp, rt
	v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
	mov \rt, #0
.endm

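/*
 * Note on the set/way operand encoding used below (per the ARMv7-M ARM,
 * which mirrors the ARMv7-A DCISW/DCCISW encoding): the way number
 * occupies the top bits, shifted left by clz(NumWays - 1), and the set
 * number is shifted left by log2(line size in bytes).  Both shifts are
 * derived from CCSIDR: LineSize[2:0] holds log2(words per line) - 2,
 * hence SetShift = LineSize + 4, while Associativity[12:3] and
 * NumSets[27:13] bound the way/set loops.
 */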
ENTRY(v7m_invalidate_l1)
	mov	r0, #0

	write_csselr r0, r1
	read_ccsidr r0

	movw	r1, #0x7fff
	and	r2, r1, r0, lsr #13

	movw	r1, #0x3ff

	and	r3, r1, r0, lsr #3	@ NumWays - 1
	add	r2, r2, #1		@ NumSets

	and	r0, r0, #0x7
	add	r0, r0, #4		@ SetShift

	clz	r1, r3			@ WayShift
	add	r4, r3, #1		@ NumWays
1:	sub	r2, r2, #1		@ NumSets--
	mov	r3, r4			@ Temp = NumWays
2:	subs	r3, r3, #1		@ Temp--
	mov	r5, r3, lsl r1
	mov	r6, r2, lsl r0
	orr	r5, r5, r6		@ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
	dcisw	r5, r6
	bgt	2b
	cmp	r2, #0
	bgt	1b
	dsb	st
	isb
	ret	lr
ENDPROC(v7m_invalidate_l1)

/*
 *	v7m_flush_icache_all()
 *
 *	Flush the whole I-cache.
 *
 *	Registers:
 *	r0 - set to 0
 */
SYM_TYPED_FUNC_START(v7m_flush_icache_all)
	invalidate_icache r0
	ret	lr
SYM_FUNC_END(v7m_flush_icache_all)

/*
 *	v7m_flush_dcache_all()
 *
 *	Flush the whole D-cache.
 *
 *	Corrupted registers: r0-r7, r9-r11
 */
ENTRY(v7m_flush_dcache_all)
	dmb				@ ensure ordering with previous memory accesses
	read_clidr r0
	mov	r3, r0, lsr #23		@ move LoC into position
	ands	r3, r3, #7 << 1		@ extract LoC*2 from clidr
	beq	finished		@ if LoC is 0, then no need to clean
start_flush_levels:
	mov	r10, #0			@ start clean at cache level 0
flush_levels:
	add	r2, r10, r10, lsr #1	@ work out 3x current cache level
	mov	r1, r0, lsr r2		@ extract cache type bits from clidr
	and	r1, r1, #7		@ mask of the bits for current cache only
	cmp	r1, #2			@ see what cache we have at this level
	blt	skip			@ skip if no cache, or just i-cache
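	/*
	 * CSSELR selects which cache's parameters CCSIDR reports, so
	 * the write_csselr/read_ccsidr pair must execute atomically:
	 * on a preemptible kernel an intervening context could
	 * reprogram CSSELR between the write and the read.
	 */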
#ifdef CONFIG_PREEMPTION
	save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
#endif
	write_csselr r10, r1		@ set current cache level
	isb				@ isb to sync the new cssr&csidr
	read_ccsidr r1			@ read the new csidr
#ifdef CONFIG_PREEMPTION
	restore_irqs_notrace r9
#endif
	and	r2, r1, #7		@ extract the length of the cache lines
	add	r2, r2, #4		@ add 4 (line length offset)
	movw	r4, #0x3ff
	ands	r4, r4, r1, lsr #3	@ find maximum way number (Associativity - 1)
	clz	r5, r4			@ find bit position of way size increment
	movw	r7, #0x7fff
	ands	r7, r7, r1, lsr #13	@ extract maximum set index (NumSets - 1)
loop1:
	mov	r9, r7			@ create working copy of max index
loop2:
	lsl	r6, r4, r5
	orr	r11, r10, r6		@ factor way and cache number into r11
	lsl	r6, r9, r2
	orr	r11, r11, r6		@ factor index number into r11
	dccisw	r11, r6			@ clean/invalidate by set/way
	subs	r9, r9, #1		@ decrement the index
	bge	loop2
	subs	r4, r4, #1		@ decrement the way
	bge	loop1
skip:
	add	r10, r10, #2		@ increment cache number
	cmp	r3, r10
	bgt	flush_levels
finished:
	mov	r10, #0			@ switch back to cache level 0
	write_csselr r10, r3		@ select current cache level in cssr
	dsb	st
	isb
	ret	lr
ENDPROC(v7m_flush_dcache_all)

/*
 *	v7m_flush_kern_cache_all()
 *
 *	Flush the entire cache system.
 *	The data cache flush is achieved using atomic clean / invalidates
 *	working outwards from L1 cache. This is done using Set/Way based
 *	cache maintenance instructions.
 *	The instruction cache can still be invalidated back to the point
 *	of unification in a single instruction.
 */
SYM_TYPED_FUNC_START(v7m_flush_kern_cache_all)
	stmfd	sp!, {r4-r7, r9-r11, lr}
	bl	v7m_flush_dcache_all
	invalidate_icache r0
	ldmfd	sp!, {r4-r7, r9-r11, lr}
	ret	lr
SYM_FUNC_END(v7m_flush_kern_cache_all)

/*
 *	v7m_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space.
 *
 *	- mm	- mm_struct describing address space
 */
SYM_TYPED_FUNC_START(v7m_flush_user_cache_all)
	ret	lr
SYM_FUNC_END(v7m_flush_user_cache_all)

/*
 *	v7m_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start	- start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
SYM_TYPED_FUNC_START(v7m_flush_user_cache_range)
	ret	lr
SYM_FUNC_END(v7m_flush_user_cache_range)

/*
 *	v7m_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
SYM_TYPED_FUNC_START(v7m_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	v7m_coherent_user_range
#endif
SYM_FUNC_END(v7m_coherent_kern_range)

/*
 *	v7m_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
SYM_TYPED_FUNC_START(v7m_coherent_user_range)
 UNWIND(.fnstart)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
1:
/*
 * We use the open-coded version of dccmvau, otherwise USER() would
 * point at the movw instruction.
 */
	dccmvau	r12, r3
	add	r12, r12, r2
	cmp	r12, r1
	blo	1b
	dsb	ishst
	icache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
2:
	icimvau	r12, r3
	add	r12, r12, r2
	cmp	r12, r1
	blo	2b
	invalidate_bp r0
	dsb	ishst
	isb
	ret	lr
 UNWIND(.fnend)
SYM_FUNC_END(v7m_coherent_user_range)

/*
 *	v7m_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
SYM_TYPED_FUNC_START(v7m_flush_kern_dcache_area)
	dcache_line_size r2, r3
	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccimvac r0, r3		@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
SYM_FUNC_END(v7m_flush_kern_dcache_area)

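/*
 * The DMA range helpers below follow the usual convention: "inv"
 * discards cache contents for the range (first cleaning any partial
 * lines at unaligned boundaries, so data sharing those lines is not
 * lost), "clean" writes dirty lines back to the point of coherency,
 * and "flush" does both.
 */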
/*
 *	v7m_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
v7m_dma_inv_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	tst	r0, r3
	bic	r0, r0, r3
	dccimvacne r0, r3
	addne	r0, r0, r2
	subne	r3, r2, #1	@ restore r3, corrupted by v7m's dccimvac
	tst	r1, r3
	bic	r1, r1, r3
	dccimvacne r1, r3
	cmp	r0, r1
1:
	dcimvaclo r0, r3
	addlo	r0, r0, r2
	cmplo	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_inv_range)

/*
 *	v7m_dma_clean_range(start,end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
v7m_dma_clean_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccmvac r0, r3			@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_clean_range)

/*
 *	v7m_dma_flush_range(start,end)
 *	- start	- virtual start address of region
 *	- end	- virtual end address of region
 */
SYM_TYPED_FUNC_START(v7m_dma_flush_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccimvac r0, r3			@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
SYM_FUNC_END(v7m_dma_flush_range)

/*
 *	v7m_dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(v7m_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v7m_dma_inv_range
	b	v7m_dma_clean_range
SYM_FUNC_END(v7m_dma_map_area)

/*
 *	v7m_dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(v7m_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v7m_dma_inv_range
	ret	lr
SYM_FUNC_END(v7m_dma_unmap_area)
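
/*
 * These entry points are normally gathered into a struct cpu_cache_fns
 * (see <asm/cacheflush.h>) by the define_cache_functions macro from
 * proc-macros.S, through which the generic ARM cache API dispatches to
 * the v7m implementations above.
 */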