// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_ARM_HEAVY_MB
void (*soc_mb)(void);

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
        if (outer_cache.sync)
                outer_cache.sync();
#endif
        if (soc_mb)
                soc_mb();
}
EXPORT_SYMBOL(arm_heavy_mb);
#endif

#ifdef CONFIG_CPU_CACHE_VIPT

static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
        unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        const int zero = 0;

        set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

        asm(    "mcrr   p15, 0, %1, %0, c14\n"
        "       mcr     p15, 0, %2, c7, c10, 4"
            :
            : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
            : "cc");
}

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
        unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        unsigned long offset = vaddr & (PAGE_SIZE - 1);
        unsigned long to;

        set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
        to = va + offset;
        flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_mm(mm);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
                "       mcr     p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
        }
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_range(vma, start, end);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
                "       mcr     p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
        }

        if (vma->vm_flags & VM_EXEC)
                __flush_icache_all();
}

void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
                unsigned long pfn, unsigned int nr)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_pages(vma, user_addr, pfn, nr);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(pfn, user_addr);
                __flush_icache_all();
        }

        if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
                __flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)              do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)       do { } while (0)
#endif

#define FLAG_PA_IS_EXEC         1
#define FLAG_PA_CORE_IN_MM      2

static void flush_ptrace_access_other(void *args)
{
        __flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
                           unsigned long len, unsigned int flags)
{
        if (cache_is_vivt()) {
                if (flags & FLAG_PA_CORE_IN_MM) {
                        unsigned long addr = (unsigned long)kaddr;
                        __cpuc_coherent_kern_range(addr, addr + len);
                }
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(page_to_pfn(page), uaddr);
                __flush_icache_all();
                return;
        }

        /* VIPT non-aliasing D-cache */
        if (flags & FLAG_PA_IS_EXEC) {
                unsigned long addr = (unsigned long)kaddr;
                if (icache_is_vipt_aliasing())
                        flush_icache_alias(page_to_pfn(page), uaddr, len);
                else
                        __cpuc_coherent_kern_range(addr, addr + len);
                if (cache_ops_need_broadcast())
                        smp_call_function(flush_ptrace_access_other,
                                          NULL, 1);
        }
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                         unsigned long uaddr, void *kaddr, unsigned long len)
{
        unsigned int flags = 0;
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                flags |= FLAG_PA_CORE_IN_MM;
        if (vma->vm_flags & VM_EXEC)
                flags |= FLAG_PA_IS_EXEC;
        __flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
                             void *kaddr, unsigned long len)
{
        unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

        __flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long uaddr, void *dst, const void *src,
                       unsigned long len)
{
#ifdef CONFIG_SMP
        preempt_disable();
#endif
        memcpy(dst, src, len);
        flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
        preempt_enable();
#endif
}
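/*
 * Illustration only (not part of this file): copy_to_user_page() is
 * expected to be called with the page already mapped into the current
 * kernel address space.  The sketch below paraphrases the pattern used
 * by the generic remote-access path (__access_remote_vm() in
 * mm/memory.c); the function name and parameters here are placeholders,
 * and details may differ between kernel versions.
 */
#if 0   /* example usage sketch, never compiled */
static int example_remote_write(struct vm_area_struct *vma, struct page *page,
                                unsigned long addr, const void *buf,
                                unsigned long offset, int bytes)
{
        void *maddr = kmap_local_page(page);

        /* copy into the target process's page, then fix up the caches */
        copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
        set_page_dirty_lock(page);
        kunmap_local(maddr);
        return bytes;
}
#endif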
void __flush_dcache_folio(struct address_space *mapping, struct folio *folio)
{
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
        if (!folio_test_highmem(folio)) {
                __cpuc_flush_dcache_area(folio_address(folio),
                                         folio_size(folio));
        } else {
                unsigned long i;
                if (cache_is_vipt_nonaliasing()) {
                        for (i = 0; i < folio_nr_pages(folio); i++) {
                                void *addr = kmap_local_folio(folio,
                                                              i * PAGE_SIZE);

                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                kunmap_local(addr);
                        }
                } else {
                        for (i = 0; i < folio_nr_pages(folio); i++) {
                                void *addr = kmap_high_get(folio_page(folio, i));
                                if (addr) {
                                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                        kunmap_high(folio_page(folio, i));
                                }
                        }
                }
        }

        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
         * we only need to do one flush - which would be at the relevant
         * userspace colour, which is congruent with page->index.
         */
        if (mapping && cache_is_vipt_aliasing())
                flush_pfn_alias(folio_pfn(folio), folio_pos(folio));
}

static void __flush_dcache_aliases(struct address_space *mapping, struct folio *folio)
{
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *vma;
        pgoff_t pgoff, pgoff_end;

        /*
         * There are possible user space mappings of this page:
         * - VIVT cache: we need to also write back and invalidate all user
         *   data in the current VM view associated with this page.
         * - aliasing VIPT: we only need to find one mapping of this page.
         */
        pgoff = folio->index;
        pgoff_end = pgoff + folio_nr_pages(folio) - 1;

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff_end) {
                unsigned long start, offset, pfn;
                unsigned int nr;

                /*
                 * If this VMA is not in our MM, we can ignore it.
                 */
                if (vma->vm_mm != mm)
                        continue;
                if (!(vma->vm_flags & VM_MAYSHARE))
                        continue;

                start = vma->vm_start;
                pfn = folio_pfn(folio);
                nr = folio_nr_pages(folio);
                offset = pgoff - vma->vm_pgoff;
                if (offset > -nr) {
                        pfn -= offset;
                        nr += offset;
                } else {
                        start += offset * PAGE_SIZE;
                }
                if (start + nr * PAGE_SIZE > vma->vm_end)
                        nr = (vma->vm_end - start) / PAGE_SIZE;

                flush_cache_pages(vma, start, pfn, nr);
        }
        flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
void __sync_icache_dcache(pte_t pteval)
{
        unsigned long pfn;
        struct folio *folio;
        struct address_space *mapping;

        if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
                /* only flush non-aliasing VIPT caches for exec mappings */
                return;
        pfn = pte_pfn(pteval);
        if (!pfn_valid(pfn))
                return;

        folio = page_folio(pfn_to_page(pfn));
        if (folio_test_reserved(folio))
                return;

        if (cache_is_vipt_aliasing())
                mapping = folio_flush_mapping(folio);
        else
                mapping = NULL;

        if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
                __flush_dcache_folio(mapping, folio);

        if (pte_exec(pteval))
                __flush_icache_all();
}
#endif
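/*
 * Illustration only (not part of this file): __sync_icache_dcache() is
 * invoked from the arch page-table setter when a valid user PTE is
 * installed.  The sketch below is a paraphrase of the caller in
 * arch/arm/include/asm/pgtable.h; the exact guard conditions vary
 * between kernel versions, so treat this as an assumption rather than
 * a reference.
 */
#if 0   /* example caller sketch, never compiled */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        unsigned long ext = 0;

        if (addr < TASK_SIZE && pte_valid_user(pteval)) {
                if (!pte_special(pteval))
                        __sync_icache_dcache(pteval);
                ext |= PTE_EXT_NG;
        }

        set_pte_ext(ptep, pteval, ext);
}
#endif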
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias of this page in our current
 *    VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcasted.
 */
void flush_dcache_folio(struct folio *folio)
{
        struct address_space *mapping;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        if (is_zero_pfn(folio_pfn(folio)))
                return;

        if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
                if (test_bit(PG_dcache_clean, &folio->flags))
                        clear_bit(PG_dcache_clean, &folio->flags);
                return;
        }

        mapping = folio_flush_mapping(folio);

        if (!cache_ops_need_broadcast() &&
            mapping && !folio_mapped(folio))
                clear_bit(PG_dcache_clean, &folio->flags);
        else {
                __flush_dcache_folio(mapping, folio);
                if (mapping && cache_is_vivt())
                        __flush_dcache_aliases(mapping, folio);
                else if (mapping)
                        __flush_icache_all();
                set_bit(PG_dcache_clean, &folio->flags);
        }
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_dcache_page(struct page *page)
{
        flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);
/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
        unsigned long pfn;

        /* VIPT non-aliasing caches need do nothing */
        if (cache_is_vipt_nonaliasing())
                return;

        /*
         * Write back and invalidate userspace mapping.
         */
        pfn = page_to_pfn(page);
        if (cache_is_vivt()) {
                flush_cache_page(vma, vmaddr, pfn);
        } else {
                /*
                 * For aliasing VIPT, we can flush an alias of the
                 * userspace address only.
                 */
                flush_pfn_alias(pfn, vmaddr);
                __flush_icache_all();
        }

        /*
         * Invalidate kernel mapping.  No data should be contained
         * in this mapping of the page.  FIXME: this is overkill
         * since we actually ask for a write-back and invalidate.
         */
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
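/*
 * Illustration only (not part of this file): the classic driver-side
 * pattern that flush_dcache_page() exists to support (see
 * Documentation/core-api/cachetlb.rst) - write to a page-cache page
 * through the kernel mapping, then flush so that user aliases see the
 * data.  The function name and the data/offset/len parameters below
 * are placeholders.
 */
#if 0   /* example usage sketch, never compiled */
static void example_fill_pagecache_page(struct page *page, const void *data,
                                        size_t offset, size_t len)
{
        void *vaddr = kmap_local_page(page);

        memcpy(vaddr + offset, data, len);      /* dirties the kernel alias */
        kunmap_local(vaddr);
        flush_dcache_page(page);                /* resolve kernel/user aliasing */
}
#endif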