TOMOYO Linux Cross Reference
Linux/arch/arm64/include/asm/cacheflush.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/kgdb.h>
#include <linux/mm.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

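/*
 * Illustrative sketch only (not part of this header): how arch code
 * typically consults the flag. A folio whose PG_dcache_clean bit is
 * clear still needs cache maintenance before user space can safely
 * execute from it; example_needs_cache_sync is a hypothetical name.
 */
static inline bool example_needs_cache_sync(struct folio *folio)
{
        return !test_bit(PG_dcache_clean, &folio->flags);
}
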
/*
 *      MM Cache Management
 *      ===================
 *
 *      arch/arm64/mm/cache.S implements these methods.
 *
 *      Start addresses are inclusive and end addresses are exclusive; start
 *      addresses should be rounded down, end addresses up.
 *
 *      See Documentation/core-api/cachetlb.rst for more information. Please
 *      note that the implementation assumes a non-aliasing VIPT D-cache and
 *      an (aliasing) VIPT I-cache.
 *
 *      All functions below apply to the interval [start, end):
 *              - start  - virtual start address (inclusive)
 *              - end    - virtual end address (exclusive)
 *
 *      caches_clean_inval_pou(start, end)
 *
 *              Ensure coherency between the I-cache and the D-cache region to
 *              the Point of Unification.
 *
 *      caches_clean_inval_user_pou(start, end)
 *
 *              Ensure coherency between the I-cache and the D-cache region to
 *              the Point of Unification.
 *              Use only if the region might access user memory.
 *
 *      icache_inval_pou(start, end)
 *
 *              Invalidate I-cache region to the Point of Unification.
 *
 *      dcache_clean_inval_poc(start, end)
 *
 *              Clean and invalidate D-cache region to the Point of Coherency.
 *
 *      dcache_inval_poc(start, end)
 *
 *              Invalidate D-cache region to the Point of Coherency.
 *
 *      dcache_clean_poc(start, end)
 *
 *              Clean D-cache region to the Point of Coherency.
 *
 *      dcache_clean_pop(start, end)
 *
 *              Clean D-cache region to the Point of Persistence.
 *
 *      dcache_clean_pou(start, end)
 *
 *              Clean D-cache region to the Point of Unification.
 */
extern void caches_clean_inval_pou(unsigned long start, unsigned long end);
extern void icache_inval_pou(unsigned long start, unsigned long end);
extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
extern void dcache_inval_poc(unsigned long start, unsigned long end);
extern void dcache_clean_poc(unsigned long start, unsigned long end);
extern void dcache_clean_pop(unsigned long start, unsigned long end);
extern void dcache_clean_pou(unsigned long start, unsigned long end);
extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
extern void sync_icache_aliases(unsigned long start, unsigned long end);

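/*
 * Illustrative sketch only (not part of this header): using the PoC
 * helpers above for a buffer shared with a non-coherent DMA device,
 * with start rounded down and end rounded up per the rule in the
 * comment block. EXAMPLE_CLINE is a hypothetical stand-in for the
 * cache line size (real code uses cache_line_size()); drivers would
 * normally go through the DMA API rather than call these directly.
 */
#define EXAMPLE_CLINE   64

static inline void example_clean_for_device(void *buf, size_t len)
{
        unsigned long start = round_down((unsigned long)buf, EXAMPLE_CLINE);
        unsigned long end = round_up((unsigned long)buf + len, EXAMPLE_CLINE);

        dcache_clean_poc(start, end);   /* make CPU writes visible to device */
}

static inline void example_inval_from_device(void *buf, size_t len)
{
        unsigned long start = round_down((unsigned long)buf, EXAMPLE_CLINE);
        unsigned long end = round_up((unsigned long)buf + len, EXAMPLE_CLINE);

        dcache_inval_poc(start, end);   /* discard stale lines before reading */
}
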
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
        caches_clean_inval_pou(start, end);

        /*
         * IPI all online CPUs so that they undergo a context synchronization
         * event and are forced to refetch the new instructions.
         */

        /*
         * KGDB performs cache maintenance with interrupts disabled, so we
         * will deadlock trying to IPI the secondary CPUs. In theory, we can
         * set CACHE_FLUSH_IS_SAFE to 0 to avoid this known issue, but that
         * just means that KGDB will elide the maintenance altogether! As it
         * turns out, KGDB uses IPIs to round up the secondary CPUs during
         * the patching operation, so we don't need extra IPIs here anyway.
         * In which case, add a KGDB-specific bodge and return early.
         */
        if (in_dbg_master())
                return;

        kick_all_cpus_sync();
}
#define flush_icache_range flush_icache_range

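/*
 * Illustrative sketch only (not part of this header): typical use of
 * flush_icache_range() by code that writes instructions. The new bytes
 * must be cleaned to the PoU and stale I-cache lines invalidated before
 * any CPU executes them; example_publish_insns is a hypothetical name.
 */
static inline void example_publish_insns(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);          /* write the new instructions */
        flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}
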
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
        unsigned long, void *, const void *, unsigned long);
#define copy_to_user_page copy_to_user_page

/*
 * flush_dcache_folio is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., folio_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
void flush_dcache_folio(struct folio *);
#define flush_dcache_folio flush_dcache_folio

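/*
 * Illustrative sketch only (not part of this header): a filesystem or
 * driver filling a page cache page through the kernel mapping would
 * call flush_dcache_page() afterwards so that user mappings observe
 * the new data. kmap_local_page()/kunmap_local() come from
 * linux/highmem.h; example_fill_pagecache_page is a hypothetical name.
 */
static inline void example_fill_pagecache_page(struct page *page,
                                               const void *src, size_t len)
{
        void *dst = kmap_local_page(page);

        memcpy(dst, src, len);
        kunmap_local(dst);
        flush_dcache_page(page);        /* pair the write with maintenance */
}
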
/*
 * Invalidate the entire I-cache to the Point of Unification. CPUs with
 * the DIC feature (ARM64_HAS_CACHE_DIC) do not require I-cache
 * invalidation for instruction-to-data coherence to the PoU, so the
 * maintenance can be skipped entirely.
 */
static __always_inline void icache_inval_all_pou(void)
{
        if (alternative_has_cap_unlikely(ARM64_HAS_CACHE_DIC))
                return;

        asm("ic ialluis");      /* invalidate all I-caches, Inner Shareable */
        dsb(ish);               /* wait for the invalidation to complete */
}

#include <asm-generic/cacheflush.h>

#endif /* __ASM_CACHEFLUSH_H */
