TOMOYO Linux Cross Reference
Linux/arch/m68k/include/asm/cacheflush_mm.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_CACHEFLUSH_H
#define _M68K_CACHEFLUSH_H

#include <linux/mm.h>
#ifdef CONFIG_COLDFIRE
#include <asm/mcfsim.h>
#endif

/* CACR bits used to clear ("flush") the '020/'030 on-chip caches */
#define FLUSH_I_AND_D	(0x00000808)
#define FLUSH_I		(0x00000008)

#ifndef ICACHE_MAX_ADDR
#define ICACHE_MAX_ADDR	0
#define ICACHE_SET_MASK	0
#define DCACHE_MAX_ADDR	0
#define DCACHE_SETMASK	0
#endif
#ifndef CACHE_MODE
#define CACHE_MODE	0
#define CACR_ICINVA	0
#define CACR_DCINVA	0
#define CACR_BCINVA	0
#endif

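/*
 * Note: on ColdFire parts with caches, <asm/mcfsim.h> (included above)
 * supplies real values for the constants guarded by the #ifndefs above;
 * the zero fallbacks just keep configurations without those caches
 * compiling, since the ColdFire helpers below are never called there.
 */
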
/*
 * The ColdFire architecture has no way to clear individual cache lines, so
 * we are stuck invalidating all the cache entries when we want a clear
 * operation.
 */
static inline void clear_cf_icache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec	%0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA));
}

static inline void clear_cf_dcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec	%0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_DCINVA));
}

static inline void clear_cf_bcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec	%0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA | CACR_DCINVA));
}
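
/*
 * The start/end arguments to the clear_cf_*() helpers above appear to
 * exist only to match the flush_cf_*() signatures below; they are
 * ignored, because writing the *INVA bits to the CACR always
 * invalidates the whole cache.
 */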

/*
 * Use the ColdFire cpushl instruction to push (and invalidate) cache lines.
 * The start and end addresses are cache line set indexes, not memory
 * addresses.  Each pass of the loops below issues four cpushl
 * instructions (on the set-associative ColdFire caches, the low address
 * bits select the way): the three addq instructions advance the address
 * by 1 each, and the loop increment of (0x10 - 3) then makes the total
 * advance per iteration 0x10, i.e. one set.
 */
static inline void flush_cf_icache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_dcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_bcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}
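
/*
 * Note: "cpushl %bc" in flush_cf_bcache() operates on both the
 * instruction and data caches in a single pass, while %ic and %dc
 * select the individual caches.
 */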

/*
 * Cache handling functions
 */

static inline void flush_icache(void)
{
	if (CPU_IS_COLDFIRE) {
		flush_cf_icache(0, ICACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		asm volatile (	"nop\n"
			"	.chip	68040\n"
			"	cpusha	%bc\n"
			"	.chip	68k");
	} else {
		unsigned long tmp;
		asm volatile (	"movec	%%cacr,%0\n"
			"	or.w	%1,%0\n"
			"	movec	%0,%%cacr"
			: "=&d" (tmp)
			: "id" (FLUSH_I));
	}
}
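
/*
 * In flush_icache() the 68040/060 path uses "cpusha %bc" to push and
 * invalidate both caches in their entirety, while the 68020/030 path
 * sets the clear-instruction-cache bit (FLUSH_I) in the CACR.
 */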

/*
 * Invalidate the cache for the specified memory range.
 * The range starts at the given physical address and
 * covers the given number of bytes.
 */
extern void cache_clear(unsigned long paddr, int len);
/*
 * Push any dirty cache in the specified memory range.
 * The range starts at the given physical address and
 * covers the given number of bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * Push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v(unsigned long vaddr, int len);
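
/*
 * Illustrative use (assumed caller, not part of this header): before a
 * bus-mastering device reads a kernel buffer, dirty cache lines must be
 * pushed out to memory, e.g.:
 *
 *	cache_push(virt_to_phys(buf), len);
 */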

/* This is needed whenever the virtual mapping of the current
   process changes.  */
#define __flush_cache_all()					\
({								\
	if (CPU_IS_COLDFIRE) {					\
		flush_cf_dcache(0, DCACHE_MAX_ADDR);		\
	} else if (CPU_IS_040_OR_060) {				\
		__asm__ __volatile__("nop\n\t"			\
				     ".chip 68040\n\t"		\
				     "cpusha %dc\n\t"		\
				     ".chip 68k");		\
	} else {						\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define __flush_cache_030()					\
({								\
	if (CPU_IS_020_OR_030) {				\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})
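
/*
 * __flush_cache_030() deliberately acts only on the '020/'030: the
 * 68040/060 caches are physically mapped, so a change of user virtual
 * mapping does not by itself require a flush on those parts.
 */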

#define flush_cache_all() __flush_cache_all()

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		flush_cache_all()

static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_030();
}

#define flush_cache_dup_mm(mm)			flush_cache_mm(mm)

/* flush_cache_range/flush_cache_page historically had to be macros to
   avoid a dependency on linux/mm.h, which includes this file; they are
   inline functions now that <linux/mm.h> is included above. */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

/* Push the page(s) at the given kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
static inline void __flush_pages_to_ram(void *vaddr, unsigned int nr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long addr, start, end;
		addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
		start = addr & ICACHE_SET_MASK;
		end = (addr + nr * PAGE_SIZE - 1) & ICACHE_SET_MASK;
		if (start > end) {
			/* The masked set-index range wraps around the top
			   of the cache, so flush the low part first and
			   then the remainder up to the last set. */
			flush_cf_bcache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_bcache(start, end);
	} else if (CPU_IS_040_OR_060) {
		unsigned long paddr = __pa(vaddr);

		do {
			__asm__ __volatile__("nop\n\t"
					     ".chip 68040\n\t"
					     "cpushp %%bc,(%0)\n\t"
					     ".chip 68k"
					     : : "a" (paddr));
			paddr += PAGE_SIZE;
		} while (--nr);
	} else {
		unsigned long _tmp;
		__asm__ __volatile__("movec %%cacr,%0\n\t"
				     "orw %1,%0\n\t"
				     "movec %0,%%cacr"
				     : "=&d" (_tmp)
				     : "di" (FLUSH_I));
	}
}
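
/*
 * In the 68020/030 fallback above, only the instruction cache is cleared
 * (FLUSH_I): the '030 data cache is write-through, so memory already
 * holds the data and only stale instructions need to be discarded.
 */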

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page) __flush_pages_to_ram(page_address(page), 1)
#define flush_dcache_folio(folio)		\
	__flush_pages_to_ram(folio_address(folio), folio_nr_pages(folio))
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_pages(vma, page, nr)	\
	__flush_pages_to_ram(page_address(page), nr)

extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
				   unsigned long addr, int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);
extern void flush_icache_user_range(unsigned long address,
		unsigned long endaddr);

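/*
 * Usage sketch (illustrative only, not part of this header): a caller
 * that writes instructions into memory must make them visible to the
 * instruction cache before jumping to them, e.g.:
 *
 *	memcpy(dst, insn_buf, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */
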
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_page(vma, page, vaddr, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
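
/*
 * Note that copy_from_user_page() omits the trailing
 * flush_icache_user_page() call: it copies from the user page into a
 * kernel buffer, so no instructions are modified and the instruction
 * cache needs no attention.
 */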

#endif /* _M68K_CACHEFLUSH_H */