TOMOYO Linux Cross Reference
Linux/arch/parisc/kernel/cache.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
#include <asm/cachectl.h>

#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
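/*
 * Example (illustrative): with the usual 4 KB base page size, a pointer
 * value of 0x104a8c aligns down to 0x104000, the start of the page that
 * contains it. This is used below to flush the kernel page underlying a
 * user copy buffer.
 */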

/*
 * When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
 * of page flushes done in flush_cache_page_if_present. There are some
 * pros and cons in using this option. It may increase the risk of
 * random segmentation faults.
 */
#define CONFIG_FLUSH_PAGE_ACCESSED      0
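/* flush_cache_page_if_present() and ptep_clear_flush_young() below
   switch to their _PAGE_ACCESSED-based variants when this is set to 1. */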

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

static void flush_kernel_dcache_page_addr(const void *addr);

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
struct pdc_btlb_info btlb_info;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

static void cache_flush_local_cpu(void *dummy)
{
        if (static_branch_likely(&parisc_has_icache))
                flush_instruction_cache_local();
        if (static_branch_likely(&parisc_has_dcache))
                flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
        cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
        if (static_branch_likely(&parisc_has_cache))
                on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
        if (static_branch_likely(&parisc_has_dcache))
                on_each_cpu(flush_data_cache_local, NULL, 1);
}


/* Kernel virtual address of pfn.  */
#define pfn_va(pfn)     __va(PFN_PHYS(pfn))

void __update_cache(pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct folio *folio;
        unsigned int nr;

        /* We don't have pte special.  As a result, we can be called with
           an invalid pfn and we don't need to flush the kernel dcache page.
           This occurs with FireGL card in C8000.  */
        if (!pfn_valid(pfn))
                return;

        folio = page_folio(pfn_to_page(pfn));
        pfn = folio_pfn(folio);
        nr = folio_nr_pages(folio);
        if (folio_flush_mapping(folio) &&
            test_bit(PG_dcache_dirty, &folio->flags)) {
                while (nr--)
                        flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
                clear_bit(PG_dcache_dirty, &folio->flags);
        } else if (parisc_requires_coherency())
                while (nr--)
                        flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
}

void
show_cache_info(struct seq_file *m)
{
        char buf[32];

        seq_printf(m, "I-cache\t\t: %ld KB\n",
                cache_info.ic_size/1024 );
        if (cache_info.dc_loop != 1)
                snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
                cache_info.dc_size/1024,
                (cache_info.dc_conf.cc_wt ? "WT":"WB"),
                (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
                ((cache_info.dc_loop == 1) ? "direct mapped" : buf),
                cache_info.dc_conf.cc_alias
        );
        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
        );

#ifndef CONFIG_PA20
        /* BTLB - Block TLB */
        if (btlb_info.max_size==0) {
                seq_printf(m, "BTLB\t\t: not supported\n" );
        } else {
                seq_printf(m,
                "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                btlb_info.max_size, (int)4096,
                btlb_info.max_size>>8,
                btlb_info.fixed_range_info.num_i,
                btlb_info.fixed_range_info.num_d,
                btlb_info.fixed_range_info.num_comb,
                btlb_info.variable_range_info.num_i,
                btlb_info.variable_range_info.num_d,
                btlb_info.variable_range_info.num_comb
                );
        }
#endif
}

void __init
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");

#if 0
        printk("ic_size %lx dc_size %lx it_size %lx\n",
                cache_info.ic_size,
                cache_info.dc_size,
                cache_info.it_size);

        printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.dc_base,
                cache_info.dc_stride,
                cache_info.dc_count,
                cache_info.dc_loop);

        printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.dc_conf),
                cache_info.dc_conf.cc_alias,
                cache_info.dc_conf.cc_block,
                cache_info.dc_conf.cc_line,
                cache_info.dc_conf.cc_shift);
        printk("        wt %d sh %d cst %d hv %d\n",
                cache_info.dc_conf.cc_wt,
                cache_info.dc_conf.cc_sh,
                cache_info.dc_conf.cc_cst,
                cache_info.dc_conf.cc_hv);

        printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.ic_base,
                cache_info.ic_stride,
                cache_info.ic_count,
                cache_info.ic_loop);

        printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
                cache_info.it_sp_base,
                cache_info.it_sp_stride,
                cache_info.it_sp_count,
                cache_info.it_loop,
                cache_info.it_off_base,
                cache_info.it_off_stride,
                cache_info.it_off_count);

        printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
                cache_info.dt_sp_base,
                cache_info.dt_sp_stride,
                cache_info.dt_sp_count,
                cache_info.dt_loop,
                cache_info.dt_off_base,
                cache_info.dt_off_stride,
                cache_info.dt_off_count);

        printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.ic_conf),
                cache_info.ic_conf.cc_alias,
                cache_info.ic_conf.cc_block,
                cache_info.ic_conf.cc_line,
                cache_info.ic_conf.cc_shift);
        printk("        wt %d sh %d cst %d hv %d\n",
                cache_info.ic_conf.cc_wt,
                cache_info.ic_conf.cc_sh,
                cache_info.ic_conf.cc_cst,
                cache_info.ic_conf.cc_hv);

        printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
                cache_info.dt_conf.tc_sh,
                cache_info.dt_conf.tc_page,
                cache_info.dt_conf.tc_cst,
                cache_info.dt_conf.tc_aid,
                cache_info.dt_conf.tc_sr);

        printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
                cache_info.it_conf.tc_sh,
                cache_info.it_conf.tc_page,
                cache_info.it_conf.tc_cst,
                cache_info.it_conf.tc_aid,
                cache_info.it_conf.tc_sr);
#endif

        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                        "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }

        /* "New and Improved" version from Jim Hull
         *      (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
         * The following CAFL_STRIDE is an optimized version, see
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
         */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
        dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
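        /*
         * Worked example (illustrative values, not from a real machine):
         * cc_line = 2, cc_block = 1 and cc_shift = 2 give
         * CAFL_STRIDE = 2 << (3 + 1 + 2) = 128 bytes, matching Jim
         * Hull's original form (1 << (1 - 1)) * (2 << (4 + 2)) = 128.
         */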

        /* stride needs to be non-zero, otherwise cache flushes will not work */
        WARN_ON(cache_info.dc_size && dcache_stride == 0);
        WARN_ON(cache_info.ic_size && icache_stride == 0);

        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}

void disable_sr_hashing(void)
{
        int srhash_type, retval;
        unsigned long space_bits;

        switch (boot_cpu_data.cpu_type) {
        case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
                BUG();
                return;

        case pcxs:
        case pcxt:
        case pcxt_:
                srhash_type = SRHASH_PCXST;
                break;

        case pcxl:
                srhash_type = SRHASH_PCXL;
                break;

        case pcxl2: /* pcxl2 doesn't support space register hashing */
                return;

        default: /* Currently all PA2.0 machines use the same ins. sequence */
                srhash_type = SRHASH_PA20;
                break;
        }

        disable_sr_hashing_asm(srhash_type);

        retval = pdc_spaceid_bits(&space_bits);
        /* If this procedure isn't implemented, don't panic. */
        if (retval < 0 && retval != PDC_BAD_OPTION)
                panic("pdc_spaceid_bits call failed.\n");
        if (space_bits != 0)
                panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                   unsigned long physaddr)
{
        if (!static_branch_likely(&parisc_has_cache))
                return;

        /*
         * The TLB is the engine of coherence on parisc.  The CPU is
         * entitled to speculate any page with a TLB mapping, so here
         * we kill the mapping then flush the page along a special flush
         * only alias mapping. This guarantees that the page is no-longer
         * in the cache for any process and nor may it be speculatively
         * read in (until the user or kernel specifically accesses it,
         * of course).
         */
        flush_tlb_page(vma, vmaddr);

        preempt_disable();
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
        preempt_enable();
}

static void flush_kernel_dcache_page_addr(const void *addr)
{
        unsigned long vaddr = (unsigned long)addr;
        unsigned long flags;

        /* Purge TLB entry to remove translation on all CPUs */
        purge_tlb_start(flags);
        pdtlb(SR_KERNEL, addr);
        purge_tlb_end(flags);

        /* Use tmpalias flush to prevent data cache move-in */
        preempt_disable();
        flush_dcache_page_asm(__pa(vaddr), vaddr);
        preempt_enable();
}

static void flush_kernel_icache_page_addr(const void *addr)
{
        unsigned long vaddr = (unsigned long)addr;
        unsigned long flags;

        /* Purge TLB entry to remove translation on all CPUs */
        purge_tlb_start(flags);
        pdtlb(SR_KERNEL, addr);
        purge_tlb_end(flags);

        /* Use tmpalias flush to prevent instruction cache move-in */
        preempt_disable();
        flush_icache_page_asm(__pa(vaddr), vaddr);
        preempt_enable();
}

void kunmap_flush_on_unmap(const void *addr)
{
        flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_flush_on_unmap);

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
                unsigned int nr)
{
        void *kaddr = page_address(page);

        for (;;) {
                flush_kernel_dcache_page_addr(kaddr);
                flush_kernel_icache_page_addr(kaddr);
                if (--nr == 0)
                        break;
                kaddr += PAGE_SIZE;
        }
}

/*
 * Walk page directory for MM to find PTEP pointer for address ADDR.
 */
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
        pte_t *ptep = NULL;
        pgd_t *pgd = mm->pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (!pgd_none(*pgd)) {
                p4d = p4d_offset(pgd, addr);
                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, addr);
                        if (!pud_none(*pud)) {
                                pmd = pmd_offset(pud, addr);
                                if (!pmd_none(*pmd))
                                        ptep = pte_offset_map(pmd, addr);
                        }
                }
        }
        return ptep;
}
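/*
 * Usage sketch (mirrors flush_cache_page_if_present() below): a non-NULL
 * result comes from pte_offset_map() and must be balanced with pte_unmap()
 * once the PTE value has been read:
 *
 *      ptep = get_ptep(vma->vm_mm, vmaddr);
 *      if (ptep) {
 *              pte = ptep_get(ptep);
 *              pte_unmap(ptep);
 *      }
 */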

static inline bool pte_needs_flush(pte_t pte)
{
        return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
                == (_PAGE_PRESENT | _PAGE_ACCESSED);
}
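/*
 * Example (illustrative): a PTE with _PAGE_PRESENT and _PAGE_ACCESSED set
 * and _PAGE_NO_CACHE clear needs a flush. Clearing either of the first two
 * bits, or setting _PAGE_NO_CACHE (the mapping is uncached, so there is
 * nothing to flush), makes the predicate false.
 */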

/*
 * Return user physical address. Returns 0 if page is not present.
 */
static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
{
        unsigned long flags, space, pgd, prot, pa;
#ifdef CONFIG_TLB_PTLOCK
        unsigned long pgd_lock;
#endif

        /* Save context */
        local_irq_save(flags);
        prot = mfctl(8);
        space = mfsp(SR_USER);
        pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
        pgd_lock = mfctl(28);
#endif

        /* Set context for lpa_user */
        switch_mm_irqs_off(NULL, mm, NULL);
        pa = lpa_user(addr);

        /* Restore previous context */
#ifdef CONFIG_TLB_PTLOCK
        mtctl(pgd_lock, 28);
#endif
        mtctl(pgd, 25);
        mtsp(space, SR_USER);
        mtctl(prot, 8);
        local_irq_restore(flags);

        return pa;
}

void flush_dcache_folio(struct folio *folio)
{
        struct address_space *mapping = folio_flush_mapping(folio);
        struct vm_area_struct *vma;
        unsigned long addr, old_addr = 0;
        void *kaddr;
        unsigned long count = 0;
        unsigned long i, nr, flags;
        pgoff_t pgoff;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &folio->flags);
                return;
        }

        nr = folio_nr_pages(folio);
        kaddr = folio_address(folio);
        for (i = 0; i < nr; i++)
                flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);

        if (!mapping)
                return;

        pgoff = folio->index;

        /*
         * We have carefully arranged in arch_get_unmapped_area() that
         * *any* mappings of a file are always congruently mapped (whether
         * declared as MAP_PRIVATE or MAP_SHARED), so we only need
         * to flush one address here for them all to become coherent
         * on machines that support equivalent aliasing
         */
        flush_dcache_mmap_lock_irqsave(mapping, flags);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
                unsigned long offset = pgoff - vma->vm_pgoff;
                unsigned long pfn = folio_pfn(folio);

                addr = vma->vm_start;
                nr = folio_nr_pages(folio);
                if (offset > -nr) {
                        pfn -= offset;
                        nr += offset;
                } else {
                        addr += offset * PAGE_SIZE;
                }
                if (addr + nr * PAGE_SIZE > vma->vm_end)
                        nr = (vma->vm_end - addr) / PAGE_SIZE;

                if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
                                        != (addr & (SHM_COLOUR - 1))) {
                        for (i = 0; i < nr; i++)
                                __flush_cache_page(vma,
                                        addr + i * PAGE_SIZE,
                                        (pfn + i) * PAGE_SIZE);
                        /*
                         * Software is allowed to have any number
                         * of private mappings to a page.
                         */
                        if (!(vma->vm_flags & VM_SHARED))
                                continue;
                        if (old_addr)
                                pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
                                        old_addr, addr, vma->vm_file);
                        if (nr == folio_nr_pages(folio))
                                old_addr = addr;
                }
                WARN_ON(++count == 4096);
        }
        flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_folio);
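/*
 * Congruence example (illustrative addresses; assumes the 4 MB SHM_COLOUR
 * used on parisc): user addresses 0x00400000 and 0x12c00000 agree in their
 * low 22 bits, so they share a cache colour and one flush covers both on
 * machines with equivalent aliasing. 0x00400000 and 0x00401000 do not,
 * which is exactly what the SHM_COLOUR comparison above reports as
 * INEQUIVALENT ALIASES.
 */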

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

void __init parisc_setup_cache_timing(void)
{
        unsigned long rangetime, alltime;
        unsigned long size;
        unsigned long threshold, threshold2;

        alltime = mfctl(16);
        flush_data_cache();
        alltime = mfctl(16) - alltime;

        size = (unsigned long)(_end - _text);
        rangetime = mfctl(16);
        flush_kernel_dcache_range((unsigned long)_text, size);
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
        pr_info("Calculated flush threshold is %lu KiB\n",
                threshold/1024);

        /*
         * The threshold computed above isn't very reliable. The following
         * heuristic works reasonably well on c8000/rp3440.
         */
        threshold2 = cache_info.dc_size * num_online_cpus();
        parisc_cache_flush_threshold = threshold2;
        printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
                parisc_cache_flush_threshold/1024);

        /* calculate TLB flush threshold */

        /* On SMP machines, skip the TLB measure of kernel text which
         * has been mapped as huge pages. */
        if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
                threshold = max(cache_info.it_size, cache_info.dt_size);
                threshold *= PAGE_SIZE;
                threshold /= num_online_cpus();
                goto set_tlb_threshold;
        }

        size = (unsigned long)_end - (unsigned long)_text;
        rangetime = mfctl(16);
        flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
        rangetime = mfctl(16) - rangetime;

        alltime = mfctl(16);
        flush_tlb_all();
        alltime = mfctl(16) - alltime;

        printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
        printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
                threshold/1024);

set_tlb_threshold:
        parisc_tlb_flush_threshold = max_t(unsigned long, threshold, FLUSH_TLB_THRESHOLD);
        printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
                parisc_tlb_flush_threshold/1024);
}
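/*
 * Example (illustrative numbers): with PDC reporting a 32 MB data cache
 * and 4 CPUs online, the heuristic above sets the flush threshold to
 * 32 MB * 4 = 128 MB; flush_cache_mm() and flush_cache_range() fall back
 * to whole-cache flushes once a range reaches that size.
 */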

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
        unsigned long vmaddr)
{
#if CONFIG_FLUSH_PAGE_ACCESSED
        bool needs_flush = false;
        pte_t *ptep, pte;

        ptep = get_ptep(vma->vm_mm, vmaddr);
        if (ptep) {
                pte = ptep_get(ptep);
                needs_flush = pte_needs_flush(pte);
                pte_unmap(ptep);
        }
        if (needs_flush)
                __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
#else
        struct mm_struct *mm = vma->vm_mm;
        unsigned long physaddr = get_upa(mm, vmaddr);

        if (physaddr)
                __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
#endif
}

void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *kto, *kfrom;

        kfrom = kmap_local_page(from);
        kto = kmap_local_page(to);
        __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
        copy_page_asm(kto, kfrom);
        kunmap_local(kto);
        kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long user_vaddr, void *dst, void *src, int len)
{
        __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
        memcpy(dst, src, len);
        flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long user_vaddr, void *dst, void *src, int len)
{
        __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
        memcpy(dst, src, len);
        flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
                      unsigned long end)
{
        unsigned long flags;

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            end - start >= parisc_tlb_flush_threshold) {
                flush_tlb_all();
                return 1;
        }

        /* Purge TLB entries for small ranges using the pdtlb and
           pitlb instructions.  These instructions execute locally
           but cause a purge request to be broadcast to other TLBs.  */
        while (start < end) {
                purge_tlb_start(flags);
                mtsp(sid, SR_TEMP1);
                pdtlb(SR_TEMP1, start);
                pitlb(SR_TEMP1, start);
                purge_tlb_end(flags);
                start += PAGE_SIZE;
        }
        return 0;
}
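/*
 * Example (illustrative): purging two 4 KB pages (8 KiB) falls below the
 * 16 KiB minimum threshold, so the loop above issues two broadcast
 * pdtlb/pitlb purges and returns 0. A range of at least
 * parisc_tlb_flush_threshold bytes (with interrupts enabled on SMP)
 * escalates to flush_tlb_all() and returns 1 so the caller knows every
 * entry is gone.
 */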

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        unsigned long addr;

        for (addr = start; addr < end; addr += PAGE_SIZE)
                flush_cache_page_if_present(vma, addr);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long usize = 0;
        VMA_ITERATOR(vmi, mm, 0);

        for_each_vma(vmi, vma) {
                if (usize >= parisc_cache_flush_threshold)
                        break;
                usize += vma->vm_end - vma->vm_start;
        }
        return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);

        /*
         * Flushing the whole cache on each cpu takes forever on
         * rp3440, etc. So, avoid it if the mm isn't too big.
         *
         * Note that we must flush the entire cache on machines
         * with aliasing caches to prevent random segmentation
         * faults.
         */
        if (!parisc_requires_coherency()
            ||  mm_total_size(mm) >= parisc_cache_flush_threshold) {
                if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
                        return;
                flush_tlb_all();
                flush_cache_all();
                return;
        }

        /* Flush mm */
        for_each_vma(vmi, vma)
                flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (!parisc_requires_coherency()
            || end - start >= parisc_cache_flush_threshold) {
                if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
                        return;
                flush_tlb_range(vma, start, end);
                if (vma->vm_flags & VM_EXEC)
                        flush_cache_all();
                else
                        flush_data_cache();
                return;
        }

        flush_cache_pages(vma, start & PAGE_MASK, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
        if (!PageAnon(page))
                return;

        __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
}

int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
                           pte_t *ptep)
{
        pte_t pte = ptep_get(ptep);

        if (!pte_young(pte))
                return 0;
        set_pte(ptep, pte_mkold(pte));
#if CONFIG_FLUSH_PAGE_ACCESSED
        __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
#endif
        return 1;
}

/*
 * After a PTE is cleared, we have no way to flush the cache for
 * the physical page. On PA8800 and PA8900 processors, these lines
 * can cause random cache corruption. Thus, we must flush the cache
 * as well as the TLB when clearing a PTE that's valid.
 */
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
                       pte_t *ptep)
{
        struct mm_struct *mm = (vma)->vm_mm;
        pte_t pte = ptep_get_and_clear(mm, addr, ptep);
        unsigned long pfn = pte_pfn(pte);

        if (pfn_valid(pfn))
                __flush_cache_page(vma, addr, PFN_PHYS(pfn));
        else if (pte_accessible(mm, pte))
                flush_tlb_page(vma, addr);

        return pte;
}

/*
 * The physical address for pages in the ioremap case can be obtained
 * from the vm_struct struct. I wasn't able to successfully handle the
 * vmalloc and vmap cases. We have an array of struct page pointers in
 * the uninitialized vmalloc case but the flush failed using page_to_pfn.
 */
void flush_cache_vmap(unsigned long start, unsigned long end)
{
        unsigned long addr, physaddr;
        struct vm_struct *vm;

        /* Prevent cache move-in */
        flush_tlb_kernel_range(start, end);

        if (end - start >= parisc_cache_flush_threshold) {
                flush_cache_all();
                return;
        }

        if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
                flush_cache_all();
                return;
        }

        vm = find_vm_area((void *)start);
        if (WARN_ON_ONCE(!vm)) {
                flush_cache_all();
                return;
        }

        /* The physical addresses of IOREMAP regions are contiguous */
        if (vm->flags & VM_IOREMAP) {
                physaddr = vm->phys_addr;
                for (addr = start; addr < end; addr += PAGE_SIZE) {
                        preempt_disable();
                        flush_dcache_page_asm(physaddr, start);
                        flush_icache_page_asm(physaddr, start);
                        preempt_enable();
                        physaddr += PAGE_SIZE;
                }
                return;
        }

        flush_cache_all();
}
EXPORT_SYMBOL(flush_cache_vmap);

/*
 * The vm_struct has been retired and the page table is set up. The
 * last page in the range is a guard page. Its physical address can't
 * be determined using lpa, so there is no way to flush the range
 * using flush_dcache_page_asm.
 */
void flush_cache_vunmap(unsigned long start, unsigned long end)
{
        /* Prevent cache move-in */
        flush_tlb_kernel_range(start, end);
        flush_data_cache();
}
EXPORT_SYMBOL(flush_cache_vunmap);

/*
 * On systems with PA8800/PA8900 processors, there is no way to flush
 * a vmap range other than using the architected loop to flush the
 * entire cache. The page directory is not set up, so we can't use
 * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
 * L2 is physically indexed but FDCE/FICE instructions in virtual
 * mode output their virtual address on the core bus, not their
 * real address. As a result, the L2 cache index formed from the
 * virtual address will most likely not be the same as the L2 index
 * formed from the real address.
 */
void flush_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        flush_tlb_kernel_range(start, end);

        if (!static_branch_likely(&parisc_has_dcache))
                return;

        /* If interrupts are disabled, we can only do local flush */
        if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
                flush_data_cache_local(NULL);
                return;
        }

        flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        /* Ensure DMA is complete */
        asm_syncdma();

        flush_tlb_kernel_range(start, end);

        if (!static_branch_likely(&parisc_has_dcache))
                return;

        /* If interrupts are disabled, we can only do local flush */
        if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
                flush_data_cache_local(NULL);
                return;
        }

        flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
        unsigned int, cache)
{
        unsigned long start, end;
        ASM_EXCEPTIONTABLE_VAR(error);

        if (bytes == 0)
                return 0;
        if (!access_ok((void __user *) addr, bytes))
                return -EFAULT;

        end = addr + bytes;

        if (cache & DCACHE) {
                start = addr;
                __asm__ __volatile__ (
#ifdef CONFIG_64BIT
                        "1: cmpb,*<<,n  %0,%2,1b\n"
#else
                        "1: cmpb,<<,n   %0,%2,1b\n"
#endif
                        "   fdc,m       %3(%4,%0)\n"
                        "2: sync\n"
                        ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
                        : "+r" (start), "+r" (error)
                        : "r" (end), "r" (dcache_stride), "i" (SR_USER));
        }

        if (cache & ICACHE && error == 0) {
                start = addr;
                __asm__ __volatile__ (
#ifdef CONFIG_64BIT
                        "1: cmpb,*<<,n  %0,%2,1b\n"
#else
                        "1: cmpb,<<,n   %0,%2,1b\n"
#endif
                        "   fic,m       %3(%4,%0)\n"
                        "2: sync\n"
                        ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
                        : "+r" (start), "+r" (error)
                        : "r" (end), "r" (icache_stride), "i" (SR_USER));
        }

        return error;
}
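/*
 * Userspace usage sketch (illustrative, not part of this file; assumes the
 * syscall(2) wrapper and that the syscall is exposed as __NR_cacheflush,
 * with the DCACHE/ICACHE flags coming from <asm/cachectl.h>). A JIT that
 * has just written instructions into a buffer might issue:
 *
 *      syscall(__NR_cacheflush, (unsigned long)code, code_len,
 *              DCACHE | ICACHE);
 *
 * to push the new data out of the data cache and invalidate stale
 * instruction cache lines before branching into the buffer.
 */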
