// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC cache.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2015 Jan Henrik Weinstock <jan.weinstock@rwth-aachen.de>
 */

#include <asm/spr.h>
#include <asm/spr_defs.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static __always_inline void cache_loop(struct page *page, const unsigned int reg)
{
	unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);

	while (line < paddr + PAGE_SIZE) {
		mtspr(reg, line);
		line += L1_CACHE_BYTES;
	}
}

void local_dcache_page_flush(struct page *page)
{
	cache_loop(page, SPR_DCBFR);
}
EXPORT_SYMBOL(local_dcache_page_flush);

void local_icache_page_inv(struct page *page)
{
	cache_loop(page, SPR_ICBIR);
}
EXPORT_SYMBOL(local_icache_page_inv);

void update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t *pte)
{
	unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
	struct folio *folio = page_folio(pfn_to_page(pfn));
	int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);

	/*
	 * Since icaches do not snoop for updated data on OpenRISC, we
	 * must write back and invalidate any dirty pages manually. We
	 * can skip data pages, since they will not end up in icaches.
	 */
	if ((vma->vm_flags & VM_EXEC) && dirty) {
		unsigned int nr = folio_nr_pages(folio);

		while (nr--)
			sync_icache_dcache(folio_page(folio, nr));
	}
}
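/*
 * The MIPS counterpart of the same flush hooks, arch/mips/mm/cache.c, follows
 * below; it dispatches through per-CPU function pointers instead of writing
 * SPRs directly.  First, an illustrative, standalone userspace sketch (not
 * part of either kernel file): it models the per-page walk that cache_loop()
 * above performs, with the privileged mtspr() replaced by a counter.  The
 * 8 KiB page and 16-byte cache line are assumptions made for the example;
 * the real values come from PAGE_SIZE and L1_CACHE_BYTES.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	8192UL	/* assumed page size */
#define EXAMPLE_CACHE_BYTES	16UL	/* assumed L1 cache line size */

static unsigned long ops;

/* Stand-in for mtspr(SPR_DCBFR/SPR_ICBIR, line): just count the writes. */
static void example_mtspr(unsigned long line)
{
	(void)line;
	ops++;
}

int main(void)
{
	unsigned long paddr = 0x2000UL;	/* any page-aligned physical address */
	unsigned long line = paddr & ~(EXAMPLE_CACHE_BYTES - 1);

	/* Same walk as cache_loop(): one SPR write per cache line in the page. */
	while (line < paddr + EXAMPLE_PAGE_SIZE) {
		example_mtspr(line);
		line += EXAMPLE_CACHE_BYTES;
	}

	printf("flushed %lu cache lines for one %lu-byte page\n",
	       ops, EXAMPLE_PAGE_SIZE);
	return 0;
}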
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/bcache.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>
#include <asm/pgtable.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

/*
 * Dummy cache handling routine
 */

void cache_noop(void) {}

#ifdef CONFIG_BOARD_SCACHE

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;
#endif

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}
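/*
 * Hedged userspace sketch (separate from the kernel code above): how a MIPS
 * JIT or self-modifying program typically uses the cacheflush(2) system call
 * implemented by SYSCALL_DEFINE3(cacheflush, ...) above.  The buffer size and
 * contents are placeholders; a real JIT would emit MIPS instructions and then
 * jump to the buffer.  ICACHE/DCACHE/BCACHE come from <asm/cachectl.h>,
 * pulled in via the MIPS-specific <sys/cachectl.h>.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/cachectl.h>	/* MIPS-specific cacheflush() wrapper */

int main(void)
{
	int len = 128;		/* placeholder size of the generated code */
	void *code = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (code == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* A real JIT would emit MIPS instructions here. */
	memset(code, 0, len);

	/*
	 * Write back the D-cache and invalidate the I-cache for the range
	 * before executing it; the kernel side above routes the request to
	 * __flush_icache_user_range(addr, addr + bytes).
	 */
	if (cacheflush(code, len, BCACHE) != 0) {
		perror("cacheflush");
		return 1;
	}

	/* ((void (*)(void))code)();   -- jump to the generated code here */
	puts("range flushed; safe to execute the generated code");
	return 0;
}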
void __flush_dcache_pages(struct page *page, unsigned int nr)
{
	struct folio *folio = page_folio(page);
	struct address_space *mapping = folio_flush_mapping(folio);
	unsigned long addr;
	unsigned int i;

	if (mapping && !mapping_mapped(mapping)) {
		folio_set_dcache_dirty(folio);
		return;
	}

	/*
	 * We could delay the flush for the !folio_mapping case too.  But that
	 * case is for exec env/arg pages and those are %99 certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	for (i = 0; i < nr; i++) {
		addr = (unsigned long)kmap_local_page(nth_page(page, i));
		flush_data_cache_page(addr);
		kunmap_local((void *)addr);
	}
}
EXPORT_SYMBOL(__flush_dcache_pages);

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);
	struct folio *folio = page_folio(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

void __update_cache(unsigned long address, pte_t pte)
{
	struct folio *folio;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
	unsigned int i;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	folio = page_folio(pfn_to_page(pfn));
	address &= PAGE_MASK;
	address -= offset_in_folio(folio, pfn << PAGE_SHIFT);

	if (folio_test_dcache_dirty(folio)) {
		for (i = 0; i < folio_nr_pages(folio); i++) {
			addr = (unsigned long)kmap_local_folio(folio, i);

			if (exec || pages_do_alias(addr, address))
				flush_data_cache_page(addr);
			kunmap_local((void *)addr);
			address += PAGE_SIZE;
		}
		folio_clear_dcache_dirty(folio);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

#define PM(p)	__pgprot(_page_cachable_default | (p))

static pgprot_t protection_map[16] __ro_after_init;
DECLARE_VM_GET_PAGE_PROT

static inline void setup_protection_map(void)
{
	protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[4] = PM(_PAGE_PRESENT);
	protection_map[5] = PM(_PAGE_PRESENT);
	protection_map[6] = PM(_PAGE_PRESENT);
	protection_map[7] = PM(_PAGE_PRESENT);

	protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
				_PAGE_NO_READ);
	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
	protection_map[12] = PM(_PAGE_PRESENT);
	protection_map[13] = PM(_PAGE_PRESENT);
	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}

#undef PM

void cpu_cache_init(void)
{
	if (IS_ENABLED(CONFIG_CPU_R3000) && cpu_has_3k_cache)
		r3k_cache_init();
	if (IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) && cpu_has_4k_cache)
		r4k_cache_init();

	if (IS_ENABLED(CONFIG_CPU_CAVIUM_OCTEON) && cpu_has_octeon_cache)
		octeon_cache_init();

	setup_protection_map();
}
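/*
 * Standalone sketch (not kernel code): how the protection_map[] index used in
 * setup_protection_map() above is formed.  The low four vm_flags bits select
 * the entry: bit 0 = read, bit 1 = write, bit 2 = exec, bit 3 = shared.  The
 * flag values below are illustrative stand-ins for the MIPS _PAGE_* bits,
 * chosen only to show why just the shared writable entries (10, 11, 14, 15)
 * get a hardware write permission while private writable mappings stay
 * read-only for copy-on-write.
 */
#include <stdio.h>

#define X_PRESENT	0x1	/* stand-in for _PAGE_PRESENT */
#define X_NO_EXEC	0x2	/* stand-in for _PAGE_NO_EXEC */
#define X_NO_READ	0x4	/* stand-in for _PAGE_NO_READ */
#define X_WRITE		0x8	/* stand-in for _PAGE_WRITE */

int main(void)
{
	unsigned int idx;

	for (idx = 0; idx < 16; idx++) {
		int read   = idx & 1;
		int write  = idx & 2;
		int exec   = idx & 4;
		int shared = idx & 8;
		unsigned int pte = X_PRESENT;

		/* Mirrors the table above: exec implies readability on MIPS. */
		if (!exec)
			pte |= X_NO_EXEC;
		if (!read && !exec)
			pte |= X_NO_READ;
		if (write && shared)
			pte |= X_WRITE;

		printf("protection_map[%2u]: %s%s%s%s -> pte flags 0x%x\n",
		       idx,
		       read   ? "r" : "-", write ? "w" : "-",
		       exec   ? "x" : "-", shared ? "s" : "p",
		       pte);
	}
	return 0;
}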