/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __M68K_MMU_CONTEXT_H
#define __M68K_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#if defined(CONFIG_COLDFIRE)

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/mcfmmu.h>
#include <asm/mmu.h>

#define NO_CONTEXT	256
#define LAST_CONTEXT	255
#define FIRST_CONTEXT	1

extern unsigned long context_map[];
extern mm_context_t next_mmu_context;

extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);

/*
 * Assign a hardware context (ASID) to an mm that does not have one
 * yet. Contexts are handed out round-robin; when none are free,
 * steal_context() reclaims one that is already in use.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
	mm_context_t ctx;

	if (mm->context != NO_CONTEXT)
		return;
	while (arch_atomic_dec_and_test_lt(&nr_free_contexts)) {
		atomic_inc(&nr_free_contexts);
		steal_context();
	}
	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->context = ctx;
	context_mm[ctx] = mm;
}

/*
 * Set up the context for a new address space.
 */
#define init_new_context(tsk, mm)	(((mm)->context = NO_CONTEXT), 0)

/*
 * We're finished using the context for an address space.
 */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		clear_bit(mm->context, context_map);
		mm->context = NO_CONTEXT;
		atomic_inc(&nr_free_contexts);
	}
}

static inline void set_context(mm_context_t context, pgd_t *pgd)
{
	__asm__ __volatile__ ("movec %0,%%asid" : : "d" (context));
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	get_mmu_context(tsk->mm);
	set_context(tsk->mm->context, next->pgd);
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *active_mm,
			       struct mm_struct *mm)
{
	get_mmu_context(mm);
	set_context(mm->context, mm->pgd);
}

#define prepare_arch_switch(next) load_ksp_mmu(next)

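/*
 * Make sure the incoming task's kernel stack is mapped before we switch
 * to it: probe the TLB through MMUAR/MMUOR and, if no valid entry is
 * found, walk the page tables by hand and program the mapping into the
 * MMUTR/MMUDR tag and data registers.
 */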
static inline void load_ksp_mmu(struct task_struct *task)
{
	unsigned long flags;
	struct mm_struct *mm;
	int asid;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	unsigned long mmuar;

	local_irq_save(flags);
	mmuar = task->thread.ksp;

	/* Search for a valid TLB entry; if one exists, don't remap */
	mmu_write(MMUAR, mmuar);
	mmu_write(MMUOR, MMUOR_STLB | MMUOR_ADR);
	if (mmu_read(MMUSR) & MMUSR_HIT)
		goto end;

	if (mmuar >= PAGE_OFFSET) {
		mm = &init_mm;
	} else {
		pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm);
		mm = task->mm;
	}

	if (!mm)
		goto bug;

	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd))
		goto bug;

	p4d = p4d_offset(pgd, mmuar);
	if (p4d_none(*p4d))
		goto bug;

	pud = pud_offset(p4d, mmuar);
	if (pud_none(*pud))
		goto bug;

	pmd = pmd_offset(pud, mmuar);
	if (pmd_none(*pmd))
		goto bug;

	pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
				     : pte_offset_map(pmd, mmuar);
	if (!pte || pte_none(*pte) || !pte_present(*pte))
		goto bug;

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && mmuar <= PAGE_OFFSET)
		set_pte(pte, pte_wrprotect(*pte));

	mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
		(((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
		>> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);

	goto end;

bug:
	pr_info("ksp load failed: mm=0x%p ksp=0x%08lx\n", mm, mmuar);
end:
	if (pte && mmuar < PAGE_OFFSET)
		pte_unmap(pte);
	local_irq_restore(flags);
}

#elif defined(CONFIG_SUN3)
#include <asm/sun3mmu.h>
#include <linux/sched.h>

extern unsigned long get_free_context(struct mm_struct *mm);
extern void clear_context(unsigned long context);

/* set the context for a new task to unmapped */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context = SUN3_INVALID_CONTEXT;
	return 0;
}

/*
 * find the context given to this process, and if it hasn't already
 * got one, go get one for it.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
	if (mm->context == SUN3_INVALID_CONTEXT)
		mm->context = get_free_context(mm);
}

/* flush context if allocated... */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	if (mm->context != SUN3_INVALID_CONTEXT)
		clear_context(mm->context);
}

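/*
 * Grab a context if this mm still lacks one, then make it the current
 * context via sun3_put_context(); both switch_mm() and activate_mm()
 * below funnel through here.
 */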
static inline void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	sun3_put_context(mm->context);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	activate_context(tsk->mm);
}

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	activate_context(next_mm);
}

#else

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context = virt_to_phys(mm->pgd);
	return 0;
}

static inline void switch_mm_0230(struct mm_struct *mm)
{
	unsigned long crp[2] = {
		0x80000000 | _PAGE_TABLE, mm->context
	};
	unsigned long tmp;

	asm volatile (".chip 68030");

	/* flush MC68030/MC68020 caches (they are virtually addressed) */
	asm volatile (
		"movec %%cacr,%0;"
		"orw %1,%0; "
		"movec %0,%%cacr"
		: "=d" (tmp) : "di" (FLUSH_I_AND_D));

	/* Switch the root pointer. For a 030-only kernel,
	 * avoid flushing the whole ATC; we only need to
	 * flush the user entries. The 68851 does this by
	 * itself. Avoid a runtime check here.
	 */
	asm volatile (
#ifdef CPU_M68030_ONLY
		"pmovefd %0,%%crp; "
		"pflush #0,#4"
#else
		"pmove %0,%%crp"
#endif
		: : "m" (crp[0]));

	asm volatile (".chip 68k");
}

static inline void switch_mm_0460(struct mm_struct *mm)
{
	asm volatile (".chip 68040");

	/* flush address translation cache (user entries) */
	asm volatile ("pflushan");

	/* switch the root pointer */
	asm volatile ("movec %0,%%urp" : : "r" (mm->context));

	if (CPU_IS_060) {
		unsigned long tmp;

		/* clear user entries in the branch cache */
		asm volatile (
			"movec %%cacr,%0; "
			"orl %1,%0; "
			"movec %0,%%cacr"
			: "=d" (tmp) : "di" (0x00200000));
	}

	asm volatile (".chip 68k");
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (prev != next) {
		if (CPU_IS_020_OR_030)
			switch_mm_0230(next);
		else
			switch_mm_0460(next);
	}
}

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	next_mm->context = virt_to_phys(next_mm->pgd);

	if (CPU_IS_020_OR_030)
		switch_mm_0230(next_mm);
	else
		switch_mm_0460(next_mm);
}

#endif

#include <asm-generic/mmu_context.h>

#else /* !CONFIG_MMU */

#include <asm-generic/nommu_context.h>

#endif /* CONFIG_MMU */
#endif /* __M68K_MMU_CONTEXT_H */