TOMOYO Linux Cross Reference
Linux/arch/powerpc/mm/pgtable.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains common routines for dealing with freeing of page
 * tables, along with common page table handling code.
 *
 *  Derived from arch/powerpc/mm/tlb_64.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_PPC64
#define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD)
#else
#define PGD_ALIGN PAGE_SIZE
#endif

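/*
 * swapper_pg_dir is the kernel's initial top-level page table (the pgd
 * used by init_mm). It is placed in .bss..page_aligned and aligned to
 * PGD_ALIGN: the full PGD size on PPC64, one page on PPC32.
 */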
pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN);

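/*
 * An exec fault shows up as an instruction storage/access interrupt,
 * which the kernel records as trap 0x400 in pt_regs.
 */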
static inline int is_exec_fault(void)
{
        return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present,
 * and we avoid _PAGE_SPECIAL and cache-inhibited PTEs. We also only do
 * that on userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte, unsigned long addr)
{
        if (pte_present(pte) && !pte_special(pte)) {
                if (pte_ci(pte))
                        return 0;
                if (!is_kernel_addr(addr))
                        return 1;
        }
        return 0;
}

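/*
 * Look up the folio backing a PTE. Returns NULL for PFNs that are not
 * valid or that map reserved pages, where cache maintenance does not
 * apply.
 */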
static struct folio *maybe_pte_to_folio(pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        if (unlikely(!pfn_valid(pfn)))
                return NULL;
        page = pfn_to_page(pfn);
        if (PageReserved(page))
                return NULL;
        return page_folio(page);
}

#ifdef CONFIG_PPC_BOOK3S

/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, we always flush
 * the cache for valid PTEs in set_pte. Embedded CPUs without HW exec
 * support fall into the same category.
 */

static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr)
{
        pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
        if (pte_looks_normal(pte, addr) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
                                             cpu_has_feature(CPU_FTR_NOEXECUTE))) {
                struct folio *folio = maybe_pte_to_folio(pte);
                if (!folio)
                        return pte;
                if (!test_bit(PG_dcache_clean, &folio->flags)) {
                        flush_dcache_icache_folio(folio);
                        set_bit(PG_dcache_clean, &folio->flags);
                }
        }
        return pte;
}

#else /* CONFIG_PPC_BOOK3S */

static pte_t set_pte_filter_hash(pte_t pte, unsigned long addr) { return pte; }

#endif /* CONFIG_PPC_BOOK3S */

/* Embedded-type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 *
 * This is also called once per folio, so only work with folio->flags here.
 */
static inline pte_t set_pte_filter(pte_t pte, unsigned long addr)
{
        struct folio *folio;

        if (radix_enabled())
                return pte;

        if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return set_pte_filter_hash(pte, addr);

        /* No exec permission in the first place, move on */
        if (!pte_exec(pte) || !pte_looks_normal(pte, addr))
                return pte;

        /* If you set _PAGE_EXEC on weird pages you're on your own */
        folio = maybe_pte_to_folio(pte);
        if (unlikely(!folio))
                return pte;

        /* If the page is clean, we move on */
        if (test_bit(PG_dcache_clean, &folio->flags))
                return pte;

        /* If it's an exec fault, we flush the cache and make it clean */
        if (is_exec_fault()) {
                flush_dcache_icache_folio(folio);
                set_bit(PG_dcache_clean, &folio->flags);
                return pte;
        }

        /* Else, we filter out _PAGE_EXEC */
        return pte_exprotect(pte);
}

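/*
 * Filter applied by ptep_set_access_flags(): on MMUs without HW exec
 * permission handling, recover a _PAGE_EXEC that set_pte_filter()
 * filtered out once we actually take an exec fault, flushing the
 * caches first if the page is not yet marked clean.
 */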
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
                                     int dirty)
{
        struct folio *folio;

        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
                return pte;

        if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return pte;

        /* So here, we only care about exec faults, as we use them
         * to recover lost _PAGE_EXEC and perform I$/D$ coherency
         * if necessary. Also, if _PAGE_EXEC is already set, same deal,
         * we just bail out.
         */
        if (dirty || pte_exec(pte) || !is_exec_fault())
                return pte;

#ifdef CONFIG_DEBUG_VM
        /* So this is an exec fault, _PAGE_EXEC is not set. If it was
         * an error we would have bailed out earlier in do_page_fault(),
         * but let's make sure of it.
         */
        if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
                return pte;
#endif /* CONFIG_DEBUG_VM */

        /* If you set _PAGE_EXEC on weird pages you're on your own */
        folio = maybe_pte_to_folio(pte);
        if (unlikely(!folio))
                goto bail;

        /* If the page is already clean, we move on */
        if (test_bit(PG_dcache_clean, &folio->flags))
                goto bail;

        /* Clean the page and set PG_dcache_clean */
        flush_dcache_icache_folio(folio);
        set_bit(PG_dcache_clean, &folio->flags);

 bail:
        return pte_mkexec(pte);
}

/*
 * set_ptes stores a range of Linux PTEs into the Linux page table.
 */
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                pte_t pte, unsigned int nr)
{
        /* Note: mm->context.id might not yet have been assigned as
         * this context might not have been activated yet when this
         * is called. Filter the pte value and use the filtered value
         * to set up all the ptes in the range.
         */
        pte = set_pte_filter(pte, addr);

        /*
         * We don't need to call arch_enter/leave_lazy_mmu_mode()
         * because we expect set_ptes to only be used on PTEs that are
         * not present and not hw-valid. Hence there is no translation
         * cache flush involved that needs to be batched.
         */
        for (;;) {
                /*
                 * Make sure hardware valid bit is not set. We don't do
                 * tlb flush for this update.
                 */
                VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

                /* Perform the setting of the PTE */
                __set_pte_at(mm, addr, ptep, pte, 0);
                if (--nr == 0)
                        break;
                ptep++;
                addr += PAGE_SIZE;
                pte = pte_next_pfn(pte);
        }
}

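/*
 * Remove the kernel mapping of a single page and flush the TLB for the
 * corresponding range.
 */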
void unmap_kernel_page(unsigned long va)
{
        pmd_t *pmdp = pmd_off_k(va);
        pte_t *ptep = pte_offset_kernel(pmdp, va);

        pte_clear(&init_mm, va, ptep);
        flush_tlb_kernel_range(va, va + PAGE_SIZE);
}

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e., a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us, we additionally deal with missing execute
 * permission here on some processors.
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pte_t *ptep, pte_t entry, int dirty)
{
        int changed;
        entry = set_access_flags_filter(entry, vma, dirty);
        changed = !pte_same(*(ptep), entry);
        if (changed) {
                assert_pte_locked(vma->vm_mm, address);
                __ptep_set_access_flags(vma, ptep, entry,
                                        address, mmu_virtual_psize);
        }
        return changed;
}

#ifdef CONFIG_HUGETLB_PAGE
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
        /*
         * The "return 1" forces a call of update_mmu_cache, which will write a
         * TLB entry.  Without this, platforms that don't do a write of the TLB
         * entry in the TLB miss handler asm will fault ad infinitum.
         */
        ptep_set_access_flags(vma, addr, ptep, pte, dirty);
        return 1;
#else
        int changed, psize;

        pte = set_access_flags_filter(pte, vma, dirty);
        changed = !pte_same(*(ptep), pte);
        if (changed) {
#ifdef CONFIG_PPC_BOOK3S_64
                struct hstate *h = hstate_vma(vma);

                psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
                assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif

#else
                /*
                 * Not used on non-book3s64 platforms.
                 * 8xx compares it with mmu_virtual_psize to
                 * know if it is a huge page or not.
                 */
                psize = MMU_PAGE_COUNT;
#endif
                __ptep_set_access_flags(vma, ptep, pte, addr, psize);
        }
        return changed;
#endif
}

#if defined(CONFIG_PPC_8xx)
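/*
 * On 8xx, a single huge PTE value is replicated across all the 4K PTE
 * cells that make up the huge mapping, and an 8M page spans two PMD
 * entries (4M each), so both halves are flagged and populated below.
 */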
static void __set_huge_pte_at(pmd_t *pmd, pte_t *ptep, pte_basic_t val)
{
        pte_basic_t *entry = (pte_basic_t *)ptep;
        int num, i;

        /*
         * Make sure hardware valid bit is not set. We don't do
         * tlb flush for this update.
         */
        VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

        num = number_of_cells_per_pte(pmd, val, 1);

        for (i = 0; i < num; i++, entry++, val += SZ_4K)
                *entry = val;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                     pte_t pte, unsigned long sz)
{
        pmd_t *pmdp = pmd_off(mm, addr);

        pte = set_pte_filter(pte, addr);

        if (sz == SZ_8M) { /* Flag both PMD entries as 8M and fill both page tables */
                *pmdp = __pmd(pmd_val(*pmdp) | _PMD_PAGE_8M);
                *(pmdp + 1) = __pmd(pmd_val(*(pmdp + 1)) | _PMD_PAGE_8M);

                __set_huge_pte_at(pmdp, pte_offset_kernel(pmdp, 0), pte_val(pte));
                __set_huge_pte_at(pmdp, pte_offset_kernel(pmdp + 1, 0), pte_val(pte) + SZ_4M);
        } else {
                __set_huge_pte_at(pmdp, ptep, pte_val(pte));
        }
}
#else
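/*
 * Generic variant: work out which page-directory level (pdsize) backs
 * the huge mapping and write one PTE per entry at that level, stepping
 * the PFN by pdsize each time.
 */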
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                     pte_t pte, unsigned long sz)
{
        unsigned long pdsize;
        int i;

        pte = set_pte_filter(pte, addr);

        /*
         * Make sure hardware valid bit is not set. We don't do
         * tlb flush for this update.
         */
        VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

        if (sz < PMD_SIZE)
                pdsize = PAGE_SIZE;
        else if (sz < PUD_SIZE)
                pdsize = PMD_SIZE;
        else if (sz < P4D_SIZE)
                pdsize = PUD_SIZE;
        else if (sz < PGDIR_SIZE)
                pdsize = P4D_SIZE;
        else
                pdsize = PGDIR_SIZE;

        for (i = 0; i < sz / pdsize; i++, ptep++, addr += pdsize) {
                __set_pte_at(mm, addr, ptep, pte, 0);
                pte = __pte(pte_val(pte) + ((unsigned long long)pdsize / PAGE_SIZE << PFN_PTE_SHIFT));
        }
}
#endif
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_DEBUG_VM
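/*
 * Debug check that the caller holds the page-table lock covering the
 * PTE that maps @addr in @mm.
 */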
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        if (mm == &init_mm)
                return;
        pgd = mm->pgd + pgd_index(addr);
        BUG_ON(pgd_none(*pgd));
        p4d = p4d_offset(pgd, addr);
        BUG_ON(p4d_none(*p4d));
        pud = pud_offset(p4d, addr);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, addr);
        /*
         * When khugepaged collapses normal pages into a hugepage, it first
         * sets the pmd to none to force page fault/gup to take mmap_lock.
         * After the pmd is set to none, a pte_clear is done which ends up
         * running this assertion, so if we find the pmd none, just return.
         */
        if (pmd_none(*pmd))
                return;
        pte = pte_offset_map_nolock(mm, pmd, addr, &ptl);
        BUG_ON(!pte);
        assert_spin_locked(ptl);
        pte_unmap(pte);
}
#endif /* CONFIG_DEBUG_VM */

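/*
 * Translate a vmalloc address into a physical address by looking up
 * the backing page frame.
 */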
unsigned long vmalloc_to_phys(void *va)
{
        unsigned long pfn = vmalloc_to_pfn(va);

        BUG_ON(!pfn);
        return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);

/*
 * We have 3 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it. This function needs to be called with interrupts disabled. We use
 * this variant when we have MSR[EE] = 0 but paca->irq_soft_mask = IRQS_ENABLED.
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
                        bool *is_thp, unsigned *hpage_shift)
{
        pgd_t *pgdp;
#ifdef CONFIG_PPC64
        p4d_t p4d, *p4dp;
        pud_t pud, *pudp;
#endif
        pmd_t pmd, *pmdp;
        pte_t *ret_pte;
        unsigned pdshift;

        if (hpage_shift)
                *hpage_shift = 0;

        if (is_thp)
                *is_thp = false;

        /*
         * Always operate on the local stack value. This makes sure the
         * value doesn't get updated by a parallel THP split/collapse,
         * page fault or page unmap. The returned pte_t * is still not
         * stable, so it should be checked there for the above conditions.
         * The top level is an exception because it is folded into p4d.
         *
         * On PPC32, P4D/PUD/PMD are folded into PGD so go straight to
         * the PMD level.
         */
        pgdp = pgdir + pgd_index(ea);
#ifdef CONFIG_PPC64
        p4dp = p4d_offset(pgdp, ea);
        p4d  = READ_ONCE(*p4dp);
        pdshift = P4D_SHIFT;

        if (p4d_none(p4d))
                return NULL;

        if (p4d_leaf(p4d)) {
                ret_pte = (pte_t *)p4dp;
                goto out;
        }

        /*
         * Even if we end up with an unmap, the pgtable will not
         * be freed, because we do an RCU free and here we have
         * interrupts disabled.
         */
        pdshift = PUD_SHIFT;
        pudp = pud_offset(&p4d, ea);
        pud  = READ_ONCE(*pudp);

        if (pud_none(pud))
                return NULL;

        if (pud_leaf(pud)) {
                ret_pte = (pte_t *)pudp;
                goto out;
        }

        pmdp = pmd_offset(&pud, ea);
#else
        pmdp = pmd_offset(pud_offset(p4d_offset(pgdp, ea), ea), ea);
#endif
        pdshift = PMD_SHIFT;
        pmd  = READ_ONCE(*pmdp);

        /*
         * A hugepage collapse is captured by this condition, see
         * pmdp_collapse_flush.
         */
        if (pmd_none(pmd))
                return NULL;

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * A hugepage split is captured by this condition, see
         * pmdp_invalidate.
         *
         * Huge page modification can be caught here too.
         */
        if (pmd_is_serializing(pmd))
                return NULL;
#endif

        if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
                if (is_thp)
                        *is_thp = true;
                ret_pte = (pte_t *)pmdp;
                goto out;
        }

        if (pmd_leaf(pmd)) {
                ret_pte = (pte_t *)pmdp;
                goto out;
        }

        return pte_offset_kernel(&pmd, ea);

out:
        if (hpage_shift)
                *hpage_shift = pdshift;
        return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);

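/*
 * protection_map translates the low vm_flags protection bits
 * (VM_READ/VM_WRITE/VM_EXEC/VM_SHARED) into page protections; private
 * writable mappings get the PAGE_COPY variants so they start out
 * read-only and are made writable on copy-on-write.
 */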
/* Note that due to the way vm flags are laid out, the bits are XWR */
const pgprot_t protection_map[16] = {
        [VM_NONE]                                       = PAGE_NONE,
        [VM_READ]                                       = PAGE_READONLY,
        [VM_WRITE]                                      = PAGE_COPY,
        [VM_WRITE | VM_READ]                            = PAGE_COPY,
        [VM_EXEC]                                       = PAGE_EXECONLY_X,
        [VM_EXEC | VM_READ]                             = PAGE_READONLY_X,
        [VM_EXEC | VM_WRITE]                            = PAGE_COPY_X,
        [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_COPY_X,
        [VM_SHARED]                                     = PAGE_NONE,
        [VM_SHARED | VM_READ]                           = PAGE_READONLY,
        [VM_SHARED | VM_WRITE]                          = PAGE_SHARED,
        [VM_SHARED | VM_WRITE | VM_READ]                = PAGE_SHARED,
        [VM_SHARED | VM_EXEC]                           = PAGE_EXECONLY_X,
        [VM_SHARED | VM_EXEC | VM_READ]                 = PAGE_READONLY_X,
        [VM_SHARED | VM_EXEC | VM_WRITE]                = PAGE_SHARED_X,
        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = PAGE_SHARED_X
};

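/*
 * Book3S-64 provides its own vm_get_page_prot(); everyone else gets the
 * generic definition, built from protection_map above, via
 * DECLARE_VM_GET_PAGE_PROT.
 */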
#ifndef CONFIG_PPC_BOOK3S_64
DECLARE_VM_GET_PAGE_PROT
#endif
