
TOMOYO Linux Cross Reference
Linux/arch/riscv/mm/pgtable.c


// SPDX-License-Identifier: GPL-2.0

#include <asm/pgalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pgtable.h>

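/*
 * Called from the page-fault path to relax the access/dirty flags on an
 * existing PTE.  Always reports a change so that the caller runs
 * update_mmu_cache(); see the comment below on spurious faults.
 */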
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        if (!pte_same(ptep_get(ptep), entry))
                __set_pte_at(vma->vm_mm, ptep, entry);
        /*
         * update_mmu_cache will unconditionally execute, handling both
         * the case that the PTE changed and the spurious fault case.
         */
        return true;
}

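/*
 * Test and clear the "accessed" (young) bit so that page aging/reclaim can
 * tell whether the page has been referenced since the last scan.
 */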
int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address,
                              pte_t *ptep)
{
        if (!pte_young(ptep_get(ptep)))
                return 0;
        return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);

#ifdef CONFIG_64BIT
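/*
 * The number of page-table levels (Sv39/Sv48/Sv57) is chosen at boot, so
 * the PUD and P4D levels are folded at runtime when 4- or 5-level paging
 * is not in use: the "offset" helpers then simply pass the entry through.
 */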
pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
        if (pgtable_l4_enabled)
                return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);

        return (pud_t *)p4d;
}

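/* As above, but folding the P4D level when 5-level paging is disabled. */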
p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
        if (pgtable_l5_enabled)
                return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);

        return (p4d_t *)pgd;
}
#endif

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
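/*
 * Huge vmap support: P4D-sized mappings are not supported, so the P4D
 * helpers are stubs (returning 0 tells the vmap code "not handled").
 */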
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}

void p4d_clear_huge(p4d_t *p4d)
{
}

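/* Install a leaf PUD mapping phys with the given protections. */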
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
        pud_t new_pud = pfn_pud(__phys_to_pfn(phys), prot);

        set_pud(pud, new_pud);
        return 1;
}

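/* Clear a leaf (huge) PUD; a non-leaf entry is left alone. */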
int pud_clear_huge(pud_t *pud)
{
        if (!pud_leaf(pudp_get(pud)))
                return 0;
        pud_clear(pud);
        return 1;
}

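/*
 * Free the PMD table (and any PTE tables hanging off it) that backs a
 * page-table-mapped range under this PUD, so the range can be remapped
 * with a huge leaf entry.  The kernel TLB range is flushed before the
 * tables go away.
 */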
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
        pmd_t *pmd = pud_pgtable(pudp_get(pud));
        int i;

        pud_clear(pud);

        flush_tlb_kernel_range(addr, addr + PUD_SIZE);

        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (!pmd_none(pmd[i])) {
                        pte_t *pte = (pte_t *)pmd_page_vaddr(pmd[i]);

                        pte_free_kernel(NULL, pte);
                }
        }

        pmd_free(NULL, pmd);

        return 1;
}

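/* Install a leaf PMD mapping phys with the given protections. */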
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
        pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), prot);

        set_pmd(pmd, new_pmd);
        return 1;
}

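/* Clear a leaf (huge) PMD; a non-leaf entry is left alone. */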
int pmd_clear_huge(pmd_t *pmd)
{
        if (!pmd_leaf(pmdp_get(pmd)))
                return 0;
        pmd_clear(pmd);
        return 1;
}

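/*
 * Free the PTE table under this PMD so the range can be remapped with a
 * huge leaf entry, flushing the kernel TLB range first.
 */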
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));

        pmd_clear(pmd);

        flush_tlb_kernel_range(addr, addr + PMD_SIZE);
        pte_free_kernel(NULL, pte);
        return 1;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
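/*
 * Used by khugepaged when collapsing a range of small pages into a huge
 * page: atomically clear the PMD and return the old value.
 */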
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp)
{
        pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
        /*
         * When leaf PTE entries (regular pages) are collapsed into a leaf
         * PMD entry (huge page), a valid non-leaf PTE is converted into a
         * valid leaf PTE at the level 1 page table.  Since the sfence.vma
         * forms that specify an address only apply to leaf PTEs, we need a
         * global flush here.  collapse_huge_page() assumes these flushes are
         * eager, so just do the fence here.
         */
        flush_tlb_mm(vma->vm_mm);
        return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
