
TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/pgalloc.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

#define __HAVE_ARCH_PTE_ALLOC_ONE
#define __HAVE_ARCH_PGD_FREE
#include <asm-generic/pgalloc.h>

static inline int  __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)	{}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif

/*
 * Flags to use when allocating a user page table page.
 */
extern gfp_t __userpte_alloc_gfp;

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
/*
 * Instead of one PGD, we acquire two PGDs.  Being order-1, it is
 * both 8k in size and 8k-aligned.  That lets us just flip bit 12
 * in a pointer to swap between the two 4k halves.
 */
#define PGD_ALLOCATION_ORDER 1
#else
#define PGD_ALLOCATION_ORDER 0
#endif

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pgtable_t pte_alloc_one(struct mm_struct *);

extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}

static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

#if CONFIG_PGTABLE_LEVELS > 2
extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}

#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else	/* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}

static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */

#if CONFIG_PGTABLE_LEVELS > 3
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (p4d_t *)get_zeroed_page(gfp);
}

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;

	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);

static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long address)
{
	if (pgtable_l5_enabled())
		___p4d_free_tlb(tlb, p4d);
}

#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#endif /* _ASM_X86_PGALLOC_H */
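
The PGD_ALLOCATION_ORDER comment above notes that with page table isolation the PGD becomes an order-1 (8k, 8k-aligned) allocation, so switching between the kernel half and the PTI user half only requires flipping bit 12 of the pointer. The standalone userspace sketch below illustrates just that pointer trick; the names (demo_other_half, DEMO_*) are invented for the example and are not the kernel's own helpers.

/*
 * Illustrative userspace sketch, not kernel code: an 8k-aligned, 8k
 * allocation stands in for the order-1 PGD, and XOR-ing bit 12 of a
 * pointer hops between its two 4k halves.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)	/* one 4k half   */
#define DEMO_PGD_BYTES	(2 * DEMO_PAGE_SIZE)		/* order-1 = 8k  */

/* Flip bit 12 to move between the "kernel" half and the "user" half. */
static void *demo_other_half(void *pgd_half)
{
	return (void *)((uintptr_t)pgd_half ^ DEMO_PAGE_SIZE);
}

int main(void)
{
	/* 8k-aligned, 8k-sized block: bit 12 of the base is always 0. */
	void *kernel_pgd = aligned_alloc(DEMO_PGD_BYTES, DEMO_PGD_BYTES);
	void *user_pgd;

	if (!kernel_pgd)
		return 1;

	user_pgd = demo_other_half(kernel_pgd);
	printf("kernel half: %p\n", kernel_pgd);
	printf("user   half: %p\n", user_pgd);
	printf("back again : %p\n", demo_other_half(user_pgd));

	free(kernel_pgd);
	return 0;
}

Because the allocation is aligned to its own size, the base address has bit 12 clear, so the XOR toggles cleanly between base and base + 4k and applying it twice returns the original pointer.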
