TOMOYO Linux Cross Reference
Linux/arch/um/include/asm/pgtable.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 */

#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include <asm/fixmap.h>

#define _PAGE_PRESENT   0x001
#define _PAGE_NEWPAGE   0x002
#define _PAGE_NEWPROT   0x004
#define _PAGE_RW        0x020
#define _PAGE_USER      0x040
#define _PAGE_ACCESSED  0x080
#define _PAGE_DIRTY     0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE  0x010   /* if the user mapped it with PROT_NONE;
                                   pte_present gives true */

/* We borrow bit 10 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE     0x400
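
/*
 * Illustrative example, not part of the original header: a PROT_NONE
 * mapping keeps _PAGE_PROTNONE set with _PAGE_PRESENT clear, and
 * pte_present() (defined below) tests both bits, so such a pte still
 * looks "present" to the core VM while denying all access.
 */
#if 0   /* sketch only; assumes PAGE_NONE and the helpers defined below */
static void example_prot_none(struct page *page)
{
        pte_t pte;

        pte_set_val(pte, page_to_phys(page), PAGE_NONE);
        WARN_ON(pte_get_bits(pte, _PAGE_PRESENT));      /* hardware-present bit clear */
        WARN_ON(!pte_present(pte));                     /* still present to the VM */
        WARN_ON(pte_read(pte) || pte_write(pte));       /* and all access denied */
}
#endif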

#ifdef CONFIG_3_LEVEL_PGTABLES
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */

extern unsigned long end_iomem;

#define VMALLOC_OFFSET  (__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END     (FIXADDR_START-2*PAGE_SIZE)
#define MODULES_VADDR   VMALLOC_START
#define MODULES_END     VMALLOC_END
#define MODULES_LEN     (MODULES_END - MODULES_VADDR)
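
/*
 * Worked example, not part of the original header, with assumed values:
 * if __va_space is 0x800000 (8MiB) and end_iomem is 0x1234000, then
 *
 *   VMALLOC_START == (0x1234000 + 0x800000) & ~0x7fffff
 *                 == 0x1a34000 & ~0x7fffff
 *                 == 0x1800000
 *
 * i.e. the vmalloc area starts at the next 8MiB boundary, leaving a
 * 0x5cc000-byte hole above end_iomem.  The hole is always at least one
 * byte and at most VMALLOC_OFFSET.
 */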

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC                                              \
         (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE       __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY       __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL     __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC        __pgprot(__PAGE_KERNEL_EXEC)

/*
 * The i386 can't do page protection for execute, and treats execute
 * permission the same as read permission.
 * Also, write permissions imply read permissions. This is the closest we can
 * get.
 */

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)     (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)   do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define p4d_newpage(x)  (p4d_val(x) & _PAGE_NEWPAGE)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pte_present(x)  pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))

/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
        return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
        return((pte_get_bits(pte, _PAGE_USER)) &&
               !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
        return((pte_get_bits(pte, _PAGE_USER)) &&
               !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
        return((pte_get_bits(pte, _PAGE_RW)) &&
               !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_dirty(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
        return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}

/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPROT);
        return(pte);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_DIRTY);
        return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_ACCESSED);
        return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        if (likely(pte_get_bits(pte, _PAGE_RW)))
                pte_clear_bits(pte, _PAGE_RW);
        else
                return pte;
        return(pte_mknewprot(pte));
}
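
/*
 * Illustrative note, not part of the original header: the protection
 * helpers only tag _PAGE_NEWPROT when they actually flip a bit, so
 * write-protecting an already read-only pte is a no-op and queues no
 * host-side mprotect() work:
 */
#if 0   /* sketch only; "ro_pte" is a hypothetical present, read-only pte
           that has already been synced (no _PAGE_NEWPROT pending) */
        WARN_ON(pte_newprot(pte_wrprotect(ro_pte)));    /* _PAGE_RW already clear */
#endif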

static inline pte_t pte_mkread(pte_t pte)
{
        if (unlikely(pte_get_bits(pte, _PAGE_USER)))
                return pte;
        pte_set_bits(pte, _PAGE_USER);
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        pte_set_bits(pte, _PAGE_DIRTY);
        return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        pte_set_bits(pte, _PAGE_ACCESSED);
        return(pte);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
        if (unlikely(pte_get_bits(pte, _PAGE_RW)))
                return pte;
        pte_set_bits(pte, _PAGE_RW);
        return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_NEWPAGE);
        if (pte_present(pte))
                pte_clear_bits(pte, _PAGE_NEWPROT);
        return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
        pte_set_bits(pte, _PAGE_NEWPAGE);
        return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
        pte_copy(*pteptr, pteval);

        /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
         * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
         * mapped pages.
         */

        *pteptr = pte_mknewpage(*pteptr);
        if (pte_present(*pteptr))
                *pteptr = pte_mknewprot(*pteptr);
}
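
/*
 * Illustrative example, not part of the original header: after
 * set_pte() the pte always carries _PAGE_NEWPAGE, and a present pte
 * additionally carries _PAGE_NEWPROT, which is what the tlb sync code
 * (fix_range, per the comment above) keys on.
 */
#if 0   /* sketch only; "ptep" and "pte" are hypothetical */
        set_pte(ptep, pte);
        WARN_ON(!pte_newpage(*ptep));                           /* always marked */
        WARN_ON(pte_present(*ptep) && !pte_newprot(*ptep));     /* present => NEWPROT */
#endif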

#define PFN_PTE_SHIFT           PAGE_SHIFT

static inline void um_tlb_mark_sync(struct mm_struct *mm, unsigned long start,
                                    unsigned long end)
{
        if (!mm->context.sync_tlb_range_to) {
                mm->context.sync_tlb_range_from = start;
                mm->context.sync_tlb_range_to = end;
        } else {
                if (start < mm->context.sync_tlb_range_from)
                        mm->context.sync_tlb_range_from = start;
                if (end > mm->context.sync_tlb_range_to)
                        mm->context.sync_tlb_range_to = end;
        }
}
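
/*
 * Worked example, not part of the original header: successive calls
 * grow a single pending [from, to) window instead of tracking disjoint
 * ranges.  Starting from a fresh context (sync_tlb_range_to == 0):
 *
 *   um_tlb_mark_sync(mm, 0x2000, 0x3000);   window is [0x2000, 0x3000)
 *   um_tlb_mark_sync(mm, 0x8000, 0x9000);   window is [0x2000, 0x9000)
 *
 * so the eventual flush covers the union, including the untouched pages
 * in between.
 */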

#define set_ptes set_ptes
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, pte_t pte, int nr)
{
        /* Basically the default implementation */
        size_t length = nr * PAGE_SIZE;

        for (;;) {
                set_pte(ptep, pte);
                if (--nr == 0)
                        break;
                ptep++;
                pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
        }

        um_tlb_mark_sync(mm, addr, addr + length);
}
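
/*
 * Illustrative example, not part of the original header: with an
 * assumed starting pfn of 0x100 and nr == 3, the loop above installs
 * ptes for pfns 0x100, 0x101 and 0x102 (each step adds
 * 1UL << PFN_PTE_SHIFT, i.e. one page frame, to the pte value) and
 * then queues the whole 3-page range for a host-side sync.
 */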

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
}
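
/*
 * Illustrative example, not part of the original header: two ptes that
 * differ only in _PAGE_NEWPAGE still compare equal, since that bit only
 * tracks a pending host-side update, not the mapping itself.
 */
#if 0   /* sketch only; "pte" is hypothetical */
        WARN_ON(!pte_same(pte, pte_mknewpage(pte)));
#endif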

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
        ({ pte_t pte;                                   \
                                                        \
        pte_set_val(pte, page_to_phys(page), (pgprot)); \
        if (pte_present(pte))                           \
                pte = pte_mknewprot(pte_mknewpage(pte)); \
        pte;})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
        return pte;
}
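
/*
 * Illustrative example, not part of the original header: pte_modify()
 * keeps everything covered by _PAGE_CHG_MASK (the page frame plus the
 * accessed/dirty bits) and replaces the remaining bits with the new
 * protection.  For a dirty, accessed, writable user pte,
 *
 *   pte_modify(pte, PAGE_READONLY)
 *
 * preserves the pfn, _PAGE_ACCESSED and _PAGE_DIRTY but drops
 * _PAGE_RW, so the next write faults and can be caught.
 */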

/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * pmd_page_vaddr() returns the kernel-virtual address of the page table
 * page that the given pmd entry points to.
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

#define update_mmu_cache(vma, address, ptep) do {} while (0)
#define update_mmu_cache_range(vmf, vma, address, ptep, nr) do {} while (0)

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ----------------> E < type -> 0 0 0 1 0
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   _PAGE_NEWPAGE (bit 1) is always set to 1 in set_pte().
 */
#define __swp_type(x)                   (((x).val >> 5) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 11)

#define __swp_entry(type, offset) \
        ((swp_entry_t) { (((type) & 0x1f) << 5) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
        ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val })
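
/*
 * Worked example, not part of the original header, with assumed values:
 * for swap type 3 and offset 0x10,
 *
 *   __swp_entry(3, 0x10).val == (3 << 5) | (0x10 << 11) == 0x8060
 *
 * and decoding an entry whose .val is 0x8060 recovers both fields:
 * __swp_type() yields 3 and __swp_offset() yields 0x10.  Bits 0-4 stay
 * clear, so the resulting pte is !pte_present(); set_pte() later adds
 * _PAGE_NEWPAGE (bit 1), which __pte_to_swp_entry() strips again via
 * pte_mkuptodate().
 */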

static inline int pte_swp_exclusive(pte_t pte)
{
        return pte_get_bits(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        pte_set_bits(pte, _PAGE_SWP_EXCLUSIVE);
        return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        pte_clear_bits(pte, _PAGE_SWP_EXCLUSIVE);
        return pte;
}

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)           \
do {                                            \
        pte_clear(&init_mm, (vaddr), (ptep));   \
        __flush_tlb_one((vaddr));               \
} while (0)

#endif
