Linux/arch/powerpc/include/asm/book3s/32/pgtable.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

/*
 * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
 * table containing PTEs, together with a set of 16 segment registers,
 * to define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings.  We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code.  Low-level assembler code in hash_low_32.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

#define _PAGE_PRESENT   0x001   /* software: pte contains a translation */
#define _PAGE_HASHPTE   0x002   /* hash_page has made an HPTE for this pte */
#define _PAGE_READ      0x004   /* software: read access allowed */
#define _PAGE_GUARDED   0x008   /* G: prohibit speculative access */
#define _PAGE_COHERENT  0x010   /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE  0x020   /* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040   /* W: cache write-through */
#define _PAGE_DIRTY     0x080   /* C: page changed */
#define _PAGE_ACCESSED  0x100   /* R: page referenced */
#define _PAGE_EXEC      0x200   /* software: exec allowed */
#define _PAGE_WRITE     0x400   /* software: user write access allowed */
#define _PAGE_SPECIAL   0x800   /* software: Special page */

#ifdef CONFIG_PTE_64BIT
/* We never clear the high word of the pte */
#define _PTE_NONE_MASK  (0xffffffff00000000ULL | _PAGE_HASHPTE)
#else
#define _PTE_NONE_MASK  _PAGE_HASHPTE
#endif
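
/*
 * Example (illustrative sketch, not part of the original header): a
 * "cleared" PTE may still carry _PAGE_HASHPTE (and, with 64-bit PTEs, an
 * uncleared high word) until the corresponding hash table entry has been
 * flushed, so a test for emptiness must ignore the bits in _PTE_NONE_MASK,
 * which is exactly what pte_none() further down does. pte_basic_t is the
 * raw PTE word type used elsewhere in this file.
 */
#ifndef __ASSEMBLY__
static inline int example_pte_is_empty(pte_basic_t pteval)
{
        /* ignore stale hash bookkeeping bits when testing for "empty" */
        return (pteval & ~(pte_basic_t)_PTE_NONE_MASK) == 0;
}
#endif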

#define _PMD_PRESENT    0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD        (~PAGE_MASK)

/* We borrow the _PAGE_READ bit to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE     _PAGE_READ

/* And here we include common definitions */

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * value as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't should pre-define the value so we don't override
 * it here.
 */
#define PTE_RPN_SHIFT   (PAGE_SHIFT)

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK    (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK    (~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
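
/*
 * Example (illustrative sketch, not part of the original header):
 * extracting the physical frame number from a raw PTE value using the
 * masks above; pfn_pte() further down performs the inverse operation.
 */
#ifndef __ASSEMBLY__
static inline unsigned long example_pte_to_pfn(pte_basic_t pteval)
{
        /* drop the flag bits, then shift the RPN down to a frame number */
        return (unsigned long)((pteval & PTE_RPN_MASK) >> PTE_RPN_SHIFT);
}
#endif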

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK  (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
                         _PAGE_ACCESSED | _PAGE_SPECIAL)

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or when
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE      (_PAGE_BASE_NC | _PAGE_COHERENT)

#include <asm/pgtable-masks.h>

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL     __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC  __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X   __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO  __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

#define PTE_INDEX_SIZE  PTE_SHIFT
#define PMD_INDEX_SIZE  0
#define PUD_INDEX_SIZE  0
#define PGD_INDEX_SIZE  (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE  0
#define PUD_TABLE_SIZE  0
#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS         (PTE_TABLE_SIZE - 1)
#endif  /* __ASSEMBLY__ */

#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
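
/*
 * Example (illustrative sketch, not part of the original header): how a
 * virtual address splits for the two-level walk described above. In the
 * normal 32-bit PTE case, the top PGD_INDEX_SIZE bits select the pgdir
 * slot and the next PTE_INDEX_SIZE bits select an entry in the PTE page:
 */
#ifndef __ASSEMBLY__
static inline unsigned long example_pgd_index(unsigned long addr)
{
        /* top-level index; already < PTRS_PER_PGD for a 32-bit address */
        return addr >> PGDIR_SHIFT;
}

static inline unsigned long example_pte_index(unsigned long addr)
{
        /* second-level index within the PTE page */
        return (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
#endif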

#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);
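
/*
 * Example (illustrative sketch, not part of the original header): using
 * map_kernel_page() with the PAGE_KERNEL_NCG mask defined above to map
 * one page of device registers cache-inhibited and guarded. Both
 * addresses are made-up placeholders, not real platform values.
 */
static inline int example_map_device_page(void)
{
        return map_kernel_page(0xf0000000UL, (phys_addr_t)0x80000000ULL,
                               PAGE_KERNEL_NCG);
}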

#endif /* !__ASSEMBLY__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on other configurations; from here we can start laying
 * out the kernel virtual space that goes below PKMAP and FIXMAP.
 */

#define FIXADDR_SIZE    0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP     (KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP     ((unsigned long)(-PAGE_SIZE))
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP     PKMAP_BASE
#else
#define IOREMAP_TOP     FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START   VMALLOC_START
#define IOREMAP_END     VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be an up-to-16MB "hole"
 * after the physical memory until the kernel virtual memory starts.
 * That means that any out-of-bounds memory accesses will hopefully be
 * caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */

#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
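
/*
 * Worked example (illustrative, assuming 192MB of lowmem so that
 * high_memory == 0xcc000000): (0xcc000000 + 0x1000000) & ~0xffffff
 * == 0xcd000000, i.e. VMALLOC_START lands on the next 16MB boundary,
 * leaving a 16MB guard hole above the direct map.
 */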

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END     ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END     ioremap_bot
#endif

#define MODULES_END     ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR   (MODULES_END - SZ_256M)

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS         0

#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
        do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
        *pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
                            unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
                          unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
        if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
                unsigned long ptephys = __pa(ptep) & PAGE_MASK;

                flush_hash_pages(mm->context.id, addr, ptephys, 1);
        }
}

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
                                     unsigned long clr, unsigned long set, int huge)
{
        pte_basic_t old;

        if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
                unsigned long tmp;

                asm volatile(
#ifndef CONFIG_PTE_64BIT
        "1:     lwarx   %0, 0, %3\n"
        "       andc    %1, %0, %4\n"
#else
        "1:     lwarx   %L0, 0, %3\n"
        "       lwz     %0, -4(%3)\n"
        "       andc    %1, %L0, %4\n"
#endif
        "       or      %1, %1, %5\n"
        "       stwcx.  %1, 0, %3\n"
        "       bne-    1b"
                : "=&r" (old), "=&r" (tmp), "=m" (*p)
#ifndef CONFIG_PTE_64BIT
                : "r" (p),
#else
                : "b" ((unsigned long)(p) + 4),
#endif
                  "r" (clr), "r" (set), "m" (*p)
                : "cc" );
        } else {
                old = pte_val(*p);

                *p = __pte((old & ~(pte_basic_t)clr) | set);
        }

        return old;
}
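
/*
 * Example (illustrative sketch, not part of the original header): the
 * clr/set arguments let a caller clear and set bits in one atomic
 * lwarx/stwcx. sequence; ptep_set_wrprotect() below uses the clr side
 * alone. A hypothetical combined update could look like:
 */
static inline void example_pte_wrprotect_and_age(struct mm_struct *mm,
                                                 unsigned long addr,
                                                 pte_t *ptep)
{
        /* atomically drop write permission while marking the page young */
        pte_update(mm, addr, ptep, _PAGE_WRITE, _PAGE_ACCESSED, 0);
}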

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
                                              unsigned long addr, pte_t *ptep)
{
        unsigned long old;
        old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
        if (old & _PAGE_HASHPTE)
                flush_hash_entry(mm, ptep, addr);

        return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
        __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
{
        pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
                                           pte_t *ptep, pte_t entry,
                                           unsigned long address,
                                           int psize)
{
        unsigned long set = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

        pte_update(vma->vm_mm, address, ptep, 0, set, 0);

        flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_pfn(pmd)            (pmd_val(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)           pfn_to_page(pmd_pfn(pmd))

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs (32bit PTEs):
 *
 *                         1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3
 *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 *   <----------------- offset --------------------> < type -> E H P
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   _PAGE_PRESENT (P) and _PAGE_HASHPTE (H) must be 0.
 *
 * For 64bit PTEs, the offset is extended by 32bit.
 */
#define __swp_type(entry)               ((entry).val & 0x1f)
#define __swp_offset(entry)             ((entry).val >> 5)
#define __swp_entry(type, offset)       ((swp_entry_t) { ((type) & 0x1f) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val << 3 })
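
/*
 * Example (illustrative sketch, not part of the original header): a
 * round trip through the encoding above. The low 3 PTE bits (P, H, E)
 * are shifted out first, then the 5-bit type and the offset unpack:
 */
static inline void example_swap_decode(pte_t swp_pte, unsigned int *type,
                                       unsigned long *offset)
{
        swp_entry_t entry = __pte_to_swp_entry(swp_pte);

        *type = __swp_type(entry);      /* which swap device/file */
        *offset = __swp_offset(entry);  /* page offset within it */
}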

static inline int pte_swp_exclusive(pte_t pte)
{
        return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

/* Generic accessors to PTE bits */
static inline bool pte_read(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_READ);
}

static inline bool pte_write(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_WRITE);
}

static inline int pte_dirty(pte_t pte)          { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)          { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)        { return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)           { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte)          { return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
        return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hashpte(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_HASHPTE);
}

static inline bool pte_ci(pte_t pte)
{
        return !!(pte_val(pte) & _PAGE_NO_CACHE);
}

/*
 * We only find page table entries at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
        /*
         * A read-only access is controlled by the _PAGE_READ bit.
         * We also have _PAGE_READ set for WRITE (write implies read).
         */
        if (!pte_present(pte) || !pte_read(pte))
                return false;

        if (write && !pte_write(pte))
                return false;

        return true;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
        return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
                     pgprot_val(pgprot));
}
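
/*
 * Example (illustrative sketch, not part of the original header):
 * building a PTE for a frame with the standard cacheable kernel RW
 * protection; the inverse of extracting the PFN via PTE_RPN_MASK.
 */
static inline pte_t example_make_kernel_pte(unsigned long pfn)
{
        /* place the PFN in the RPN field and OR in the protection bits */
        return pfn_pte(pfn, PAGE_KERNEL);
}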

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_WRITE);
}

static inline pte_t pte_exprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_EXEC);
}

static inline pte_t pte_mkpte(pte_t pte)
{
        return pte;
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
        /*
         * write implies read, hence set both
         */
        return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
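
/*
 * Example (illustrative sketch, not part of the original header):
 * pte_modify() keeps everything in _PAGE_CHG_MASK (RPN, hash, dirty,
 * accessed and special bits) and replaces only the protection bits.
 * PAGE_READONLY is assumed to come from asm/pgtable-masks.h:
 */
static inline pte_t example_make_readonly(pte_t pte)
{
        /* swap the protection to read-only; dirty/accessed survive */
        return pte_modify(pte, PAGE_READONLY);
}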

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors.
 *
 * First case is 32-bit in UP mode with 32-bit PTEs: we need to preserve
 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
 * so we need to keep track that this PTE needs invalidating.
 *
 * Second case is 32-bit with 64-bit PTE.  In this case, we
 * can just store as long as we do the two halves in the right order
 * with a barrier in between. This is possible because we take care,
 * in the hash code, to pre-invalidate if the PTE was already hashed,
 * which synchronizes us with any concurrent invalidation.
 * In the percpu case, we fall back to the simple update preserving
 * the hash bits (ie, same as the non-SMP case).
 *
 * Third case is 32-bit in SMP mode with 32-bit PTEs. We use the
 * helper pte_update() which does an atomic update. We need to do that
 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
 * per-CPU PTE such as a kmap_atomic, we also do a simple update preserving
 * the hash bits instead.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int percpu)
{
        if ((!IS_ENABLED(CONFIG_SMP) && !IS_ENABLED(CONFIG_PTE_64BIT)) || percpu) {
                *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE) |
                              (pte_val(pte) & ~_PAGE_HASHPTE));
        } else if (IS_ENABLED(CONFIG_PTE_64BIT)) {
                if (pte_val(*ptep) & _PAGE_HASHPTE)
                        flush_hash_entry(mm, ptep, addr);

                asm volatile("stw%X0 %2,%0; eieio; stw%X1 %L2,%1" :
                             "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) :
                             "r" (pte) : "memory");
        } else {
                pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
        }
}
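
/*
 * Example (illustrative sketch, not part of the original header): a
 * per-CPU mapping such as a kmap_atomic slot cannot be raced on by
 * another CPU, so passing percpu == 1 takes the first, non-atomic
 * branch above while still preserving _PAGE_HASHPTE:
 */
static inline void example_set_percpu_pte(struct mm_struct *mm,
                                          unsigned long addr,
                                          pte_t *ptep, pte_t pte)
{
        __set_pte_at(mm, addr, ptep, pte, 1);
}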

/*
 * Macros to mark a page protection value as "uncacheable" and
 * related cache-control variants.
 */

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
        return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
                        _PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
        return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
        return pgprot_noncached_wc(prot);
}
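
/*
 * Example (illustrative sketch, not part of the original header):
 * deriving an MMIO-suitable protection. For a kernel mapping this is
 * equivalent to PAGE_KERNEL_NCG defined near the top of this file.
 */
static inline pgprot_t example_mmio_prot(void)
{
        /* cache-inhibited + guarded, coherency bits stripped */
        return pgprot_noncached(PAGE_KERNEL);
}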

#endif /* !__ASSEMBLY__ */

#endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */