Linux/arch/powerpc/include/asm/nohash/32/pte-8xx.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PTE_8xx_H
#define _ASM_POWERPC_NOHASH_32_PTE_8xx_H
#ifdef __KERNEL__

/*
 * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
 * We also use the two level tables, but we can put the real bits needed
 * for the TLB and tablewalk in them.  These definitions require Mx_CTR.PPM = 0,
 * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1.  The level 2 descriptor has
 * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
 * based upon user/super access.  The TLB has neither accessed nor write
 * protect bits.  We assume that if the TLB gets loaded with an entry it is
 * accessed, and overload the changed bit for write protect.  We use
 * two bits in the software pte that are supposed to be set to zero in
 * the TLB entry (24 and 25) for these indicators.  Although the level 1
 * descriptor contains the guarded and writethrough/copyback bits, we can
 * set these at the page level since they get copied from the Mx_TWC
 * register when the TLB entry is loaded.  We will use bit 27 for guard, since
 * that is where it exists in the MD_TWC, and bit 26 for writethrough.
 * These will get masked from the level 2 descriptor at TLB load time, and
 * copied to the MD_TWC before it gets loaded.
 * Large page sizes added.  In addition to 4k pages, 16k, 512k and 8M pages
 * are supported (see _PAGE_SPS and _PAGE_HUGE below).
 * This also allows a TLB handler optimization because we can directly
 * load the PMD into MD_TWC.  The 8M pages are only used for kernel
 * mapping of well known areas.  The PMD (PGD) entries contain control
 * flags in addition to the address, so care must be taken that the
 * software no longer assumes these are only pointers.
 */

/* Definitions for 8xx embedded chips. */
#define _PAGE_PRESENT   0x0001  /* V: Page is valid */
#define _PAGE_NO_CACHE  0x0002  /* CI: cache inhibit */
#define _PAGE_SH        0x0004  /* SH: No ASID (context) compare */
#define _PAGE_SPS       0x0008  /* SPS: Small Page Size (1 if 16k, 512k or 8M) */
#define _PAGE_DIRTY     0x0100  /* C: page changed */

/* These 4 software bits must be masked out when the L2 entry is loaded
 * into the TLB.
 */
#define _PAGE_GUARDED   0x0010  /* Copied to L1 G entry in DTLB */
#define _PAGE_ACCESSED  0x0020  /* Copied to L1 APG 1 entry in I/DTLB */
#define _PAGE_EXEC      0x0040  /* Copied to PP (bit 21) in ITLB */
#define _PAGE_SPECIAL   0x0080  /* SW entry */

#define _PAGE_NA        0x0200  /* Supervisor NA, User no access */
#define _PAGE_RO        0x0600  /* Supervisor RO, User no access */

#define _PAGE_HUGE      0x0800  /* Copied to L1 PS bit 29 */

#define _PAGE_NAX       (_PAGE_NA | _PAGE_EXEC)
#define _PAGE_ROX       (_PAGE_RO | _PAGE_EXEC)
#define _PAGE_RW        0
#define _PAGE_RWX       _PAGE_EXEC
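
/*
 * For reference, the access encodings resulting from the definitions above
 * (illustrative summary computed from the values, not an additional
 * definition):
 *   RW = 0x0000, RWX = 0x0040, RO = 0x0600, ROX = 0x0640,
 *   NA = 0x0200, NAX = 0x0240.
 * Note that _PAGE_RO includes the _PAGE_NA bit, so "writable" simply means
 * that none of these protection bits are set (see pte_write() and
 * pte_read() below).
 */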

/* Cache related flags that do not exist on the 8xx */
#define _PAGE_COHERENT  0
#define _PAGE_WRITETHRU 0

#define _PAGE_KERNEL_RO         (_PAGE_SH | _PAGE_RO)
#define _PAGE_KERNEL_ROX        (_PAGE_SH | _PAGE_RO | _PAGE_EXEC)
#define _PAGE_KERNEL_RW         (_PAGE_SH | _PAGE_DIRTY)
#define _PAGE_KERNEL_RWX        (_PAGE_SH | _PAGE_DIRTY | _PAGE_EXEC)
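
/*
 * Illustrative values for the kernel protection combinations above
 * (computed from the flag definitions):
 *   _PAGE_KERNEL_RO  = 0x0604, _PAGE_KERNEL_ROX = 0x0644,
 *   _PAGE_KERNEL_RW  = 0x0104, _PAGE_KERNEL_RWX = 0x0144.
 * Kernel mappings are shared (_PAGE_SH, no ASID compare), and writable
 * kernel mappings are created dirty (the hardware does not track the
 * changed bit, see the comment at the top of this file).
 */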

#define _PMD_PRESENT    0x0001
#define _PMD_PRESENT_MASK       _PMD_PRESENT
#define _PMD_BAD        0x0f90
#define _PMD_PAGE_MASK  0x000c
#define _PMD_PAGE_8M    0x000c
#define _PMD_PAGE_512K  0x0004
#define _PMD_ACCESSED   0x0020  /* APG 1 */
#define _PMD_USER       0x0040  /* APG 2 */

#define _PTE_NONE_MASK  0

#ifdef CONFIG_PPC_16K_PAGES
#define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SPS)
#else
#define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED)
#endif

#define _PAGE_BASE      (_PAGE_BASE_NC)

#include <asm/pgtable-masks.h>

#ifndef __ASSEMBLY__
static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_RO);
}

#define pte_wrprotect pte_wrprotect

static inline int pte_read(pte_t pte)
{
        return (pte_val(pte) & _PAGE_RO) != _PAGE_NA;
}

#define pte_read pte_read

static inline int pte_write(pte_t pte)
{
        return !(pte_val(pte) & _PAGE_RO);
}

#define pte_write pte_write

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_RO);
}

#define pte_mkwrite_novma pte_mkwrite_novma

static inline pte_t pte_mkhuge(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
}

#define pte_mkhuge pte_mkhuge

static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                                     unsigned long clr, unsigned long set, int huge);

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_update(mm, addr, ptep, 0, _PAGE_RO, 0);
}
#define ptep_set_wrprotect ptep_set_wrprotect

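/*
 * Set the dirty/accessed/exec bits carried by the new entry and clear the
 * _PAGE_RO bits that the new entry no longer has, then flush the TLB entry
 * for that page.
 */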
static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
                                           pte_t entry, unsigned long address, int psize)
{
        unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_EXEC);
        unsigned long clr = ~pte_val(entry) & _PAGE_RO;
        int huge = psize > mmu_virtual_psize ? 1 : 0;

        pte_update(vma->vm_mm, address, ptep, clr, set, huge);

        flush_tlb_page(vma, address);
}
#define __ptep_set_access_flags __ptep_set_access_flags

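/*
 * Size of the page mapped by this PTE/PMD pair: 8M pages are flagged in
 * the PMD, 512k pages by _PAGE_HUGE, 16k pages by _PAGE_SPS, everything
 * else is a standard 4k page.
 */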
static inline unsigned long __pte_leaf_size(pmd_t pmd, pte_t pte)
{
        pte_basic_t val = pte_val(pte);

        if (pmd_val(pmd) & _PMD_PAGE_8M)
                return SZ_8M;
        if (val & _PAGE_HUGE)
                return SZ_512K;
        if (val & _PAGE_SPS)
                return SZ_16K;
        return SZ_4K;
}

#define __pte_leaf_size __pte_leaf_size

/*
 * On the 8xx, the page tables are a bit special. For 16k pages, we have
 * 4 identical entries. For 512k pages, we have 128 entries as if they were
 * 4k pages, but they are flagged as 512k pages for the hardware.
 * For 8M pages, we have 1024 entries as if they were 4M pages (PMD_SIZE)
 * but they are flagged as 8M pages for the hardware.
 * For 4k pages, we have a single entry in the table.
 */
static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address);

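/*
 * For an 8M mapping the "pte" pointer handed around is in fact the PMD
 * entry itself; detect that case by checking whether ptep matches the PMD
 * entry of the 8M-aligned address.
 */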
static inline bool ptep_is_8m_pmdp(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        return (pmd_t *)ptep == pmd_off(mm, ALIGN_DOWN(addr, SZ_8M));
}

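/*
 * Number of consecutive 4k PTE cells that back one mapping, matching the
 * layout described above. For example, with CONFIG_PPC_4K_PAGES a 16k page
 * uses 4 cells, a 512k page uses 128 cells, and each 4M (PMD_SIZE) half of
 * an 8M page uses SZ_4M / SZ_4K = 1024 cells.
 */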
static inline int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
{
        if (!huge)
                return PAGE_SIZE / SZ_4K;
        else if ((pmd_val(*pmd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M)
                return SZ_4M / SZ_4K;
        else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
                return SZ_16K / SZ_4K;
        else
                return SZ_512K / SZ_4K;
}

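/*
 * Write the updated value into every 4k cell backing the mapping. The
 * encoded value is advanced by PAGE_SIZE at each step; with
 * CONFIG_PPC_16K_PAGES one step fills the four identical cells that make
 * up a single 16k PTE.
 */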
static inline pte_basic_t __pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
                                       unsigned long clr, unsigned long set, int huge)
{
        pte_basic_t *entry = (pte_basic_t *)p;
        pte_basic_t old = pte_val(*p);
        pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
        int num, i;
        pmd_t *pmd = pmd_off(mm, addr);

        num = number_of_cells_per_pte(pmd, new, huge);

        for (i = 0; i < num; i += PAGE_SIZE / SZ_4K, new += PAGE_SIZE) {
                *entry++ = new;
                if (IS_ENABLED(CONFIG_PPC_16K_PAGES)) {
                        *entry++ = new;
                        *entry++ = new;
                        *entry++ = new;
                }
        }

        return old;
}

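/*
 * Update a PTE. For an 8M mapping, ptep is the PMD entry (see
 * ptep_is_8m_pmdp() above) and the mapping spans two 4M PMD slots, so the
 * PTE tables of both slots are updated.
 */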
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                                     unsigned long clr, unsigned long set, int huge)
{
        pte_basic_t old;

        if (huge && ptep_is_8m_pmdp(mm, addr, ptep)) {
                pmd_t *pmdp = (pmd_t *)ptep;

                old = __pte_update(mm, addr, pte_offset_kernel(pmdp, 0), clr, set, huge);
                __pte_update(mm, addr, pte_offset_kernel(pmdp + 1, 0), clr, set, huge);
        } else {
                old = __pte_update(mm, addr, ptep, clr, set, huge);
        }
        return old;
}
#define pte_update pte_update

#ifdef CONFIG_PPC_16K_PAGES
#define ptep_get ptep_get
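/*
 * With 16k pages a pte_t is made of four identical 4k cells (see the
 * layout comment above), so reading a single cell is sufficient and the
 * value is replicated into the returned pte_t.
 */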
static inline pte_t ptep_get(pte_t *ptep)
{
        pte_basic_t val = READ_ONCE(ptep->pte);
        pte_t pte = {val, val, val, val};

        return pte;
}
#endif /* CONFIG_PPC_16K_PAGES */

#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
