TOMOYO Linux Cross Reference
Linux/arch/powerpc/include/asm/nohash/pgtable.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#ifndef __ASSEMBLY__
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge);
#endif

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
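
/*
 * Illustrative sketch (not part of this header's API): pte_modify() below
 * is built on _PAGE_CHG_MASK, so changing a mapping's protection keeps the
 * PFN and the dirty/accessed/special state, e.g.:
 *
 *	pte_t pte = pte_mkdirty(pfn_pte(pfn, PAGE_KERNEL));	// assume a valid pfn
 *	pte = pte_modify(pte, PAGE_KERNEL_RO);	// still dirty, same PFN
 */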

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

#ifndef __ASSEMBLY__

extern int icache_44x_need_flush;

#ifndef pte_huge_size
static inline unsigned long pte_huge_size(pte_t pte)
{
	return PAGE_SIZE;
}
#endif

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at(),
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates,
 * and the PTE may be either 32 or 64 bits wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef pte_update
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
	unsigned long sz;
	unsigned long pdsize;
	int i;

	if (new == old)
		return old;

	if (huge)
		sz = pte_huge_size(__pte(old));
	else
		sz = PAGE_SIZE;

	if (sz < PMD_SIZE)
		pdsize = PAGE_SIZE;
	else if (sz < PUD_SIZE)
		pdsize = PMD_SIZE;
	else if (sz < P4D_SIZE)
		pdsize = PUD_SIZE;
	else if (sz < PGDIR_SIZE)
		pdsize = P4D_SIZE;
	else
		pdsize = PGDIR_SIZE;

	for (i = 0; i < sz / pdsize; i++, p++) {
		*p = __pte(new);
		if (new)
			new += (unsigned long long)(pdsize / PAGE_SIZE) << PTE_RPN_SHIFT;
	}

	if (IS_ENABLED(CONFIG_44x) && !is_kernel_addr(addr) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;

	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	return old;
}
#endif
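
/*
 * Usage sketch (illustrative only): atomically clear the ACCESSED bit and
 * learn whether the page had been referenced, assuming 'mm', 'addr' and
 * 'ptep' come from a page-table walk with the PTE lock held:
 *
 *	pte_basic_t old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
 *	bool referenced = (old & _PAGE_ACCESSED) != 0;
 *
 * ptep_test_and_clear_young() below is exactly this pattern.
 */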

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	old = pte_update(vma->vm_mm, addr, ptep, _PAGE_ACCESSED, 0, 0);

	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG

#ifndef ptep_set_wrprotect
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}
#endif
#define __HAVE_ARCH_PTEP_SET_WRPROTECT

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

/* Set the dirty and/or accessed bits atomically in a Linux PTE */
#ifndef __ptep_set_access_flags
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
			    (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, 0, set, huge);

	flush_tlb_page(vma, address);
}
#endif
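
/*
 * Call-pattern sketch (illustrative; the generic fault path reaches this
 * helper through the arch's ptep_set_access_flags()): after a write fault
 * on a present, writable PTE, mark it dirty and young in one update:
 *
 *	pte_t entry = pte_mkdirty(pte_mkyoung(*ptep));
 *	__ptep_set_access_flags(vma, ptep, entry, address, mmu_virtual_psize);
 */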

/* Generic accessors to PTE bits */
#ifndef pte_mkwrite_novma
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	/*
	 * Write implies read, hence set both.
	 */
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif

#ifndef pte_mkexec
static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}
#endif

#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}
#endif
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte)	{ return false; }
static inline bool pte_ci(pte_t pte)		{ return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

/*
 * Don't just check for any nonzero bits in _PAGE_READ, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_READ.  We need to explicitly match the _PAGE_BAP_UR bit in that
 * case too.
 */
#ifndef pte_read
static inline bool pte_read(pte_t pte)
{
	return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
}
#endif
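
/*
 * Worked example (assuming the book3e/PTE_64BIT layout described above,
 * where _PAGE_READ is _PAGE_BAP_SR | _PAGE_BAP_UR): a kernel-executable
 * PTE has _PAGE_BAP_SR set, so (pte_val(pte) & _PAGE_READ) is nonzero even
 * though the user-read bit _PAGE_BAP_UR is clear.  Comparing the masked
 * value against _PAGE_READ itself only succeeds when both bits are set.
 */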

/*
 * We only find page table entries at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_READ bit.
	 * Since write implies read, _PAGE_READ is also set for writable PTEs.
	 */
	if (!pte_present(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}
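
/*
 * Usage sketch (illustrative; the fast GUP path performs a check of this
 * shape while walking page tables without taking the PTE lock):
 *
 *	if (!pte_access_permitted(pte, write))
 *		return -EFAULT;		// hypothetical caller's error handling
 */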

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | pgprot_val(pgprot));
}

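/*
 * Illustrative sketch: build a PTE mapping the physical page 'pfn' (an
 * assumed, valid page frame number) with the default kernel protection
 * defined near the top of this header:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 */
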
/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

/*
 * This low-level function performs the actual PTE insertion.  Setting the
 * PTE depends on the MMU type and other factors; it's a horrible mess that
 * I'm not going to try to clean up now, but I'm keeping it in one place
 * rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * The one special case is 32-bit with 64-bit PTEs: there we can just
	 * store as long as we do the two halves in the right order with a
	 * barrier in between.  In the percpu case, we also fall back to the
	 * simple update.
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			mbar\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}
	/*
	 * Anything else just stores the PTE normally.  That covers all
	 * 64-bit cases, and 32-bit non-hash with 32-bit PTEs.
	 */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte3 = ptep->pte2 = ptep->pte1 = ptep->pte = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}
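
/*
 * Usage sketch (illustrative; on powerpc the generic set_pte_at()/set_ptes()
 * entry points end up here, with the caller holding the PTE lock):
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	set_pte_at(mm, addr, ptep, pte);	// eventually calls __set_pte_at()
 */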

/*
 * Macros to manipulate the cacheability of a page protection value,
 * e.g. to mark it as uncacheable.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
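
/*
 * Illustrative sketch (assumption: mapping device MMIO, the typical
 * consumer of these macros): strip the cache-control bits from the normal
 * kernel protection and mark the range uncacheable and guarded:
 *
 *	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 *	int err = map_kernel_page(va, pa, prot);	// declared just below
 */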

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */