TOMOYO Linux Cross Reference
Linux/arch/x86/mm/pgtable.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
#endif

#ifdef CONFIG_HIGHPTE
#define PGTABLE_HIGHMEM __GFP_HIGHMEM
#else
#define PGTABLE_HIGHMEM 0
#endif

#ifndef CONFIG_PARAVIRT
static inline
void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        tlb_remove_page(tlb, table);
}
#endif

gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;

pgtable_t pte_alloc_one(struct mm_struct *mm)
{
        return __pte_alloc_one(mm, __userpte_alloc_gfp);
}

static int __init setup_userpte(char *arg)
{
        if (!arg)
                return -EINVAL;

        /*
         * "userpte=nohigh" disables allocation of user pagetables in
         * high memory.
         */
        if (strcmp(arg, "nohigh") == 0)
                __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
        else
                return -EINVAL;
        return 0;
}
early_param("userpte", setup_userpte);
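
/*
 * Usage sketch (hypothetical command line, for illustration): booting
 * with
 *
 *         linux ... userpte=nohigh
 *
 * makes setup_userpte() clear __GFP_HIGHMEM above, so pte_alloc_one()
 * then serves user page tables from lowmem only.
 */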

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pagetable_pte_dtor(page_ptdesc(pte));
        paravirt_release_pte(page_to_pfn(pte));
        paravirt_tlb_remove_table(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
        /*
         * NOTE! For PAE, any changes to the top page-directory-pointer-table
         * entries need a full cr3 reload to flush.
         */
#ifdef CONFIG_X86_PAE
        tlb->need_flush_all = 1;
#endif
        pagetable_pmd_dtor(ptdesc);
        paravirt_tlb_remove_table(tlb, ptdesc_page(ptdesc));
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        struct ptdesc *ptdesc = virt_to_ptdesc(pud);

        pagetable_pud_dtor(ptdesc);
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
        paravirt_tlb_remove_table(tlb, virt_to_page(pud));
}

#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
        paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
        paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif  /* CONFIG_PGTABLE_LEVELS > 4 */
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
#endif  /* CONFIG_PGTABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
        struct ptdesc *ptdesc = virt_to_ptdesc(pgd);

        list_add(&ptdesc->pt_list, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct ptdesc *ptdesc = virt_to_ptdesc(pgd);

        list_del(&ptdesc->pt_list);
}

#define UNSHARED_PTRS_PER_PGD                           \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
#define MAX_UNSHARED_PTRS_PER_PGD                       \
        MAX_T(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
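
/*
 * Worked example (sketch; assumes the default 32-bit 3G/1G split):
 * under PAE, PTRS_PER_PGD == 4 and KERNEL_PGD_BOUNDARY == 3, so with a
 * shared kernel pmd UNSHARED_PTRS_PER_PGD is 3 (only the user slots
 * are per-process), while a non-shared kernel pmd (e.g. Xen PV) makes
 * all 4 slots per-process.
 */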

static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
        virt_to_ptdesc(pgd)->pt_mm = mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
        return page_ptdesc(page)->pt_mm;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (CONFIG_PGTABLE_LEVELS == 2 ||
            (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
            CONFIG_PGTABLE_LEVELS >= 4) {
                clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD) {
                pgd_set_mm(pgd, mm);
                pgd_list_add(pgd);
        }
}

static void pgd_dtor(pgd_t *pgd)
{
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock(&pgd_lock);
        pgd_list_del(pgd);
        spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */
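
/*
 * Walker sketch (illustrative, mirroring the pageattr.c pattern; not a
 * function in this file): a kernel-mapping change is propagated to
 * every cached pgd under pgd_lock, roughly:
 *
 *        struct ptdesc *ptdesc;
 *
 *        spin_lock(&pgd_lock);
 *        list_for_each_entry(ptdesc, &pgd_list, pt_list)
 *                set_pgd((pgd_t *)ptdesc_address(ptdesc) + pgd_index(address),
 *                        entry);
 *        spin_unlock(&pgd_lock);
 *
 * where "address" and "entry" (assumed names) are the updated init_mm
 * slot being synced.
 */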

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS       UNSHARED_PTRS_PER_PGD
#define MAX_PREALLOCATED_PMDS   MAX_UNSHARED_PTRS_PER_PGD

/*
 * We allocate separate PMDs for the kernel part of the user page-table
 * when PTI is enabled. We need them to map the per-process LDT into the
 * user-space page-table.
 */
#define PREALLOCATED_USER_PMDS   (boot_cpu_has(X86_FEATURE_PTI) ? \
                                        KERNEL_PGD_PTRS : 0)
#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

        /* Note: almost everything apart from _PAGE_PRESENT is
           reserved at the pmd (PDPT) level. */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS       0
#define MAX_PREALLOCATED_PMDS   0
#define PREALLOCATED_USER_PMDS   0
#define MAX_PREALLOCATED_USER_PMDS 0
#endif  /* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
        int i;
        struct ptdesc *ptdesc;

        for (i = 0; i < count; i++)
                if (pmds[i]) {
                        ptdesc = virt_to_ptdesc(pmds[i]);

                        pagetable_pmd_dtor(ptdesc);
                        pagetable_free(ptdesc);
                        mm_dec_nr_pmds(mm);
                }
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
        int i;
        bool failed = false;
        gfp_t gfp = GFP_PGTABLE_USER;

        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;
        gfp &= ~__GFP_HIGHMEM;

        for (i = 0; i < count; i++) {
                pmd_t *pmd = NULL;
                struct ptdesc *ptdesc = pagetable_alloc(gfp, 0);

                if (!ptdesc)
                        failed = true;
                if (ptdesc && !pagetable_pmd_ctor(ptdesc)) {
                        pagetable_free(ptdesc);
                        ptdesc = NULL;
                        failed = true;
                }
                if (ptdesc) {
                        mm_inc_nr_pmds(mm);
                        pmd = ptdesc_address(ptdesc);
                }

                pmds[i] = pmd;
        }

        if (failed) {
                free_pmds(mm, pmds, count);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
{
        pgd_t pgd = *pgdp;

        if (pgd_val(pgd) != 0) {
                pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                pgd_clear(pgdp);

                paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                pmd_free(mm, pmd);
                mm_dec_nr_pmds(mm);
        }
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++)
                mop_up_one_pmd(mm, &pgdp[i]);

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION

        if (!boot_cpu_has(X86_FEATURE_PTI))
                return;

        pgdp = kernel_to_user_pgdp(pgdp);

        for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
                mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
#endif
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
        p4d_t *p4d;
        pud_t *pud;
        int i;

        p4d = p4d_offset(pgd, 0);
        pud = pud_offset(p4d, 0);

        for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
                pmd_t *pmd = pmds[i];

                if (i >= KERNEL_PGD_BOUNDARY)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }
}

#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
                                     pgd_t *k_pgd, pmd_t *pmds[])
{
        pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
        pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
        p4d_t *u_p4d;
        pud_t *u_pud;
        int i;

        u_p4d = p4d_offset(u_pgd, 0);
        u_pud = pud_offset(u_p4d, 0);

        s_pgd += KERNEL_PGD_BOUNDARY;
        u_pud += KERNEL_PGD_BOUNDARY;

        for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
                pmd_t *pmd = pmds[i];

                memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
                       sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, u_pud, pmd);
        }
}
#else
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
                                     pgd_t *k_pgd, pmd_t *pmds[])
{
}
#endif

/*
 * Xen paravirt assumes that the pgd table occupies one page. The 64-bit
 * kernel also assumes that the pgd occupies one page.
 *
 * But a kernel with PAE paging that is not running as a Xen domain only
 * needs 32 bytes for the pgd instead of one page.
 */
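
/*
 * Worked numbers (sketch): under PAE, PTRS_PER_PGD == 4 and
 * sizeof(pgd_t) == 8, so PGD_SIZE below evaluates to 4 * 8 == 32
 * bytes, versus the full 4096-byte page a Xen PV domain needs.
 */
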
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE        (PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN       32

static struct kmem_cache *pgd_cache;

void __init pgtable_cache_init(void)
{
        /*
         * When a PAE kernel is running as a Xen domain, it does not use
         * a shared kernel pmd, and that requires a whole page for the pgd.
         */
        if (!SHARED_KERNEL_PMD)
                return;

        /*
         * When a PAE kernel is not running as a Xen domain, it uses a
         * shared kernel pmd, which does not require a whole page for the
         * pgd: 32 bytes are enough. So, at boot time, create a 32-byte
         * slab for pgd table allocations.
         */
        pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
                                      SLAB_PANIC, NULL);
}

static inline pgd_t *_pgd_alloc(void)
{
        /*
         * Without SHARED_KERNEL_PMD, the PAE kernel is running as a Xen
         * domain, so allocate a whole page for the pgd.
         */
        if (!SHARED_KERNEL_PMD)
                return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
                                                 PGD_ALLOCATION_ORDER);

        /*
         * Otherwise the PAE kernel is not running as a Xen domain, and
         * a 32-byte slab object for the pgd saves memory.
         */
        return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
}

static inline void _pgd_free(pgd_t *pgd)
{
        if (!SHARED_KERNEL_PMD)
                free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
        else
                kmem_cache_free(pgd_cache, pgd);
}
#else

static inline pgd_t *_pgd_alloc(void)
{
        return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
                                         PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
        free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;
        pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
        pmd_t *pmds[MAX_PREALLOCATED_PMDS];

        pgd = _pgd_alloc();

        if (pgd == NULL)
                goto out;

        mm->pgd = pgd;

        if (sizeof(pmds) != 0 &&
                        preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
                goto out_free_pgd;

        if (sizeof(u_pmds) != 0 &&
                        preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
                goto out_free_pmds;

        if (paravirt_pgd_alloc(mm) != 0)
                goto out_free_user_pmds;

        /*
         * Make sure that pre-populating the pmds is atomic with
         * respect to anything walking the pgd_list, so that they
         * never see a partially populated pgd.
         */
        spin_lock(&pgd_lock);

        pgd_ctor(mm, pgd);
        if (sizeof(pmds) != 0)
                pgd_prepopulate_pmd(mm, pgd, pmds);

        if (sizeof(u_pmds) != 0)
                pgd_prepopulate_user_pmd(mm, pgd, u_pmds);

        spin_unlock(&pgd_lock);

        return pgd;

out_free_user_pmds:
        if (sizeof(u_pmds) != 0)
                free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
        if (sizeof(pmds) != 0)
                free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
        _pgd_free(pgd);
out:
        return NULL;
}
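
/*
 * Call-path sketch (after mm_alloc_pgd() in kernel/fork.c, cited for
 * illustration): a new mm obtains its page-global directory via
 *
 *        mm->pgd = pgd_alloc(mm);
 *        if (unlikely(!mm->pgd))
 *                return -ENOMEM;
 */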

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        paravirt_pgd_free(mm, pgd);
        _pgd_free(pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);

        if (changed && dirty)
                set_pte(ptep, entry);

        return changed;
}
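
/*
 * Caller sketch (modelled on the generic write-fault path in
 * mm/memory.c; variable names assumed): the new, dirty entry is built
 * first and only written if it differs from the old one:
 *
 *        entry = pte_mkyoung(orig_pte);
 *        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *        if (ptep_set_access_flags(vma, addr, ptep, entry, 1))
 *                update_mmu_cache(vma, addr, ptep);
 */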

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        if (changed && dirty) {
                set_pmd(pmdp, entry);
                /*
                 * We had a write-protection fault here and changed the pmd
                 * to be more permissive. No need to flush the TLB for that,
                 * #PF is architecturally guaranteed to do that and in the
                 * worst-case we'll generate a spurious fault.
                 */
        }

        return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pud_t *pudp, pud_t entry, int dirty)
{
        int changed = !pud_same(*pudp, entry);

        VM_BUG_ON(address & ~HPAGE_PUD_MASK);

        if (changed && dirty) {
                set_pud(pudp, entry);
                /*
                 * We had a write-protection fault here and changed the pud
                 * to be more permissive. No need to flush the TLB for that,
                 * #PF is architecturally guaranteed to do that and in the
                 * worst-case we'll generate a spurious fault.
                 */
        }

        return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
        int ret = 0;

        if (pte_young(*ptep))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *) &ptep->pte);

        return ret;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pmd_t *pmdp)
{
        int ret = 0;

        if (pmd_young(*pmdp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pmdp);

        return ret;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pudp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pud_t *pudp)
{
        int ret = 0;

        if (pud_young(*pudp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pudp);

        return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        /*
         * On x86 CPUs, clearing the accessed bit without a TLB flush
         * doesn't cause data corruption. [ It could cause incorrect
         * page aging and the (mistaken) reclaim of hot pages, but the
         * chance of that should be relatively low. ]
         *
         * So as a performance optimization don't flush the TLB when
         * clearing the accessed bit, it will eventually be flushed by
         * a context switch or a VM operation anyway. [ In the rare
         * event of it not getting flushed for a long time the delay
         * shouldn't really matter because there's no real memory
         * pressure for swapout to react to. ]
         */
        return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

        return young;
}

pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
                         pmd_t *pmdp)
{
        VM_WARN_ON_ONCE(!pmd_present(*pmdp));

        /*
         * No flush is necessary. Once an invalid PTE is established, the PTE's
         * access and dirty bits cannot be updated.
         */
        return pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
        BUG_ON(fixmaps_set > 0);
        __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
        printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
               -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}
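
/*
 * Usage sketch (hypothetical size): a 32-bit paravirt guest could keep
 * the top 256MB of virtual address space free for its hypervisor by
 * calling, very early in boot:
 *
 *        reserve_top_address(256 << 20);
 */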

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
        unsigned long address = __fix_to_virt(idx);

#ifdef CONFIG_X86_64
        /*
         * Ensure that the static initial page tables are covering the
         * fixmap completely.
         */
        BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
                     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
#endif

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_vaddr(address, pte);
        fixmaps_set++;
}

void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
                       phys_addr_t phys, pgprot_t flags)
{
        /* Sanitize 'flags' against any unsupported bits: */
        pgprot_val(flags) &= __default_kernel_pte_mask;

        __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- nothing to do
 */
void p4d_clear_huge(p4d_t *p4d)
{
}
#endif

/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if the complete range has the same MTRR
 * caching mode.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
        u8 uniform;

        mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
        if (!uniform)
                return 0;

        /* Bail out if we are on a populated non-leaf entry: */
        if (pud_present(*pud) && !pud_leaf(*pud))
                return 0;

        set_pte((pte_t *)pud, pfn_pte(
                (u64)addr >> PAGE_SHIFT,
                __pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));

        return 1;
}
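
/*
 * Caller-pattern sketch (assumed; in the spirit of the generic
 * huge-vmap code): try the largest leaf first and step down on
 * failure, e.g.
 *
 *        if (pud_set_huge(pud, phys, prot))
 *                return;
 *
 * and otherwise fall back to pmd_set_huge(), then to 4K ptes.
 */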

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
        u8 uniform;

        mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
        if (!uniform) {
                pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
                             __func__, addr, addr + PMD_SIZE);
                return 0;
        }

        /* Bail out if we are on a populated non-leaf entry: */
        if (pmd_present(*pmd) && !pmd_leaf(*pmd))
                return 0;

        set_pte((pte_t *)pmd, pfn_pte(
                (u64)addr >> PAGE_SHIFT,
                __pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));

        return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
        if (pud_leaf(*pud)) {
                pud_clear(pud);
                return 1;
        }

        return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
        if (pmd_leaf(*pmd)) {
                pmd_clear(pmd);
                return 1;
        }

        return 0;
}

#ifdef CONFIG_X86_64
/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 * @addr: Virtual address associated with pud.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE: Callers must allow a single page allocation.
 */
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
        pmd_t *pmd, *pmd_sv;
        pte_t *pte;
        int i;

        pmd = pud_pgtable(*pud);
        pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
        if (!pmd_sv)
                return 0;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd_sv[i] = pmd[i];
                if (!pmd_none(pmd[i]))
                        pmd_clear(&pmd[i]);
        }

        pud_clear(pud);

        /* INVLPG to clear all paging-structure caches */
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (!pmd_none(pmd_sv[i])) {
                        pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
                        free_page((unsigned long)pte);
                }
        }

        free_page((unsigned long)pmd_sv);

        pagetable_pmd_dtor(virt_to_ptdesc(pmd));
        free_page((unsigned long)pmd);

        return 1;
}

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 * @addr: Virtual address associated with pmd.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;

        pte = (pte_t *)pmd_page_vaddr(*pmd);
        pmd_clear(pmd);

        /* INVLPG to clear all paging-structure caches */
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

        free_page((unsigned long)pte);

        return 1;
}

#else /* !CONFIG_X86_64 */

/*
 * Disable free page handling on x86-PAE. This assures that ioremap()
 * does not update sync'd pmd entries. See vmalloc_sync_one().
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        return pmd_none(*pmd);
}

#endif /* CONFIG_X86_64 */
#endif  /* CONFIG_HAVE_ARCH_HUGE_VMAP */

pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_SHADOW_STACK)
                return pte_mkwrite_shstk(pte);

        pte = pte_mkwrite_novma(pte);

        return pte_clear_saveddirty(pte);
}

pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_SHADOW_STACK)
                return pmd_mkwrite_shstk(pmd);

        pmd = pmd_mkwrite_novma(pmd);

        return pmd_clear_saveddirty(pmd);
}

void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte)
{
        /*
         * Hardware before shadow stack can (rarely) set Dirty=1
         * on a Write=0 PTE. So the below condition
         * only indicates a software bug when shadow stack is
         * supported by the HW. This checking is covered in
         * pte_shstk().
         */
        VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
                        pte_shstk(pte));
}

void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd)
{
        /* See note in arch_check_zapped_pte() */
        VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) &&
                        pmd_shstk(pmd));
}
