// SPDX-License-Identifier: GPL-2.0

/*
 *  Handling Page Tables through page fragments
 *
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

void pte_frag_destroy(void *pte_frag)
{
        int count;
        struct ptdesc *ptdesc;

        ptdesc = virt_to_ptdesc(pte_frag);
        /* drop all the pending references */
        count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
        /* We allow PTE_FRAG_NR fragments from a PTE page */
        if (atomic_sub_and_test(PTE_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
                pagetable_pte_dtor(ptdesc);
                pagetable_free(ptdesc);
        }
}

/*
 * Hand out the next unused fragment of the mm's cached PTE page, if any.
 * The cache pointer is cleared once the last fragment of the page is taken.
 */
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
        void *pte_frag, *ret;

        if (PTE_FRAG_NR == 1)
                return NULL;

        spin_lock(&mm->page_table_lock);
        ret = pte_frag_get(&mm->context);
        if (ret) {
                pte_frag = ret + PTE_FRAG_SIZE;
                /*
                 * If we have taken up all the fragments mark PTE page NULL
                 */
                if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
                        pte_frag = NULL;
                pte_frag_set(&mm->context, pte_frag);
        }
        spin_unlock(&mm->page_table_lock);
        return (pte_t *)ret;
}

/*
 * Allocate a fresh page table page and return its first fragment.  The
 * remainder of the page becomes the mm's fragment cache unless another
 * thread installed one in the meantime.
 */
static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
        void *ret = NULL;
        struct ptdesc *ptdesc;

        if (!kernel) {
                ptdesc = pagetable_alloc(PGALLOC_GFP | __GFP_ACCOUNT, 0);
                if (!ptdesc)
                        return NULL;
                if (!pagetable_pte_ctor(ptdesc)) {
                        pagetable_free(ptdesc);
                        return NULL;
                }
        } else {
                ptdesc = pagetable_alloc(PGALLOC_GFP, 0);
                if (!ptdesc)
                        return NULL;
        }

        atomic_set(&ptdesc->pt_frag_refcount, 1);

        ret = ptdesc_address(ptdesc);
        /*
         * if we support only one fragment just return the
         * allocated page.
         */
        if (PTE_FRAG_NR == 1)
                return ret;
        spin_lock(&mm->page_table_lock);
        /*
         * If we find ptdesc_page set, we return
         * the allocated page with single fragment
         * count.
         */
        if (likely(!pte_frag_get(&mm->context))) {
                atomic_set(&ptdesc->pt_frag_refcount, PTE_FRAG_NR);
                pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
        }
        spin_unlock(&mm->page_table_lock);

        return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel)
{
        pte_t *pte;

        pte = get_pte_from_cache(mm);
        if (pte)
                return pte;

        return __alloc_for_ptecache(mm, kernel);
}

static void pte_free_now(struct rcu_head *head)
{
        struct ptdesc *ptdesc;

        ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
        pagetable_pte_dtor(ptdesc);
        pagetable_free(ptdesc);
}

/*
 * Drop one fragment reference; the backing page is freed once the last
 * fragment is gone.  A folio marked active by pte_free_defer() is freed
 * only after an RCU grace period.
 */
void pte_fragment_free(unsigned long *table, int kernel)
{
        struct ptdesc *ptdesc = virt_to_ptdesc(table);

        if (pagetable_is_reserved(ptdesc))
                return free_reserved_ptdesc(ptdesc);

        BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
        if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
                if (kernel)
                        pagetable_free(ptdesc);
                else if (folio_test_clear_active(ptdesc_folio(ptdesc)))
                        call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
                else
                        pte_free_now(&ptdesc->pt_rcu_head);
        }
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
        struct folio *folio;

        folio = virt_to_folio(pgtable);
        folio_set_active(folio);
        pte_fragment_free((unsigned long *)pgtable, 0);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
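
/*
 * Illustrative sketch (not part of this file): how the entry points above
 * are typically wrapped by the powerpc pgalloc helpers, assuming the layout
 * of arch/powerpc/include/asm/pgalloc.h.  Treat the exact signatures as an
 * assumption for the kernel version at hand; the point is only that user
 * tables pass kernel == 0 and kernel tables pass kernel == 1.
 *
 *	static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 *	{
 *		return (pte_t *)pte_fragment_alloc(mm, 1);
 *	}
 *
 *	static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 *	{
 *		return (pgtable_t)pte_fragment_alloc(mm, 0);
 *	}
 *
 *	static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 *	{
 *		pte_fragment_free((unsigned long *)pte, 1);
 *	}
 *
 *	static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 *	{
 *		pte_fragment_free((unsigned long *)ptepage, 0);
 *	}
 */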