/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEWALK_H
#define _LINUX_PAGEWALK_H

#include <linux/mm.h>

struct mm_walk;

/* Locking requirement during a page walk. */
enum page_walk_lock {
	/* mmap_lock should be locked for read to stabilize the vma tree */
	PGWALK_RDLOCK = 0,
	/* vma will be write-locked during the walk */
	PGWALK_WRLOCK = 1,
	/* vma is expected to be already write-locked during the walk */
	PGWALK_WRLOCK_VERIFY = 2,
};

/**
 * struct mm_walk_ops - callbacks for walk_page_range
 * @pgd_entry:		if set, called for each non-empty PGD (top-level) entry
 * @p4d_entry:		if set, called for each non-empty P4D entry
 * @pud_entry:		if set, called for each non-empty PUD entry
 * @pmd_entry:		if set, called for each non-empty PMD entry
 *			this handler is required to be able to handle
 *			pmd_trans_huge() pmds.  They may simply choose to
 *			split_huge_page() instead of handling it explicitly.
 * @pte_entry:		if set, called for each PTE (lowest-level) entry,
 *			including empty ones
 * @pte_hole:		if set, called for each hole at all levels,
 *			depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD.
 *			Any folded depths (where PTRS_PER_P?D is equal to 1)
 *			are skipped.
 * @hugetlb_entry:	if set, called for each hugetlb entry. This hook
 *			function is called with the vma lock held, in order to
 *			protect against a concurrent freeing of the pte_t* or
 *			the ptl. In some cases, the hook function needs to drop
 *			and retake the vma lock in order to avoid deadlocks
 *			while calling other functions. In such cases the hook
 *			function must either refrain from accessing the pte or
 *			ptl after dropping the vma lock, or else revalidate
 *			those items after re-acquiring the vma lock and before
 *			accessing them.
 * @test_walk:		caller specific callback function to determine whether
 *			we walk over the current vma or not. Returning 0 means
 *			"do page table walk over the current vma", returning
 *			a negative value means "abort current page table walk
 *			right now" and returning 1 means "skip the current vma"
 *			Note that this callback is not called when the caller
 *			passes in a single VMA as for walk_page_vma().
 * @pre_vma:		if set, called before starting walk on a non-null vma.
 * @post_vma:		if set, called after a walk on a non-null vma, provided
 *			that @pre_vma and the vma walk succeeded.
 *
 * p?d_entry callbacks are called even if those levels are folded on a
 * particular architecture/configuration.
 */
struct mm_walk_ops {
	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*p4d_entry)(p4d_t *p4d, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pud_entry)(pud_t *pud, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			int depth, struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	int (*test_walk)(unsigned long addr, unsigned long next,
			 struct mm_walk *walk);
	int (*pre_vma)(unsigned long start, unsigned long end,
		       struct mm_walk *walk);
	void (*post_vma)(struct mm_walk *walk);
	enum page_walk_lock walk_lock;
};
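/*
 * Example (illustrative sketch only, not part of this header): a minimal
 * walker that counts present PTEs in a range of a process' address space.
 * The callback name, the ops name and the plain counter passed via the
 * walk's private pointer are assumptions made for the example; the
 * interfaces used (walk_page_range(), ptep_get(), mmap_read_lock()) are
 * existing kernel APIs.  PGWALK_RDLOCK matches the mmap_lock being held
 * for read around the walk.
 *
 *	static int count_pte_entry(pte_t *pte, unsigned long addr,
 *				   unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *nr_present = walk->private;
 *
 *		if (pte_present(ptep_get(pte)))
 *			(*nr_present)++;
 *		return 0;
 *	}
 *
 *	static const struct mm_walk_ops count_ops = {
 *		.pte_entry	= count_pte_entry,
 *		.walk_lock	= PGWALK_RDLOCK,
 *	};
 *
 *	unsigned long nr_present = 0;
 *
 *	mmap_read_lock(mm);
 *	ret = walk_page_range(mm, start, end, &count_ops, &nr_present);
 *	mmap_read_unlock(mm);
 */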
/*
 * Action for pud_entry / pmd_entry callbacks.
 * ACTION_SUBTREE is the default
 */
enum page_walk_action {
	/* Descend to next level, splitting huge pages if needed and possible */
	ACTION_SUBTREE = 0,
	/* Continue to next entry at this level (ignoring any subtree) */
	ACTION_CONTINUE = 1,
	/* Call again for this entry */
	ACTION_AGAIN = 2
};

/**
 * struct mm_walk - walk_page_range data
 * @ops:	operation to call during the walk
 * @mm:		mm_struct representing the target process of page table walk
 * @pgd:	pointer to PGD; only valid with no_vma (otherwise set to NULL)
 * @vma:	vma currently walked (NULL if walking outside vmas)
 * @action:	next action to perform (see enum page_walk_action)
 * @no_vma:	walk ignoring vmas (vma will always be NULL)
 * @private:	private data for callbacks' usage
 *
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	const struct mm_walk_ops *ops;
	struct mm_struct *mm;
	pgd_t *pgd;
	struct vm_area_struct *vma;
	enum page_walk_action action;
	bool no_vma;
	void *private;
};

int walk_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private);
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd,
			  void *private);
int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
			unsigned long end, const struct mm_walk_ops *ops,
			void *private);
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		void *private);
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private);

typedef int __bitwise folio_walk_flags_t;

/*
 * Walk migration entries as well. Careful: a large folio might get split
 * concurrently.
 */
#define FW_MIGRATION			((__force folio_walk_flags_t)BIT(0))

/* Walk shared zeropages (small + huge) as well. */
#define FW_ZEROPAGE			((__force folio_walk_flags_t)BIT(1))

enum folio_walk_level {
	FW_LEVEL_PTE,
	FW_LEVEL_PMD,
	FW_LEVEL_PUD,
};

/**
 * struct folio_walk - folio_walk_start() / folio_walk_end() data
 * @page:	exact folio page referenced (if applicable)
 * @level:	page table level identifying the entry type
 * @pte:	pointer to the page table entry (FW_LEVEL_PTE).
 * @pmd:	pointer to the page table entry (FW_LEVEL_PMD).
 * @pud:	pointer to the page table entry (FW_LEVEL_PUD).
 * @ptl:	pointer to the page table lock.
 *
 * (see folio_walk_start() documentation for more details)
 */
struct folio_walk {
	/* public */
	struct page *page;
	enum folio_walk_level level;
	union {
		pte_t *ptep;
		pud_t *pudp;
		pmd_t *pmdp;
	};
	union {
		pte_t pte;
		pud_t pud;
		pmd_t pmd;
	};
	/* private */
	struct vm_area_struct *vma;
	spinlock_t *ptl;
};

struct folio *folio_walk_start(struct folio_walk *fw,
		struct vm_area_struct *vma, unsigned long addr,
		folio_walk_flags_t flags);

#define folio_walk_end(__fw, __vma) do { \
	spin_unlock((__fw)->ptl); \
	if (likely((__fw)->level == FW_LEVEL_PTE)) \
		pte_unmap((__fw)->ptep); \
	vma_pgtable_walk_end(__vma); \
} while (0)

#endif /* _LINUX_PAGEWALK_H */
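/*
 * Example (illustrative sketch only, not part of this header): looking up
 * the folio currently mapped at a single address.  The folio is only
 * guaranteed to remain stable until folio_walk_end(), so a reference is
 * taken here before the page table lock is dropped.  The surrounding
 * mmap_read_lock()/vma_lookup() usage is an assumption made for the
 * example; any other scheme that keeps the VMA stable would do.
 *
 *	struct folio_walk fw;
 *	struct folio *folio = NULL;
 *	struct vm_area_struct *vma;
 *
 *	mmap_read_lock(mm);
 *	vma = vma_lookup(mm, addr);
 *	if (vma)
 *		folio = folio_walk_start(&fw, vma, addr, 0);
 *	if (folio) {
 *		folio_get(folio);
 *		folio_walk_end(&fw, vma);
 *	}
 *	mmap_read_unlock(mm);
 */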