// SPDX-License-Identifier: GPL-2.0
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in linux/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mm_inline.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out just as for p4d/pud
 * above: pmd folding is special, and typically the pmd_* macros refer to the
 * upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission.  Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this.  We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache().  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(ptep_get(ptep), entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address, ptep);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = (vma)->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif
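/*
 * Usage sketch (illustrative, not verbatim kernel source): the access-flags
 * helper above is typically called from a fault handler that already holds
 * the pte lock and only needs to mark the pte young and/or dirty.  The names
 * "vmf" and "write_fault" below are illustrative, loosely after mm/memory.c:
 *
 *	pte_t entry = pte_mkyoung(vmf->orig_pte);
 *	if (write_fault)
 *		entry = pte_mkdirty(entry);
 *	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte,
 *				  entry, write_fault))
 *		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
 *
 * The return value tells the caller whether the pte actually changed, and
 * hence whether update_mmu_cache() is worth calling; the spurious-fault TLB
 * maintenance is already handled inside ptep_set_access_flags().
 */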
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif
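/*
 * Usage sketch (illustrative, not verbatim kernel source): the deposit and
 * withdraw pair above parks a preallocated pte table under the pmd lock when
 * a huge pmd is installed, so that a later split of that huge pmd cannot fail
 * for lack of memory.  Schematically, loosely after mm/huge_memory.c usage;
 * "haddr" and "huge_entry" are illustrative names:
 *
 *	ptl = pmd_lock(mm, pmd);
 *	pgtable_trans_huge_deposit(mm, pmd, pgtable);
 *	set_pmd_at(mm, haddr, pmd, huge_entry);
 *	spin_unlock(ptl);
 *
 * The split (or zap) path later calls pgtable_trans_huge_withdraw(mm, pmd)
 * under the same pmd lock, and reuses or frees the withdrawn page table.
 */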
#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
	return pmdp_invalidate(vma, address, pmdp);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * pmd and hugepage pte formats are the same, so we can
	 * use the same function.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down ptes not pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif

/* arch may define pte_free_defer in asm/pgalloc.h for its own implementation */
#ifndef pte_free_defer
static void pte_free_now(struct rcu_head *head)
{
	struct page *page;

	page = container_of(head, struct page, rcu_head);
	pte_free(NULL /* mm not passed and not needed */, page);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct page *page;

	page = pgtable;
	call_rcu(&page->rcu_head, pte_free_now);
}
#endif /* pte_free_defer */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#if defined(CONFIG_GUP_GET_PXX_LOW_HIGH) && \
	(defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK))
/*
 * See the comment above ptep_get_lockless() in include/linux/pgtable.h:
 * the barriers in pmdp_get_lockless() cannot guarantee that the value in
 * pmd_high actually belongs with the value in pmd_low; but holding interrupts
 * off blocks the TLB flush between present updates, which guarantees that a
 * successful __pte_offset_map() points to a page table read from one
 * consistent pmd value.
 */
static unsigned long pmdp_get_lockless_start(void)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	return irqflags;
}
static void pmdp_get_lockless_end(unsigned long irqflags)
{
	local_irq_restore(irqflags);
}
#else
static unsigned long pmdp_get_lockless_start(void) { return 0; }
static void pmdp_get_lockless_end(unsigned long irqflags) { }
#endif

pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
{
	unsigned long irqflags;
	pmd_t pmdval;

	rcu_read_lock();
	irqflags = pmdp_get_lockless_start();
	pmdval = pmdp_get_lockless(pmd);
	pmdp_get_lockless_end(irqflags);

	if (pmdvalp)
		*pmdvalp = pmdval;
	if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
		goto nomap;
	if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval)))
		goto nomap;
	if (unlikely(pmd_bad(pmdval))) {
		pmd_clear_bad(pmd);
		goto nomap;
	}
	return __pte_map(&pmdval, addr);
nomap:
	rcu_read_unlock();
	return NULL;
}

pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	pmd_t pmdval;
	pte_t *pte;

	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (likely(pte))
		*ptlp = pte_lockptr(mm, &pmdval);
	return pte;
}

/*
 * pte_offset_map_lock(mm, pmd, addr, ptlp), and its internal implementation
 * __pte_offset_map_lock() below, is usually called with the pmd pointer for
 * addr, reached by walking down the mm's pgd, p4d, pud for addr: either while
 * holding mmap_lock or vma lock for read or for write; or in truncate or rmap
 * context, while holding file's i_mmap_lock or anon_vma lock for read (or for
 * write).  In a few cases, it may be used with pmd pointing to a pmd_t already
 * copied to or constructed on the stack.
 *
 * When successful, it returns the pte pointer for addr, with its page table
 * kmapped if necessary (when CONFIG_HIGHPTE), and locked against concurrent
 * modification by software, with a pointer to that spinlock in ptlp (in some
 * configs mm->page_table_lock, in SPLIT_PTLOCK configs a spinlock in the
 * table's struct page).  Call pte_unmap_unlock(pte, ptl) to unlock and unmap
 * afterwards.
 *
 * But it is unsuccessful, returning NULL with *ptlp unchanged, if there is no
 * page table at *pmd: if, for example, the page table has just been removed,
 * or replaced by the huge pmd of a THP.  (When successful, *pmd is rechecked
 * after acquiring the ptlock, and retried internally if it changed: so that a
 * page table can be safely removed or replaced by THP while holding pmd lock.)
 *
 * pte_offset_map(pmd, addr), and its internal helper __pte_offset_map() above,
 * just returns the pte pointer for addr, its page table kmapped if necessary;
 * or NULL if there is no page table at *pmd.  It does not attempt to lock the
 * page table, so cannot normally be used when the page table is to be
 * modified, or when entries read must be stable.  But it does take
 * rcu_read_lock(): so that even when the page table is racily removed, it
 * remains a valid though empty and disconnected table, until pte_unmap(pte)
 * unmaps and rcu_read_unlock()s afterwards.
 *
 * pte_offset_map_nolock(mm, pmd, addr, ptlp), above, is like pte_offset_map();
 * but when successful, it also outputs a pointer to the spinlock in ptlp - as
 * pte_offset_map_lock() does, but in this case without locking it.  This helps
 * the caller to avoid a later pte_lockptr(mm, *pmd), which might by that time
 * act on a changed *pmd: pte_offset_map_nolock() provides the correct spinlock
 * pointer for the page table that it returns.  In principle, the caller should
 * recheck *pmd once the lock is taken; in practice, no callsite needs that -
 * either the mmap_lock for write, or a pte_same() check on contents, is enough.
 *
 * Note that free_pgtables(), used after unmapping detached vmas, or when
 * exiting the whole mm, does not take page table lock before freeing a page
 * table, and may not use RCU at all: so "outside world" walkers must not use
 * pte_offset_map() and co once the vma is detached.
 */
pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	spinlock_t *ptl;
	pmd_t pmdval;
	pte_t *pte;
again:
	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (unlikely(!pte))
		return pte;
	ptl = pte_lockptr(mm, &pmdval);
	spin_lock(ptl);
	if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	goto again;
}
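/*
 * Usage sketch (illustrative, not verbatim kernel source): since
 * pte_offset_map_lock() can fail, a page table walker checks for NULL and
 * treats it as "raced with THP collapse or zap: retry or skip this range":
 *
 *	pte_t *start_pte, *pte;
 *	spinlock_t *ptl;
 *
 *	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (!start_pte)
 *		return 0;	// no page table here any more
 *	for (; addr < end; pte++, addr += PAGE_SIZE) {
 *		pte_t ptent = ptep_get(pte);
 *		// examine or modify ptent while ptl is held
 *	}
 *	pte_unmap_unlock(start_pte, ptl);
 *
 * Walkers such as those in mm/madvise.c and mm/mprotect.c follow this
 * pattern; error handling and cond_resched() points are omitted here.
 */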