// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/error-injection.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
#include <linux/ramfs.h>
#include <linux/page_idle.h>
#include <linux/migrate.h>
#include <linux/pipe_fs_i.h>
#include <linux/splice.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

#include "swap.h"

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->block_dirty_folio)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_rwsem
 *    ->invalidate_lock		(acquired by fs in truncate path)
 *      ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_lock
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->i_pages lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_lock
 *    ->invalidate_lock		(filemap_fault)
 *      ->lock_page		(filemap_fault, access_process_vm)
 *
 *  ->i_rwsem			(generic_perform_write)
 *    ->mmap_lock		(fault_in_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->i_pages lock		(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_merge)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->i_pages lock		(try_to_unmap_one)
 *    ->lruvec->lru_lock	(follow_page_mask->mark_page_accessed)
 *    ->lruvec->lru_lock	(check_pte_range->folio_isolate_lru)
 *    ->private_lock		(folio_remove_rmap_pte->set_page_dirty)
 *    ->i_pages lock		(folio_remove_rmap_pte->set_page_dirty)
 *    bdi.wb->list_lock		(folio_remove_rmap_pte->set_page_dirty)
 *    ->inode->i_lock		(folio_remove_rmap_pte->set_page_dirty)
 *    ->memcg->move_lock	(folio_remove_rmap_pte->folio_memcg_lock)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->block_dirty_folio)
 */

static void mapping_set_update(struct xa_state *xas,
	struct address_space *mapping)
{
	if (dax_mapping(mapping) || shmem_mapping(mapping))
		return;
	xas_set_update(xas, workingset_update_node);
	xas_set_lru(xas, &shadow_nodes);
}

static void page_cache_delete(struct address_space *mapping,
				   struct folio *folio, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, folio->index);
	long nr = 1;

	mapping_set_update(&xas, mapping);

	xas_set_order(&xas, folio->index, folio_order(folio));
	nr = folio_nr_pages(folio);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	xas_store(&xas, shadow);
	xas_init_marks(&xas);

	folio->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */
	mapping->nrpages -= nr;
}

static void filemap_unaccount_folio(struct address_space *mapping,
		struct folio *folio)
{
	long nr;

	VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
			 current->comm, folio_pfn(folio));
		dump_page(&folio->page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		if (mapping_exiting(mapping) && !folio_test_large(folio)) {
			int mapcount = folio_mapcount(folio);

			if (folio_ref_count(folio) >= mapcount + 2) {
				/*
				 * All vmas have already been torn down, so it's
				 * a good bet that actually the folio is unmapped
				 * and we'd rather not leak it: if we're wrong,
				 * another bad page check should catch it later.
				 */
				atomic_set(&folio->_mapcount, -1);
				folio_ref_sub(folio, mapcount);
			}
		}
	}

	/* hugetlb folios do not participate in page cache accounting. */
	if (folio_test_hugetlb(folio))
		return;

	nr = folio_nr_pages(folio);

	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	if (folio_test_swapbacked(folio)) {
		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
		if (folio_test_pmd_mappable(folio))
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
	} else if (folio_test_pmd_mappable(folio)) {
		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
		filemap_nr_thps_dec(mapping);
	}

	/*
	 * At this point folio must be either written or cleaned by
	 * truncate.  Dirty folio here signals a bug and loss of
	 * unwritten data - on ordinary filesystems.
	 *
	 * But it's harmless on in-memory filesystems like tmpfs; and can
	 * occur when a driver which did get_user_pages() sets page dirty
	 * before putting it, while the inode is being finally evicted.
	 *
	 * Below fixes dirty accounting after removing the folio entirely
	 * but leaves the dirty flag set: it has no effect for truncated
	 * folio and anyway will be cleared before returning folio's
	 * memory to buddy allocator.
	 */
	if (WARN_ON_ONCE(folio_test_dirty(folio) &&
			 mapping_can_writeback(mapping)))
		folio_account_cleaned(folio, inode_to_wb(mapping->host));
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the i_pages lock.
 */
void __filemap_remove_folio(struct folio *folio, void *shadow)
{
	struct address_space *mapping = folio->mapping;

	trace_mm_filemap_delete_from_page_cache(folio);
	filemap_unaccount_folio(mapping, folio);
	page_cache_delete(mapping, folio, shadow);
}

void filemap_free_folio(struct address_space *mapping, struct folio *folio)
{
	void (*free_folio)(struct folio *);
	int refs = 1;

	free_folio = mapping->a_ops->free_folio;
	if (free_folio)
		free_folio(folio);

	if (folio_test_large(folio))
		refs = folio_nr_pages(folio);
	folio_put_refs(folio, refs);
}

/**
 * filemap_remove_folio - Remove folio from page cache.
 * @folio: The folio.
 *
 * This must be called only on folios that are locked and have been
 * verified to be in the page cache.  It will never put the folio into
 * the free list because the caller has a reference on the folio.
 */
void filemap_remove_folio(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	BUG_ON(!folio_test_locked(folio));
	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	__filemap_remove_folio(folio, NULL);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	filemap_free_folio(mapping, folio);
}

/*
 * page_cache_delete_batch - delete several folios from page cache
 * @mapping: the mapping to which folios belong
 * @fbatch: batch of folios to delete
 *
 * The function walks over mapping->i_pages and removes folios passed in
 * @fbatch from the mapping. The function expects @fbatch to be sorted
 * by page index and is optimised for it to be dense.
 * It tolerates holes in @fbatch (mapping entries at those indices are not
 * modified).
 *
 * The function expects the i_pages lock to be held.
 */
static void page_cache_delete_batch(struct address_space *mapping,
			     struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
	long total_pages = 0;
	int i = 0;
	struct folio *folio;

	mapping_set_update(&xas, mapping);
	xas_for_each(&xas, folio, ULONG_MAX) {
		if (i >= folio_batch_count(fbatch))
			break;

		/* A swap/dax/shadow entry got inserted? Skip it. */
		if (xa_is_value(folio))
			continue;
		/*
		 * A page got inserted in our range? Skip it. We have our
		 * pages locked so they are protected from being removed.
		 * If we see a page whose index is higher than ours, it
		 * means our page has been removed, which shouldn't be
		 * possible because we're holding the PageLock.
		 */
		if (folio != fbatch->folios[i]) {
			VM_BUG_ON_FOLIO(folio->index >
					fbatch->folios[i]->index, folio);
			continue;
		}

		WARN_ON_ONCE(!folio_test_locked(folio));

		folio->mapping = NULL;
		/* Leave folio->index set: truncation lookup relies on it */

		i++;
		xas_store(&xas, NULL);
		total_pages += folio_nr_pages(folio);
	}
	mapping->nrpages -= total_pages;
}

void delete_from_page_cache_batch(struct address_space *mapping,
				  struct folio_batch *fbatch)
{
	int i;

	if (!folio_batch_count(fbatch))
		return;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		trace_mm_filemap_delete_from_page_cache(folio);
		filemap_unaccount_folio(mapping, folio);
	}
	page_cache_delete_batch(mapping, fbatch);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	for (i = 0; i < folio_batch_count(fbatch); i++)
		filemap_free_folio(mapping, fbatch->folios[i]);
}

int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
	/* Check for outstanding write errors */
	if (test_bit(AS_EIO, &mapping->flags))
		return -EIO;
	if (test_bit(AS_ENOSPC, &mapping->flags))
		return -ENOSPC;
	return 0;
}

/**
 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @wbc:	the writeback_control controlling the writeout
 *
 * Call writepages on the mapping using the provided wbc to control the
 * writeout.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	int ret;

	if (!mapping_can_writeback(mapping) ||
	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	wbc_attach_fdatawrite_inode(wbc, mapping->host);
	ret = do_writepages(mapping, wbc);
	wbc_detach_inode(wbc);
	return ret;
}
EXPORT_SYMBOL(filemap_fdatawrite_wbc);

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			       loff_t end, int sync_mode)
{
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	return filemap_fdatawrite_wbc(mapping, &wbc);
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
			     loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
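
/*
 * Illustrative sketch (not part of upstream filemap.c): how the writeback
 * helpers above are typically combined by a caller that wants data
 * integrity for a byte range.  The helper name is invented for this
 * example; real users normally call filemap_write_and_wait_range(),
 * defined later in this file, which also folds in error reporting.
 */
static __maybe_unused int example_flush_range(struct address_space *mapping,
					      loff_t start, loff_t end)
{
	/* Start WB_SYNC_ALL writeback for every dirty page in the range. */
	int err = filemap_fdatawrite_range(mapping, start, end);

	/*
	 * -EIO may mean something went badly wrong; like
	 * filemap_write_and_wait_range(), don't bother waiting in that case.
	 */
	if (err == -EIO)
		return err;

	/* Now wait for the writeback (ours or anybody else's) to finish. */
	return filemap_fdatawait_range(mapping, start, end);
}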

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping:           address space within which to check
 * @start_byte:        offset in bytes where the range starts
 * @end_byte:          offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 *
 * Return: %true if at least one page exists in the specified range,
 * %false otherwise.
 */
bool filemap_range_has_page(struct address_space *mapping,
			   loff_t start_byte, loff_t end_byte)
{
	struct folio *folio;
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	for (;;) {
		folio = xas_find(&xas, max);
		if (xas_retry(&xas, folio))
			continue;
		/* Shadow entries don't count */
		if (xa_is_value(folio))
			continue;
		/*
		 * We don't need to try to pin this page; we're about to
		 * release the RCU lock anyway.  It is enough to know that
		 * there was a page here recently.
		 */
		break;
	}
	rcu_read_unlock();

	return folio != NULL;
}
EXPORT_SYMBOL(filemap_range_has_page);
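
/*
 * Illustrative sketch (not part of upstream filemap.c): the typical use of
 * filemap_range_has_page() is deciding whether a direct-I/O write needs to
 * flush the page cache for its byte range first.  The helper name and the
 * @count parameter are invented for this example.
 */
static __maybe_unused bool example_dio_needs_flush(struct address_space *mapping,
						   loff_t pos, size_t count)
{
	/* An empty range trivially has no cached pages. */
	if (!count)
		return false;

	/* The end offset is inclusive, hence the "- 1". */
	return filemap_range_has_page(mapping, pos, pos + count - 1);
}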

static void __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct folio_batch fbatch;
	unsigned nr_folios;

	folio_batch_init(&fbatch);

	while (index <= end) {
		unsigned i;

		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				PAGECACHE_TAG_WRITEBACK, &fbatch);

		if (!nr_folios)
			break;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_wait_writeback(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space in the
 * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
 * this function does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file:		file pointing to address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them.  Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space vs. the file->f_wb_err cursor.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);

/* Returns true if writeback might be needed or already in progress. */
static bool mapping_needs_writeback(struct address_space *mapping)
{
	return mapping->nrpages;
}

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte)
{
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;
	struct folio *folio;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	xas_for_each(&xas, folio, max) {
		if (xas_retry(&xas, folio))
			continue;
		if (xa_is_value(folio))
			continue;
		if (folio_test_dirty(folio) || folio_test_locked(folio) ||
				folio_test_writeback(folio))
			break;
	}
	rcu_read_unlock();
	return folio != NULL;
}
EXPORT_SYMBOL_GPL(filemap_range_has_writeback);
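
/*
 * Illustrative sketch (not part of upstream filemap.c): an IOCB_NOWAIT
 * direct-I/O read cannot afford to call filemap_write_and_wait_range(), so
 * it bails out with -EAGAIN while the range still has dirty, locked or
 * writeback pages.  The helper name is invented; the real check is done
 * through the filemap_range_needs_writeback() wrapper on the O_DIRECT
 * read path.
 */
static __maybe_unused int example_nowait_dio_check(struct kiocb *iocb,
						   size_t count)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;

	if (!count)
		return 0;

	if ((iocb->ki_flags & IOCB_NOWAIT) &&
	    filemap_range_has_writeback(mapping, iocb->ki_pos,
					iocb->ki_pos + count - 1))
		return -EAGAIN;

	return 0;
}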

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * Return: error status of the address space.
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0, err2;

	if (lend < lstart)
		return 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = filemap_check_errors(mapping);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);

void __filemap_set_wb_err(struct address_space *mapping, int err)
{
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);

/**
 * file_check_and_advance_wb_err - report wb error (if any) that was previously
 * 				   and advance wb_err to current one
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (a'la fsync, or NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_check_and_advance_wb_err(struct file *file)
{
	int err = 0;
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	/* Locklessly handle the common case where nothing has changed */
	if (errseq_check(&mapping->wb_err, old)) {
		/* Something changed, must use slow path */
		spin_lock(&file->f_lock);
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
						&file->f_wb_err);
		trace_file_check_and_advance_wb_err(file, old);
		spin_unlock(&file->f_lock);
	}

	/*
	 * We're mostly using this function as a drop in replacement for
	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
	 * that the legacy code would have had on these flags.
	 */
	clear_bit(AS_EIO, &mapping->flags);
	clear_bit(AS_ENOSPC, &mapping->flags);
	return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);
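
/*
 * Illustrative sketch (not part of upstream filemap.c): with the errseq_t
 * scheme described above, each struct file sees a given writeback error
 * exactly once.  A minimal ->fsync() implementation (the name is invented
 * here) gets this behaviour for free from file_write_and_wait_range(),
 * defined just below, which ends with file_check_and_advance_wb_err().
 */
static __maybe_unused int example_fsync(struct file *file, loff_t start,
					loff_t end, int datasync)
{
	/*
	 * Write back the range, wait for it, then advance this file's
	 * f_wb_err cursor so any error is reported to this caller once.
	 */
	return file_write_and_wait_range(file, start, end);
}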

/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file:	file pointing to address_space with pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
	int err = 0, err2;
	struct address_space *mapping = file->f_mapping;

	if (lend < lstart)
		return 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = file_check_and_advance_wb_err(file);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);

/**
 * replace_page_cache_folio - replace a pagecache folio with a new one
 * @old:	folio to be replaced
 * @new:	folio to replace with
 *
 * This function replaces a folio in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new folio and
 * drops it for the old folio.  Both the old and new folios must be
 * locked.  This function does not add the new folio to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  This function cannot fail.
 */
void replace_page_cache_folio(struct folio *old, struct folio *new)
{
	struct address_space *mapping = old->mapping;
	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
	pgoff_t offset = old->index;
	XA_STATE(xas, &mapping->i_pages, offset);

	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
	VM_BUG_ON_FOLIO(new->mapping, new);

	folio_get(new);
	new->mapping = mapping;
	new->index = offset;

	mem_cgroup_replace_folio(old, new);

	xas_lock_irq(&xas);
	xas_store(&xas, new);

	old->mapping = NULL;
	/* hugetlb pages do not participate in page cache accounting. */
	if (!folio_test_hugetlb(old))
		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
	if (!folio_test_hugetlb(new))
		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
	if (folio_test_swapbacked(old))
		__lruvec_stat_sub_folio(old, NR_SHMEM);
	if (folio_test_swapbacked(new))
		__lruvec_stat_add_folio(new, NR_SHMEM);
	xas_unlock_irq(&xas);
	if (free_folio)
		free_folio(old);
	folio_put(old);
}
EXPORT_SYMBOL_GPL(replace_page_cache_folio);

noinline int __filemap_add_folio(struct address_space *mapping,
		struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *alloced_shadow = NULL;
	int alloced_order = 0;
	bool huge;
	long nr;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
	VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
			folio);
	mapping_set_update(&xas, mapping);

	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
	xas_set_order(&xas, index, folio_order(folio));
	huge = folio_test_hugetlb(folio);
	nr = folio_nr_pages(folio);

	gfp &= GFP_RECLAIM_MASK;
	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = xas.xa_index;

	for (;;) {
		int order = -1, split_order = 0;
		void *entry, *old = NULL;

		xas_lock_irq(&xas);
		xas_for_each_conflict(&xas, entry) {
			old = entry;
			if (!xa_is_value(entry)) {
				xas_set_err(&xas, -EEXIST);
				goto unlock;
			}
			/*
			 * If a larger entry exists,
			 * it will be the first and only entry iterated.
			 */
			if (order == -1)
				order = xas_get_order(&xas);
		}

		/* entry may have changed before we re-acquire the lock */
		if (alloced_order && (old != alloced_shadow || order != alloced_order)) {
			xas_destroy(&xas);
			alloced_order = 0;
		}

		if (old) {
			if (order > 0 && order > folio_order(folio)) {
				/* How to handle large swap entries? */
				BUG_ON(shmem_mapping(mapping));
				if (!alloced_order) {
					split_order = order;
					goto unlock;
				}
				xas_split(&xas, old, order);
				xas_reset(&xas);
			}
			if (shadowp)
				*shadowp = old;
		}

		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;

		mapping->nrpages += nr;

		/* hugetlb pages do not participate in page cache accounting */
		if (!huge) {
			__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
			if (folio_test_pmd_mappable(folio))
				__lruvec_stat_mod_folio(folio,
						NR_FILE_THPS, nr);
		}

unlock:
		xas_unlock_irq(&xas);

		/* split needed, alloc here and retry. */
		if (split_order) {
			xas_split_alloc(&xas, old, split_order, gfp);
			if (xas_error(&xas))
				goto error;
			alloced_shadow = old;
			alloced_order = split_order;
			xas_reset(&xas);
			continue;
		}

		if (!xas_nomem(&xas, gfp))
			break;
	}

	if (xas_error(&xas))
		goto error;

	trace_mm_filemap_add_to_page_cache(folio);
	return 0;
error:
	folio->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	folio_put_refs(folio, nr);
	return xas_error(&xas);
}
ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);

int filemap_add_folio(struct address_space *mapping, struct folio *folio,
				pgoff_t index, gfp_t gfp)
{
	void *shadow = NULL;
	int ret;

	ret = mem_cgroup_charge(folio, NULL, gfp);
	if (ret)
		return ret;

	__folio_set_locked(folio);
	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
	if (unlikely(ret)) {
		mem_cgroup_uncharge(folio);
		__folio_clear_locked(folio);
	} else {
		/*
		 * The folio might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed folio.
		 * The exception is folios getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		WARN_ON_ONCE(folio_test_active(folio));
		if (!(gfp & __GFP_WRITE) && shadow)
			workingset_refault(folio, shadow);
		folio_add_lru(folio);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(filemap_add_folio);
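
/*
 * Illustrative sketch (not part of upstream filemap.c): the usual way a
 * caller populates the cache is to allocate a folio, add it at an index
 * with filemap_add_folio(), and drop the allocation reference on failure.
 * The helper name and the mapping_gfp_mask() choice of gfp flags are just
 * examples, not a prescription.
 */
static __maybe_unused struct folio *example_add_new_folio(struct address_space *mapping,
							   pgoff_t index)
{
	struct folio *folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
	int err;

	if (!folio)
		return ERR_PTR(-ENOMEM);

	/* On success the folio is locked, in the cache and on the LRU. */
	err = filemap_add_folio(mapping, folio, index, mapping_gfp_mask(mapping));
	if (err) {
		folio_put(folio);	/* -EEXIST means somebody beat us to it */
		return ERR_PTR(err);
	}
	return folio;
}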
973 * The page might have been evicted from cache only 979 * recently, in which case it 974 * recently, in which case it should be activated like 980 * any other repeatedly access !! 975 * any other repeatedly accessed page. 981 * The exception is folios get !! 976 * The exception is pages getting rewritten; evicting other 982 * data from the working set, 977 * data from the working set, only to cache data that will 983 * get overwritten with someth 978 * get overwritten with something else, is a waste of memory. 984 */ 979 */ 985 WARN_ON_ONCE(folio_test_active !! 980 WARN_ON_ONCE(PageActive(page)); 986 if (!(gfp & __GFP_WRITE) && sh !! 981 if (!(gfp_mask & __GFP_WRITE) && shadow) 987 workingset_refault(fol !! 982 workingset_refault(page, shadow); 988 folio_add_lru(folio); !! 983 lru_cache_add(page); 989 } 984 } 990 return ret; 985 return ret; 991 } 986 } 992 EXPORT_SYMBOL_GPL(filemap_add_folio); !! 987 EXPORT_SYMBOL_GPL(add_to_page_cache_lru); 993 988 994 #ifdef CONFIG_NUMA 989 #ifdef CONFIG_NUMA 995 struct folio *filemap_alloc_folio_noprof(gfp_t !! 990 struct page *__page_cache_alloc(gfp_t gfp) 996 { 991 { 997 int n; 992 int n; 998 struct folio *folio; !! 993 struct page *page; 999 994 1000 if (cpuset_do_page_mem_spread()) { 995 if (cpuset_do_page_mem_spread()) { 1001 unsigned int cpuset_mems_cook 996 unsigned int cpuset_mems_cookie; 1002 do { 997 do { 1003 cpuset_mems_cookie = 998 cpuset_mems_cookie = read_mems_allowed_begin(); 1004 n = cpuset_mem_spread 999 n = cpuset_mem_spread_node(); 1005 folio = __folio_alloc !! 1000 page = __alloc_pages_node(n, gfp, 0); 1006 } while (!folio && read_mems_ !! 1001 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); 1007 1002 1008 return folio; !! 1003 return page; 1009 } 1004 } 1010 return folio_alloc_noprof(gfp, order) !! 1005 return alloc_pages(gfp, 0); 1011 } 1006 } 1012 EXPORT_SYMBOL(filemap_alloc_folio_noprof); !! 1007 EXPORT_SYMBOL(__page_cache_alloc); 1013 #endif 1008 #endif 1014 1009 1015 /* 1010 /* 1016 * filemap_invalidate_lock_two - lock invalid << 1017 * << 1018 * Lock exclusively invalidate_lock of any pa << 1019 * << 1020 * @mapping1: the first mapping to lock << 1021 * @mapping2: the second mapping to lock << 1022 */ << 1023 void filemap_invalidate_lock_two(struct addre << 1024 struct addre << 1025 { << 1026 if (mapping1 > mapping2) << 1027 swap(mapping1, mapping2); << 1028 if (mapping1) << 1029 down_write(&mapping1->invalid << 1030 if (mapping2 && mapping1 != mapping2) << 1031 down_write_nested(&mapping2-> << 1032 } << 1033 EXPORT_SYMBOL(filemap_invalidate_lock_two); << 1034 << 1035 /* << 1036 * filemap_invalidate_unlock_two - unlock inv << 1037 * << 1038 * Unlock exclusive invalidate_lock of any pa << 1039 * << 1040 * @mapping1: the first mapping to unlock << 1041 * @mapping2: the second mapping to unlock << 1042 */ << 1043 void filemap_invalidate_unlock_two(struct add << 1044 struct add << 1045 { << 1046 if (mapping1) << 1047 up_write(&mapping1->invalidat << 1048 if (mapping2 && mapping1 != mapping2) << 1049 up_write(&mapping2->invalidat << 1050 } << 1051 EXPORT_SYMBOL(filemap_invalidate_unlock_two); << 1052 << 1053 /* << 1054 * In order to wait for pages to become avail 1011 * In order to wait for pages to become available there must be 1055 * waitqueues associated with pages. By using 1012 * waitqueues associated with pages. 
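/*
 * Usage sketch (illustrative only, not part of filemap.c): the helpers
 * above -- filemap_alloc_folio()/filemap_add_folio() in the folio-based
 * version, __page_cache_alloc()/add_to_page_cache_lru() in the
 * page-based one -- are what a filesystem read path typically calls to
 * instantiate the cache itself.  The myfs_* name is hypothetical.
 */
#include <linux/err.h>
#include <linux/pagemap.h>

/* Returns the new folio locked; the caller reads into it and unlocks. */
static struct folio *myfs_create_cached(struct address_space *mapping,
					pgoff_t index, gfp_t gfp)
{
	struct folio *folio = filemap_alloc_folio(gfp, 0);
	int err;

	if (!folio)
		return ERR_PTR(-ENOMEM);

	/* Charges memcg, locks the folio and adds it to the LRU. */
	err = filemap_add_folio(mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return ERR_PTR(err);	/* -EEXIST: lost the race to add */
	}
	return folio;
}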
By using a hash table of 1056 * waitqueues where the bucket discipline is 1013 * waitqueues where the bucket discipline is to maintain all 1057 * waiters on the same queue and wake all whe 1014 * waiters on the same queue and wake all when any of the pages 1058 * become available, and for the woken contex 1015 * become available, and for the woken contexts to check to be 1059 * sure the appropriate page became available 1016 * sure the appropriate page became available, this saves space 1060 * at a cost of "thundering herd" phenomena d 1017 * at a cost of "thundering herd" phenomena during rare hash 1061 * collisions. 1018 * collisions. 1062 */ 1019 */ 1063 #define PAGE_WAIT_TABLE_BITS 8 1020 #define PAGE_WAIT_TABLE_BITS 8 1064 #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_ 1021 #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS) 1065 static wait_queue_head_t folio_wait_table[PAG !! 1022 static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned; 1066 1023 1067 static wait_queue_head_t *folio_waitqueue(str !! 1024 static wait_queue_head_t *page_waitqueue(struct page *page) 1068 { 1025 { 1069 return &folio_wait_table[hash_ptr(fol !! 1026 return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)]; 1070 } 1027 } 1071 1028 1072 void __init pagecache_init(void) 1029 void __init pagecache_init(void) 1073 { 1030 { 1074 int i; 1031 int i; 1075 1032 1076 for (i = 0; i < PAGE_WAIT_TABLE_SIZE; 1033 for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++) 1077 init_waitqueue_head(&folio_wa !! 1034 init_waitqueue_head(&page_wait_table[i]); 1078 1035 1079 page_writeback_init(); 1036 page_writeback_init(); 1080 } 1037 } 1081 1038 1082 /* 1039 /* 1083 * The page wait code treats the "wait->flags 1040 * The page wait code treats the "wait->flags" somewhat unusually, because 1084 * we have multiple different kinds of waits, 1041 * we have multiple different kinds of waits, not just the usual "exclusive" 1085 * one. 1042 * one. 1086 * 1043 * 1087 * We have: 1044 * We have: 1088 * 1045 * 1089 * (a) no special bits set: 1046 * (a) no special bits set: 1090 * 1047 * 1091 * We're just waiting for the bit to be 1048 * We're just waiting for the bit to be released, and when a waker 1092 * calls the wakeup function, we set WQ_ 1049 * calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up, 1093 * and remove it from the wait queue. 1050 * and remove it from the wait queue. 1094 * 1051 * 1095 * Simple and straightforward. 1052 * Simple and straightforward. 1096 * 1053 * 1097 * (b) WQ_FLAG_EXCLUSIVE: 1054 * (b) WQ_FLAG_EXCLUSIVE: 1098 * 1055 * 1099 * The waiter is waiting to get the lock 1056 * The waiter is waiting to get the lock, and only one waiter should 1100 * be woken up to avoid any thundering h 1057 * be woken up to avoid any thundering herd behavior. We'll set the 1101 * WQ_FLAG_WOKEN bit, wake it up, and re 1058 * WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue. 1102 * 1059 * 1103 * This is the traditional exclusive wai 1060 * This is the traditional exclusive wait. 1104 * 1061 * 1105 * (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM: 1062 * (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM: 1106 * 1063 * 1107 * The waiter is waiting to get the bit, 1064 * The waiter is waiting to get the bit, and additionally wants the 1108 * lock to be transferred to it for fair 1065 * lock to be transferred to it for fair lock behavior. If the lock 1109 * cannot be taken, we stop walking the 1066 * cannot be taken, we stop walking the wait queue without waking 1110 * the waiter. 1067 * the waiter. 
1111 * 1068 * 1112 * This is the "fair lock handoff" case, 1069 * This is the "fair lock handoff" case, and in addition to setting 1113 * WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to 1070 * WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see 1114 * that it now has the lock. 1071 * that it now has the lock. 1115 */ 1072 */ 1116 static int wake_page_function(wait_queue_entr 1073 static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) 1117 { 1074 { 1118 unsigned int flags; 1075 unsigned int flags; 1119 struct wait_page_key *key = arg; 1076 struct wait_page_key *key = arg; 1120 struct wait_page_queue *wait_page 1077 struct wait_page_queue *wait_page 1121 = container_of(wait, struct w 1078 = container_of(wait, struct wait_page_queue, wait); 1122 1079 1123 if (!wake_page_match(wait_page, key)) 1080 if (!wake_page_match(wait_page, key)) 1124 return 0; 1081 return 0; 1125 1082 1126 /* 1083 /* 1127 * If it's a lock handoff wait, we ge 1084 * If it's a lock handoff wait, we get the bit for it, and 1128 * stop walking (and do not wake it u 1085 * stop walking (and do not wake it up) if we can't. 1129 */ 1086 */ 1130 flags = wait->flags; 1087 flags = wait->flags; 1131 if (flags & WQ_FLAG_EXCLUSIVE) { 1088 if (flags & WQ_FLAG_EXCLUSIVE) { 1132 if (test_bit(key->bit_nr, &ke !! 1089 if (test_bit(key->bit_nr, &key->page->flags)) 1133 return -1; 1090 return -1; 1134 if (flags & WQ_FLAG_CUSTOM) { 1091 if (flags & WQ_FLAG_CUSTOM) { 1135 if (test_and_set_bit( !! 1092 if (test_and_set_bit(key->bit_nr, &key->page->flags)) 1136 return -1; 1093 return -1; 1137 flags |= WQ_FLAG_DONE 1094 flags |= WQ_FLAG_DONE; 1138 } 1095 } 1139 } 1096 } 1140 1097 1141 /* 1098 /* 1142 * We are holding the wait-queue lock 1099 * We are holding the wait-queue lock, but the waiter that 1143 * is waiting for this will be checki 1100 * is waiting for this will be checking the flags without 1144 * any locking. 1101 * any locking. 1145 * 1102 * 1146 * So update the flags atomically, an 1103 * So update the flags atomically, and wake up the waiter 1147 * afterwards to avoid any races. Thi 1104 * afterwards to avoid any races. This store-release pairs 1148 * with the load-acquire in folio_wai !! 1105 * with the load-acquire in wait_on_page_bit_common(). 1149 */ 1106 */ 1150 smp_store_release(&wait->flags, flags 1107 smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN); 1151 wake_up_state(wait->private, mode); 1108 wake_up_state(wait->private, mode); 1152 1109 1153 /* 1110 /* 1154 * Ok, we have successfully done what 1111 * Ok, we have successfully done what we're waiting for, 1155 * and we can unconditionally remove 1112 * and we can unconditionally remove the wait entry. 1156 * 1113 * 1157 * Note that this pairs with the "fin 1114 * Note that this pairs with the "finish_wait()" in the 1158 * waiter, and has to be the absolute 1115 * waiter, and has to be the absolute last thing we do. 1159 * After this list_del_init(&wait->en 1116 * After this list_del_init(&wait->entry) the wait entry 1160 * might be de-allocated and the proc 1117 * might be de-allocated and the process might even have 1161 * exited. 1118 * exited. 1162 */ 1119 */ 1163 list_del_init_careful(&wait->entry); 1120 list_del_init_careful(&wait->entry); 1164 return (flags & WQ_FLAG_EXCLUSIVE) != 1121 return (flags & WQ_FLAG_EXCLUSIVE) != 0; 1165 } 1122 } 1166 1123 1167 static void folio_wake_bit(struct folio *foli !! 
1124 static void wake_up_page_bit(struct page *page, int bit_nr) 1168 { 1125 { 1169 wait_queue_head_t *q = folio_waitqueu !! 1126 wait_queue_head_t *q = page_waitqueue(page); 1170 struct wait_page_key key; 1127 struct wait_page_key key; 1171 unsigned long flags; 1128 unsigned long flags; >> 1129 wait_queue_entry_t bookmark; 1172 1130 1173 key.folio = folio; !! 1131 key.page = page; 1174 key.bit_nr = bit_nr; 1132 key.bit_nr = bit_nr; 1175 key.page_match = 0; 1133 key.page_match = 0; 1176 1134 >> 1135 bookmark.flags = 0; >> 1136 bookmark.private = NULL; >> 1137 bookmark.func = NULL; >> 1138 INIT_LIST_HEAD(&bookmark.entry); >> 1139 1177 spin_lock_irqsave(&q->lock, flags); 1140 spin_lock_irqsave(&q->lock, flags); 1178 __wake_up_locked_key(q, TASK_NORMAL, !! 1141 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark); >> 1142 >> 1143 while (bookmark.flags & WQ_FLAG_BOOKMARK) { >> 1144 /* >> 1145 * Take a breather from holding the lock, >> 1146 * allow pages that finish wake up asynchronously >> 1147 * to acquire the lock and remove themselves >> 1148 * from wait queue >> 1149 */ >> 1150 spin_unlock_irqrestore(&q->lock, flags); >> 1151 cpu_relax(); >> 1152 spin_lock_irqsave(&q->lock, flags); >> 1153 __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark); >> 1154 } 1179 1155 1180 /* 1156 /* 1181 * It's possible to miss clearing wai !! 1157 * It is possible for other pages to have collided on the waitqueue 1182 * waiters, but the hashed waitqueue !! 1158 * hash, so in that case check for a page match. That prevents a long- 1183 * That's okay, it's a rare case. The !! 1159 * term waiter 1184 * 1160 * 1185 * Note that, depending on the page p !! 1161 * It is still possible to miss a case here, when we woke page waiters 1186 * other), the flag may be cleared in !! 1162 * and removed them from the waitqueue, but there are still other 1187 * but that is not required for corre !! 1163 * page waiters. 1188 */ !! 1164 */ 1189 if (!waitqueue_active(q) || !key.page !! 1165 if (!waitqueue_active(q) || !key.page_match) { 1190 folio_clear_waiters(folio); !! 1166 ClearPageWaiters(page); 1191 !! 1167 /* >> 1168 * It's possible to miss clearing Waiters here, when we woke >> 1169 * our page waiters, but the hashed waitqueue has waiters for >> 1170 * other pages on it. >> 1171 * >> 1172 * That's okay, it's a rare case. The next waker will clear it. >> 1173 */ >> 1174 } 1192 spin_unlock_irqrestore(&q->lock, flag 1175 spin_unlock_irqrestore(&q->lock, flags); 1193 } 1176 } 1194 1177 >> 1178 static void wake_up_page(struct page *page, int bit) >> 1179 { >> 1180 if (!PageWaiters(page)) >> 1181 return; >> 1182 wake_up_page_bit(page, bit); >> 1183 } >> 1184 1195 /* 1185 /* 1196 * A choice of three behaviors for folio_wait !! 1186 * A choice of three behaviors for wait_on_page_bit_common(): 1197 */ 1187 */ 1198 enum behavior { 1188 enum behavior { 1199 EXCLUSIVE, /* Hold ref to page a 1189 EXCLUSIVE, /* Hold ref to page and take the bit when woken, like 1200 * __folio_lock() wai !! 1190 * __lock_page() waiting on then setting PG_locked. 1201 */ 1191 */ 1202 SHARED, /* Hold ref to page a 1192 SHARED, /* Hold ref to page and check the bit when woken, like 1203 * folio_wait_writeba !! 1193 * wait_on_page_writeback() waiting on PG_writeback. 1204 */ 1194 */ 1205 DROP, /* Drop ref to page b 1195 DROP, /* Drop ref to page before wait, no check when woken, 1206 * like folio_put_wai !! 1196 * like put_and_wait_on_page_locked() on PG_locked. 
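/*
 * In caller-visible terms the three behaviors map onto the public
 * helpers: folio_lock()/lock_page() is an EXCLUSIVE wait,
 * folio_wait_writeback()/wait_on_page_writeback() is a SHARED wait,
 * and the put-and-wait helpers named above are the DROP case.  A
 * minimal sketch of the DROP pattern (the shape migration waiters use,
 * shown with the page-based naming; 'ptl' is whatever page-table lock
 * made the page pointer stable, and the surrounding fault path is
 * elided):
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

static void myfs_wait_for_migration(struct page *page, spinlock_t *ptl)
{
	get_page(page);
	spin_unlock(ptl);
	/* The reference is dropped before sleeping: no long-term pin. */
	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
}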
1207 */ 1197 */ 1208 }; 1198 }; 1209 1199 1210 /* 1200 /* 1211 * Attempt to check (or get) the folio flag, !! 1201 * Attempt to check (or get) the page bit, and mark us done 1212 * if successful. 1202 * if successful. 1213 */ 1203 */ 1214 static inline bool folio_trylock_flag(struct !! 1204 static inline bool trylock_page_bit_common(struct page *page, int bit_nr, 1215 struc 1205 struct wait_queue_entry *wait) 1216 { 1206 { 1217 if (wait->flags & WQ_FLAG_EXCLUSIVE) 1207 if (wait->flags & WQ_FLAG_EXCLUSIVE) { 1218 if (test_and_set_bit(bit_nr, !! 1208 if (test_and_set_bit(bit_nr, &page->flags)) 1219 return false; 1209 return false; 1220 } else if (test_bit(bit_nr, &folio->f !! 1210 } else if (test_bit(bit_nr, &page->flags)) 1221 return false; 1211 return false; 1222 1212 1223 wait->flags |= WQ_FLAG_WOKEN | WQ_FLA 1213 wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE; 1224 return true; 1214 return true; 1225 } 1215 } 1226 1216 1227 /* How many times do we accept lock stealing 1217 /* How many times do we accept lock stealing from under a waiter? */ 1228 int sysctl_page_lock_unfairness = 5; 1218 int sysctl_page_lock_unfairness = 5; 1229 1219 1230 static inline int folio_wait_bit_common(struc !! 1220 static inline int wait_on_page_bit_common(wait_queue_head_t *q, 1231 int state, enum behavior beha !! 1221 struct page *page, int bit_nr, int state, enum behavior behavior) 1232 { 1222 { 1233 wait_queue_head_t *q = folio_waitqueu << 1234 int unfairness = sysctl_page_lock_unf 1223 int unfairness = sysctl_page_lock_unfairness; 1235 struct wait_page_queue wait_page; 1224 struct wait_page_queue wait_page; 1236 wait_queue_entry_t *wait = &wait_page 1225 wait_queue_entry_t *wait = &wait_page.wait; 1237 bool thrashing = false; 1226 bool thrashing = false; >> 1227 bool delayacct = false; 1238 unsigned long pflags; 1228 unsigned long pflags; 1239 bool in_thrashing; << 1240 1229 1241 if (bit_nr == PG_locked && 1230 if (bit_nr == PG_locked && 1242 !folio_test_uptodate(folio) && fo !! 1231 !PageUptodate(page) && PageWorkingset(page)) { 1243 delayacct_thrashing_start(&in !! 1232 if (!PageSwapBacked(page)) { >> 1233 delayacct_thrashing_start(); >> 1234 delayacct = true; >> 1235 } 1244 psi_memstall_enter(&pflags); 1236 psi_memstall_enter(&pflags); 1245 thrashing = true; 1237 thrashing = true; 1246 } 1238 } 1247 1239 1248 init_wait(wait); 1240 init_wait(wait); 1249 wait->func = wake_page_function; 1241 wait->func = wake_page_function; 1250 wait_page.folio = folio; !! 1242 wait_page.page = page; 1251 wait_page.bit_nr = bit_nr; 1243 wait_page.bit_nr = bit_nr; 1252 1244 1253 repeat: 1245 repeat: 1254 wait->flags = 0; 1246 wait->flags = 0; 1255 if (behavior == EXCLUSIVE) { 1247 if (behavior == EXCLUSIVE) { 1256 wait->flags = WQ_FLAG_EXCLUSI 1248 wait->flags = WQ_FLAG_EXCLUSIVE; 1257 if (--unfairness < 0) 1249 if (--unfairness < 0) 1258 wait->flags |= WQ_FLA 1250 wait->flags |= WQ_FLAG_CUSTOM; 1259 } 1251 } 1260 1252 1261 /* 1253 /* 1262 * Do one last check whether we can g 1254 * Do one last check whether we can get the 1263 * page bit synchronously. 1255 * page bit synchronously. 1264 * 1256 * 1265 * Do the folio_set_waiters() marking !! 
1257 * Do the SetPageWaiters() marking before that 1266 * to let any waker we _just_ missed 1258 * to let any waker we _just_ missed know they 1267 * need to wake us up (otherwise they 1259 * need to wake us up (otherwise they'll never 1268 * even go to the slow case that look 1260 * even go to the slow case that looks at the 1269 * page queue), and add ourselves to 1261 * page queue), and add ourselves to the wait 1270 * queue if we need to sleep. 1262 * queue if we need to sleep. 1271 * 1263 * 1272 * This part needs to be done under t 1264 * This part needs to be done under the queue 1273 * lock to avoid races. 1265 * lock to avoid races. 1274 */ 1266 */ 1275 spin_lock_irq(&q->lock); 1267 spin_lock_irq(&q->lock); 1276 folio_set_waiters(folio); !! 1268 SetPageWaiters(page); 1277 if (!folio_trylock_flag(folio, bit_nr !! 1269 if (!trylock_page_bit_common(page, bit_nr, wait)) 1278 __add_wait_queue_entry_tail(q 1270 __add_wait_queue_entry_tail(q, wait); 1279 spin_unlock_irq(&q->lock); 1271 spin_unlock_irq(&q->lock); 1280 1272 1281 /* 1273 /* 1282 * From now on, all the logic will be 1274 * From now on, all the logic will be based on 1283 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE 1275 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to 1284 * see whether the page bit testing h 1276 * see whether the page bit testing has already 1285 * been done by the wake function. 1277 * been done by the wake function. 1286 * 1278 * 1287 * We can drop our reference to the f !! 1279 * We can drop our reference to the page. 1288 */ 1280 */ 1289 if (behavior == DROP) 1281 if (behavior == DROP) 1290 folio_put(folio); !! 1282 put_page(page); 1291 1283 1292 /* 1284 /* 1293 * Note that until the "finish_wait() 1285 * Note that until the "finish_wait()", or until 1294 * we see the WQ_FLAG_WOKEN flag, we 1286 * we see the WQ_FLAG_WOKEN flag, we need to 1295 * be very careful with the 'wait->fl 1287 * be very careful with the 'wait->flags', because 1296 * we may race with a waker that sets 1288 * we may race with a waker that sets them. 1297 */ 1289 */ 1298 for (;;) { 1290 for (;;) { 1299 unsigned int flags; 1291 unsigned int flags; 1300 1292 1301 set_current_state(state); 1293 set_current_state(state); 1302 1294 1303 /* Loop until we've been woke 1295 /* Loop until we've been woken or interrupted */ 1304 flags = smp_load_acquire(&wai 1296 flags = smp_load_acquire(&wait->flags); 1305 if (!(flags & WQ_FLAG_WOKEN)) 1297 if (!(flags & WQ_FLAG_WOKEN)) { 1306 if (signal_pending_st 1298 if (signal_pending_state(state, current)) 1307 break; 1299 break; 1308 1300 1309 io_schedule(); 1301 io_schedule(); 1310 continue; 1302 continue; 1311 } 1303 } 1312 1304 1313 /* If we were non-exclusive, 1305 /* If we were non-exclusive, we're done */ 1314 if (behavior != EXCLUSIVE) 1306 if (behavior != EXCLUSIVE) 1315 break; 1307 break; 1316 1308 1317 /* If the waker got the lock 1309 /* If the waker got the lock for us, we're done */ 1318 if (flags & WQ_FLAG_DONE) 1310 if (flags & WQ_FLAG_DONE) 1319 break; 1311 break; 1320 1312 1321 /* 1313 /* 1322 * Otherwise, if we're gettin 1314 * Otherwise, if we're getting the lock, we need to 1323 * try to get it ourselves. 1315 * try to get it ourselves. 1324 * 1316 * 1325 * And if that fails, we'll h 1317 * And if that fails, we'll have to retry this all. 1326 */ 1318 */ 1327 if (unlikely(test_and_set_bit !! 
1319 if (unlikely(test_and_set_bit(bit_nr, &page->flags))) 1328 goto repeat; 1320 goto repeat; 1329 1321 1330 wait->flags |= WQ_FLAG_DONE; 1322 wait->flags |= WQ_FLAG_DONE; 1331 break; 1323 break; 1332 } 1324 } 1333 1325 1334 /* 1326 /* 1335 * If a signal happened, this 'finish 1327 * If a signal happened, this 'finish_wait()' may remove the last 1336 * waiter from the wait-queues, but t !! 1328 * waiter from the wait-queues, but the PageWaiters bit will remain 1337 * set. That's ok. The next wakeup wi 1329 * set. That's ok. The next wakeup will take care of it, and trying 1338 * to do it here would be difficult a 1330 * to do it here would be difficult and prone to races. 1339 */ 1331 */ 1340 finish_wait(q, wait); 1332 finish_wait(q, wait); 1341 1333 1342 if (thrashing) { 1334 if (thrashing) { 1343 delayacct_thrashing_end(&in_t !! 1335 if (delayacct) >> 1336 delayacct_thrashing_end(); 1344 psi_memstall_leave(&pflags); 1337 psi_memstall_leave(&pflags); 1345 } 1338 } 1346 1339 1347 /* 1340 /* 1348 * NOTE! The wait->flags weren't stab 1341 * NOTE! The wait->flags weren't stable until we've done the 1349 * 'finish_wait()', and we could have 1342 * 'finish_wait()', and we could have exited the loop above due 1350 * to a signal, and had a wakeup even 1343 * to a signal, and had a wakeup event happen after the signal 1351 * test but before the 'finish_wait() 1344 * test but before the 'finish_wait()'. 1352 * 1345 * 1353 * So only after the finish_wait() ca 1346 * So only after the finish_wait() can we reliably determine 1354 * if we got woken up or not, so we c 1347 * if we got woken up or not, so we can now figure out the final 1355 * return value based on that state w 1348 * return value based on that state without races. 1356 * 1349 * 1357 * Also note that WQ_FLAG_WOKEN is su 1350 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive 1358 * waiter, but an exclusive one requi 1351 * waiter, but an exclusive one requires WQ_FLAG_DONE. 1359 */ 1352 */ 1360 if (behavior == EXCLUSIVE) 1353 if (behavior == EXCLUSIVE) 1361 return wait->flags & WQ_FLAG_ 1354 return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR; 1362 1355 1363 return wait->flags & WQ_FLAG_WOKEN ? 1356 return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR; 1364 } 1357 } 1365 1358 1366 #ifdef CONFIG_MIGRATION !! 1359 void wait_on_page_bit(struct page *page, int bit_nr) 1367 /** << 1368 * migration_entry_wait_on_locked - Wait for << 1369 * @entry: migration swap entry. << 1370 * @ptl: already locked ptl. This function wi << 1371 * << 1372 * Wait for a migration entry referencing the << 1373 * equivalent to put_and_wait_on_page_locked( << 1374 * this can be called without taking a refere << 1375 * should be called while holding the ptl for << 1376 * the page. << 1377 * << 1378 * Returns after unlocking the ptl. << 1379 * << 1380 * This follows the same logic as folio_wait_ << 1381 * there. 
<< 1382 */ << 1383 void migration_entry_wait_on_locked(swp_entry << 1384 __releases(ptl) << 1385 { << 1386 struct wait_page_queue wait_page; << 1387 wait_queue_entry_t *wait = &wait_page << 1388 bool thrashing = false; << 1389 unsigned long pflags; << 1390 bool in_thrashing; << 1391 wait_queue_head_t *q; << 1392 struct folio *folio = pfn_swap_entry_ << 1393 << 1394 q = folio_waitqueue(folio); << 1395 if (!folio_test_uptodate(folio) && fo << 1396 delayacct_thrashing_start(&in << 1397 psi_memstall_enter(&pflags); << 1398 thrashing = true; << 1399 } << 1400 << 1401 init_wait(wait); << 1402 wait->func = wake_page_function; << 1403 wait_page.folio = folio; << 1404 wait_page.bit_nr = PG_locked; << 1405 wait->flags = 0; << 1406 << 1407 spin_lock_irq(&q->lock); << 1408 folio_set_waiters(folio); << 1409 if (!folio_trylock_flag(folio, PG_loc << 1410 __add_wait_queue_entry_tail(q << 1411 spin_unlock_irq(&q->lock); << 1412 << 1413 /* << 1414 * If a migration entry exists for th << 1415 * a valid reference to the page, and << 1416 * migration entry. So the page is va << 1417 */ << 1418 spin_unlock(ptl); << 1419 << 1420 for (;;) { << 1421 unsigned int flags; << 1422 << 1423 set_current_state(TASK_UNINTE << 1424 << 1425 /* Loop until we've been woke << 1426 flags = smp_load_acquire(&wai << 1427 if (!(flags & WQ_FLAG_WOKEN)) << 1428 if (signal_pending_st << 1429 break; << 1430 << 1431 io_schedule(); << 1432 continue; << 1433 } << 1434 break; << 1435 } << 1436 << 1437 finish_wait(q, wait); << 1438 << 1439 if (thrashing) { << 1440 delayacct_thrashing_end(&in_t << 1441 psi_memstall_leave(&pflags); << 1442 } << 1443 } << 1444 #endif << 1445 << 1446 void folio_wait_bit(struct folio *folio, int << 1447 { 1360 { 1448 folio_wait_bit_common(folio, bit_nr, !! 1361 wait_queue_head_t *q = page_waitqueue(page); >> 1362 wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); 1449 } 1363 } 1450 EXPORT_SYMBOL(folio_wait_bit); !! 1364 EXPORT_SYMBOL(wait_on_page_bit); 1451 1365 1452 int folio_wait_bit_killable(struct folio *fol !! 1366 int wait_on_page_bit_killable(struct page *page, int bit_nr) 1453 { 1367 { 1454 return folio_wait_bit_common(folio, b !! 1368 wait_queue_head_t *q = page_waitqueue(page); >> 1369 return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED); 1455 } 1370 } 1456 EXPORT_SYMBOL(folio_wait_bit_killable); !! 1371 EXPORT_SYMBOL(wait_on_page_bit_killable); 1457 1372 1458 /** 1373 /** 1459 * folio_put_wait_locked - Drop a reference a !! 1374 * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked 1460 * @folio: The folio to wait for. !! 1375 * @page: The page to wait for. 1461 * @state: The sleep state (TASK_KILLABLE, TA 1376 * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc). 1462 * 1377 * 1463 * The caller should hold a reference on @fol !! 1378 * The caller should hold a reference on @page. They expect the page to 1464 * become unlocked relatively soon, but do no 1379 * become unlocked relatively soon, but do not wish to hold up migration 1465 * (for example) by holding the reference whi !! 1380 * (for example) by holding the reference while waiting for the page to 1466 * come unlocked. After this function return 1381 * come unlocked. After this function returns, the caller should not 1467 * dereference @folio. !! 1382 * dereference @page. 1468 * 1383 * 1469 * Return: 0 if the folio was unlocked or -EI !! 1384 * Return: 0 if the page was unlocked or -EINTR if interrupted by a signal. 
1470 */ 1385 */ 1471 static int folio_put_wait_locked(struct folio !! 1386 int put_and_wait_on_page_locked(struct page *page, int state) 1472 { 1387 { 1473 return folio_wait_bit_common(folio, P !! 1388 wait_queue_head_t *q; >> 1389 >> 1390 page = compound_head(page); >> 1391 q = page_waitqueue(page); >> 1392 return wait_on_page_bit_common(q, page, PG_locked, state, DROP); 1474 } 1393 } 1475 1394 1476 /** 1395 /** 1477 * folio_add_wait_queue - Add an arbitrary wa !! 1396 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue 1478 * @folio: Folio defining the wait queue of i !! 1397 * @page: Page defining the wait queue of interest 1479 * @waiter: Waiter to add to the queue 1398 * @waiter: Waiter to add to the queue 1480 * 1399 * 1481 * Add an arbitrary @waiter to the wait queue !! 1400 * Add an arbitrary @waiter to the wait queue for the nominated @page. 1482 */ 1401 */ 1483 void folio_add_wait_queue(struct folio *folio !! 1402 void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) 1484 { 1403 { 1485 wait_queue_head_t *q = folio_waitqueu !! 1404 wait_queue_head_t *q = page_waitqueue(page); 1486 unsigned long flags; 1405 unsigned long flags; 1487 1406 1488 spin_lock_irqsave(&q->lock, flags); 1407 spin_lock_irqsave(&q->lock, flags); 1489 __add_wait_queue_entry_tail(q, waiter 1408 __add_wait_queue_entry_tail(q, waiter); 1490 folio_set_waiters(folio); !! 1409 SetPageWaiters(page); 1491 spin_unlock_irqrestore(&q->lock, flag 1410 spin_unlock_irqrestore(&q->lock, flags); 1492 } 1411 } 1493 EXPORT_SYMBOL_GPL(folio_add_wait_queue); !! 1412 EXPORT_SYMBOL_GPL(add_page_wait_queue); 1494 1413 1495 /** !! 1414 #ifndef clear_bit_unlock_is_negative_byte 1496 * folio_unlock - Unlock a locked folio. !! 1415 1497 * @folio: The folio. !! 1416 /* >> 1417 * PG_waiters is the high bit in the same byte as PG_lock. 1498 * 1418 * 1499 * Unlocks the folio and wakes up any thread !! 1419 * On x86 (and on many other architectures), we can clear PG_lock and >> 1420 * test the sign bit at the same time. But if the architecture does >> 1421 * not support that special operation, we just do this all by hand >> 1422 * instead. 1500 * 1423 * 1501 * Context: May be called from interrupt or p !! 1424 * The read of PG_waiters has to be after (or concurrently with) PG_locked 1502 * called from NMI context. !! 1425 * being cleared, but a memory barrier should be unnecessary since it is >> 1426 * in the same byte as PG_locked. 1503 */ 1427 */ 1504 void folio_unlock(struct folio *folio) !! 1428 static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem) 1505 { 1429 { 1506 /* Bit 7 allows x86 to check the byte !! 1430 clear_bit_unlock(nr, mem); 1507 BUILD_BUG_ON(PG_waiters != 7); !! 1431 /* smp_mb__after_atomic(); */ 1508 BUILD_BUG_ON(PG_locked > 7); !! 1432 return test_bit(PG_waiters, mem); 1509 VM_BUG_ON_FOLIO(!folio_test_locked(fo << 1510 if (folio_xor_flags_has_waiters(folio << 1511 folio_wake_bit(folio, PG_lock << 1512 } 1433 } 1513 EXPORT_SYMBOL(folio_unlock); !! 1434 >> 1435 #endif 1514 1436 1515 /** 1437 /** 1516 * folio_end_read - End read on a folio. !! 1438 * unlock_page - unlock a locked page 1517 * @folio: The folio. !! 1439 * @page: the page 1518 * @success: True if all reads completed succ !! 1440 * 1519 * !! 1441 * Unlocks the page and wakes up sleepers in wait_on_page_locked(). 1520 * When all reads against a folio have comple !! 1442 * Also wakes sleepers in wait_on_page_writeback() because the wakeup 1521 * call this function to let the pagecache kn !! 
1443 * mechanism between PageLocked pages and PageWriteback pages is shared. 1522 * are outstanding. This will unlock the fol !! 1444 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. 1523 * sleeping on the lock. The folio will also !! 1445 * 1524 * reads succeeded. !! 1446 * Note that this depends on PG_waiters being the sign bit in the byte 1525 * !! 1447 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to 1526 * Context: May be called from interrupt or p !! 1448 * clear the PG_locked bit and test PG_waiters at the same time fairly 1527 * called from NMI context. !! 1449 * portably (architectures that do LL/SC can test any bit, while x86 can 1528 */ !! 1450 * test the sign bit). 1529 void folio_end_read(struct folio *folio, bool !! 1451 */ 1530 { !! 1452 void unlock_page(struct page *page) 1531 unsigned long mask = 1 << PG_locked; !! 1453 { 1532 !! 1454 BUILD_BUG_ON(PG_waiters != 7); 1533 /* Must be in bottom byte for x86 to !! 1455 page = compound_head(page); 1534 BUILD_BUG_ON(PG_uptodate > 7); !! 1456 VM_BUG_ON_PAGE(!PageLocked(page), page); 1535 VM_BUG_ON_FOLIO(!folio_test_locked(fo !! 1457 if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags)) 1536 VM_BUG_ON_FOLIO(folio_test_uptodate(f !! 1458 wake_up_page_bit(page, PG_locked); 1537 << 1538 if (likely(success)) << 1539 mask |= 1 << PG_uptodate; << 1540 if (folio_xor_flags_has_waiters(folio << 1541 folio_wake_bit(folio, PG_lock << 1542 } 1459 } 1543 EXPORT_SYMBOL(folio_end_read); !! 1460 EXPORT_SYMBOL(unlock_page); 1544 1461 1545 /** 1462 /** 1546 * folio_end_private_2 - Clear PG_private_2 a !! 1463 * end_page_private_2 - Clear PG_private_2 and release any waiters 1547 * @folio: The folio. !! 1464 * @page: The page 1548 * 1465 * 1549 * Clear the PG_private_2 bit on a folio and !! 1466 * Clear the PG_private_2 bit on a page and wake up any sleepers waiting for 1550 * it. The folio reference held for PG_priva !! 1467 * this. The page ref held for PG_private_2 being set is released. 1551 * 1468 * 1552 * This is, for example, used when a netfs fo !! 1469 * This is, for example, used when a netfs page is being written to a local 1553 * disk cache, thereby allowing writes to the !! 1470 * disk cache, thereby allowing writes to the cache for the same page to be 1554 * serialised. 1471 * serialised. 1555 */ 1472 */ 1556 void folio_end_private_2(struct folio *folio) !! 1473 void end_page_private_2(struct page *page) 1557 { 1474 { 1558 VM_BUG_ON_FOLIO(!folio_test_private_2 !! 1475 page = compound_head(page); 1559 clear_bit_unlock(PG_private_2, folio_ !! 1476 VM_BUG_ON_PAGE(!PagePrivate2(page), page); 1560 folio_wake_bit(folio, PG_private_2); !! 1477 clear_bit_unlock(PG_private_2, &page->flags); 1561 folio_put(folio); !! 1478 wake_up_page_bit(page, PG_private_2); >> 1479 put_page(page); 1562 } 1480 } 1563 EXPORT_SYMBOL(folio_end_private_2); !! 1481 EXPORT_SYMBOL(end_page_private_2); 1564 1482 1565 /** 1483 /** 1566 * folio_wait_private_2 - Wait for PG_private !! 1484 * wait_on_page_private_2 - Wait for PG_private_2 to be cleared on a page 1567 * @folio: The folio to wait on. !! 1485 * @page: The page to wait on 1568 * 1486 * 1569 * Wait for PG_private_2 to be cleared on a f !! 1487 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a page. 1570 */ 1488 */ 1571 void folio_wait_private_2(struct folio *folio !! 1489 void wait_on_page_private_2(struct page *page) 1572 { 1490 { 1573 while (folio_test_private_2(folio)) !! 
1491 page = compound_head(page); 1574 folio_wait_bit(folio, PG_priv !! 1492 while (PagePrivate2(page)) >> 1493 wait_on_page_bit(page, PG_private_2); 1575 } 1494 } 1576 EXPORT_SYMBOL(folio_wait_private_2); !! 1495 EXPORT_SYMBOL(wait_on_page_private_2); 1577 1496 1578 /** 1497 /** 1579 * folio_wait_private_2_killable - Wait for P !! 1498 * wait_on_page_private_2_killable - Wait for PG_private_2 to be cleared on a page 1580 * @folio: The folio to wait on. !! 1499 * @page: The page to wait on 1581 * 1500 * 1582 * Wait for PG_private_2 to be cleared on a f !! 1501 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a page or until a 1583 * received by the calling task. !! 1502 * fatal signal is received by the calling task. 1584 * 1503 * 1585 * Return: 1504 * Return: 1586 * - 0 if successful. 1505 * - 0 if successful. 1587 * - -EINTR if a fatal signal was encountered 1506 * - -EINTR if a fatal signal was encountered. 1588 */ 1507 */ 1589 int folio_wait_private_2_killable(struct foli !! 1508 int wait_on_page_private_2_killable(struct page *page) 1590 { 1509 { 1591 int ret = 0; 1510 int ret = 0; 1592 1511 1593 while (folio_test_private_2(folio)) { !! 1512 page = compound_head(page); 1594 ret = folio_wait_bit_killable !! 1513 while (PagePrivate2(page)) { >> 1514 ret = wait_on_page_bit_killable(page, PG_private_2); 1595 if (ret < 0) 1515 if (ret < 0) 1596 break; 1516 break; 1597 } 1517 } 1598 1518 1599 return ret; 1519 return ret; 1600 } 1520 } 1601 EXPORT_SYMBOL(folio_wait_private_2_killable); !! 1521 EXPORT_SYMBOL(wait_on_page_private_2_killable); 1602 1522 1603 /** 1523 /** 1604 * folio_end_writeback - End writeback agains !! 1524 * end_page_writeback - end writeback against a page 1605 * @folio: The folio. !! 1525 * @page: the page 1606 * << 1607 * The folio must actually be under writeback << 1608 * << 1609 * Context: May be called from process or int << 1610 */ 1526 */ 1611 void folio_end_writeback(struct folio *folio) !! 1527 void end_page_writeback(struct page *page) 1612 { 1528 { 1613 VM_BUG_ON_FOLIO(!folio_test_writeback << 1614 << 1615 /* 1529 /* 1616 * folio_test_clear_reclaim() could b !! 1530 * TestClearPageReclaim could be used here but it is an atomic 1617 * atomic operation and overkill in t !! 1531 * operation and overkill in this particular case. Failing to 1618 * to shuffle a folio marked for imme !! 1532 * shuffle a page marked for immediate reclaim is too mild to 1619 * a gain to justify taking an atomic !! 1533 * justify taking an atomic operation penalty at the end of 1620 * end of every folio writeback. !! 1534 * ever page writeback. 1621 */ 1535 */ 1622 if (folio_test_reclaim(folio)) { !! 1536 if (PageReclaim(page)) { 1623 folio_clear_reclaim(folio); !! 1537 ClearPageReclaim(page); 1624 folio_rotate_reclaimable(foli !! 1538 rotate_reclaimable_page(page); 1625 } 1539 } 1626 1540 1627 /* 1541 /* 1628 * Writeback does not hold a folio re !! 1542 * Writeback does not hold a page reference of its own, relying 1629 * on truncation to wait for the clea 1543 * on truncation to wait for the clearing of PG_writeback. 1630 * But here we must make sure that th !! 1544 * But here we must make sure that the page is not freed and 1631 * reused before the folio_wake_bit() !! 1545 * reused before the wake_up_page(). 1632 */ 1546 */ 1633 folio_get(folio); !! 1547 get_page(page); 1634 if (__folio_end_writeback(folio)) !! 1548 if (!test_clear_page_writeback(page)) 1635 folio_wake_bit(folio, PG_writ !! 1549 BUG(); 1636 acct_reclaim_writeback(folio); !! 1550 1637 folio_put(folio); !! 
1551 smp_mb__after_atomic(); >> 1552 wake_up_page(page, PG_writeback); >> 1553 put_page(page); >> 1554 } >> 1555 EXPORT_SYMBOL(end_page_writeback); >> 1556 >> 1557 /* >> 1558 * After completing I/O on a page, call this routine to update the page >> 1559 * flags appropriately >> 1560 */ >> 1561 void page_endio(struct page *page, bool is_write, int err) >> 1562 { >> 1563 if (!is_write) { >> 1564 if (!err) { >> 1565 SetPageUptodate(page); >> 1566 } else { >> 1567 ClearPageUptodate(page); >> 1568 SetPageError(page); >> 1569 } >> 1570 unlock_page(page); >> 1571 } else { >> 1572 if (err) { >> 1573 struct address_space *mapping; >> 1574 >> 1575 SetPageError(page); >> 1576 mapping = page_mapping(page); >> 1577 if (mapping) >> 1578 mapping_set_error(mapping, err); >> 1579 } >> 1580 end_page_writeback(page); >> 1581 } 1638 } 1582 } 1639 EXPORT_SYMBOL(folio_end_writeback); !! 1583 EXPORT_SYMBOL_GPL(page_endio); 1640 1584 1641 /** 1585 /** 1642 * __folio_lock - Get a lock on the folio, as !! 1586 * __lock_page - get a lock on the page, assuming we need to sleep to get it 1643 * @folio: The folio to lock !! 1587 * @__page: the page to lock 1644 */ 1588 */ 1645 void __folio_lock(struct folio *folio) !! 1589 void __lock_page(struct page *__page) 1646 { 1590 { 1647 folio_wait_bit_common(folio, PG_locke !! 1591 struct page *page = compound_head(__page); >> 1592 wait_queue_head_t *q = page_waitqueue(page); >> 1593 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, 1648 EXCLUSIVE); 1594 EXCLUSIVE); 1649 } 1595 } 1650 EXPORT_SYMBOL(__folio_lock); !! 1596 EXPORT_SYMBOL(__lock_page); 1651 1597 1652 int __folio_lock_killable(struct folio *folio !! 1598 int __lock_page_killable(struct page *__page) 1653 { 1599 { 1654 return folio_wait_bit_common(folio, P !! 1600 struct page *page = compound_head(__page); >> 1601 wait_queue_head_t *q = page_waitqueue(page); >> 1602 return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, 1655 EXCLU 1603 EXCLUSIVE); 1656 } 1604 } 1657 EXPORT_SYMBOL_GPL(__folio_lock_killable); !! 1605 EXPORT_SYMBOL_GPL(__lock_page_killable); 1658 1606 1659 static int __folio_lock_async(struct folio *f !! 1607 int __lock_page_async(struct page *page, struct wait_page_queue *wait) 1660 { 1608 { 1661 struct wait_queue_head *q = folio_wai !! 1609 struct wait_queue_head *q = page_waitqueue(page); 1662 int ret; !! 1610 int ret = 0; 1663 1611 1664 wait->folio = folio; !! 1612 wait->page = page; 1665 wait->bit_nr = PG_locked; 1613 wait->bit_nr = PG_locked; 1666 1614 1667 spin_lock_irq(&q->lock); 1615 spin_lock_irq(&q->lock); 1668 __add_wait_queue_entry_tail(q, &wait- 1616 __add_wait_queue_entry_tail(q, &wait->wait); 1669 folio_set_waiters(folio); !! 1617 SetPageWaiters(page); 1670 ret = !folio_trylock(folio); !! 1618 ret = !trylock_page(page); 1671 /* 1619 /* 1672 * If we were successful now, we know 1620 * If we were successful now, we know we're still on the 1673 * waitqueue as we're still under the 1621 * waitqueue as we're still under the lock. This means it's 1674 * safe to remove and return success, 1622 * safe to remove and return success, we know the callback 1675 * isn't going to trigger. 1623 * isn't going to trigger. 
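/*
 * Usage sketch: a synchronous ->read_folio() implementation can end the
 * read with folio_end_read() above, which marks the folio uptodate on
 * success and unlocks it in one step (the page-based side would pair
 * SetPageUptodate()/unlock_page(), or use page_endio()).  The
 * myfs_fill_folio() helper is purely hypothetical.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>

int myfs_fill_folio(struct folio *folio);	/* hypothetical, defined elsewhere */

static int myfs_read_folio(struct file *file, struct folio *folio)
{
	int err = myfs_fill_folio(folio);

	folio_end_read(folio, err == 0);
	return err;
}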
1676 */ 1624 */ 1677 if (!ret) 1625 if (!ret) 1678 __remove_wait_queue(q, &wait- 1626 __remove_wait_queue(q, &wait->wait); 1679 else 1627 else 1680 ret = -EIOCBQUEUED; 1628 ret = -EIOCBQUEUED; 1681 spin_unlock_irq(&q->lock); 1629 spin_unlock_irq(&q->lock); 1682 return ret; 1630 return ret; 1683 } 1631 } 1684 1632 1685 /* 1633 /* 1686 * Return values: 1634 * Return values: 1687 * 0 - folio is locked. !! 1635 * 1 - page is locked; mmap_lock is still held. 1688 * non-zero - folio is not locked. !! 1636 * 0 - page is not locked. 1689 * mmap_lock or per-VMA lock has been rel !! 1637 * mmap_lock has been released (mmap_read_unlock(), unless flags had both 1690 * vma_end_read()), unless flags had both !! 1638 * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in 1691 * FAULT_FLAG_RETRY_NOWAIT set, in which !! 1639 * which case mmap_lock is still held. 1692 * 1640 * 1693 * If neither ALLOW_RETRY nor KILLABLE are se !! 1641 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1 1694 * with the folio locked and the mmap_lock/pe !! 1642 * with the page locked and the mmap_lock unperturbed. 1695 */ 1643 */ 1696 vm_fault_t __folio_lock_or_retry(struct folio !! 1644 int __lock_page_or_retry(struct page *page, struct mm_struct *mm, >> 1645 unsigned int flags) 1697 { 1646 { 1698 unsigned int flags = vmf->flags; << 1699 << 1700 if (fault_flag_allow_retry_first(flag 1647 if (fault_flag_allow_retry_first(flags)) { 1701 /* 1648 /* 1702 * CAUTION! In this case, mma !! 1649 * CAUTION! In this case, mmap_lock is not released 1703 * released even though retur !! 1650 * even though return 0. 1704 */ 1651 */ 1705 if (flags & FAULT_FLAG_RETRY_ 1652 if (flags & FAULT_FLAG_RETRY_NOWAIT) 1706 return VM_FAULT_RETRY !! 1653 return 0; 1707 1654 1708 release_fault_lock(vmf); !! 1655 mmap_read_unlock(mm); 1709 if (flags & FAULT_FLAG_KILLAB 1656 if (flags & FAULT_FLAG_KILLABLE) 1710 folio_wait_locked_kil !! 1657 wait_on_page_locked_killable(page); 1711 else 1658 else 1712 folio_wait_locked(fol !! 1659 wait_on_page_locked(page); 1713 return VM_FAULT_RETRY; !! 1660 return 0; 1714 } 1661 } 1715 if (flags & FAULT_FLAG_KILLABLE) { 1662 if (flags & FAULT_FLAG_KILLABLE) { 1716 bool ret; !! 1663 int ret; 1717 1664 1718 ret = __folio_lock_killable(f !! 1665 ret = __lock_page_killable(page); 1719 if (ret) { 1666 if (ret) { 1720 release_fault_lock(vm !! 1667 mmap_read_unlock(mm); 1721 return VM_FAULT_RETRY !! 1668 return 0; 1722 } 1669 } 1723 } else { 1670 } else { 1724 __folio_lock(folio); !! 1671 __lock_page(page); 1725 } 1672 } >> 1673 return 1; 1726 1674 1727 return 0; << 1728 } 1675 } 1729 1676 1730 /** 1677 /** 1731 * page_cache_next_miss() - Find the next gap 1678 * page_cache_next_miss() - Find the next gap in the page cache. 1732 * @mapping: Mapping. 1679 * @mapping: Mapping. 1733 * @index: Index. 1680 * @index: Index. 1734 * @max_scan: Maximum range to search. 1681 * @max_scan: Maximum range to search. 1735 * 1682 * 1736 * Search the range [index, min(index + max_s 1683 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the 1737 * gap with the lowest index. 1684 * gap with the lowest index. 1738 * 1685 * 1739 * This function may be called under the rcu_ 1686 * This function may be called under the rcu_read_lock. However, this will 1740 * not atomically search a snapshot of the ca 1687 * not atomically search a snapshot of the cache at a single point in time. 
1741 * For example, if a gap is created at index 1688 * For example, if a gap is created at index 5, then subsequently a gap is 1742 * created at index 10, page_cache_next_miss 1689 * created at index 10, page_cache_next_miss covering both indices may 1743 * return 10 if called under the rcu_read_loc 1690 * return 10 if called under the rcu_read_lock. 1744 * 1691 * 1745 * Return: The index of the gap if found, oth 1692 * Return: The index of the gap if found, otherwise an index outside the 1746 * range specified (in which case 'return - i 1693 * range specified (in which case 'return - index >= max_scan' will be true). 1747 * In the rare case of index wrap-around, 0 w 1694 * In the rare case of index wrap-around, 0 will be returned. 1748 */ 1695 */ 1749 pgoff_t page_cache_next_miss(struct address_s 1696 pgoff_t page_cache_next_miss(struct address_space *mapping, 1750 pgoff_t index, u 1697 pgoff_t index, unsigned long max_scan) 1751 { 1698 { 1752 XA_STATE(xas, &mapping->i_pages, inde 1699 XA_STATE(xas, &mapping->i_pages, index); 1753 1700 1754 while (max_scan--) { 1701 while (max_scan--) { 1755 void *entry = xas_next(&xas); 1702 void *entry = xas_next(&xas); 1756 if (!entry || xa_is_value(ent 1703 if (!entry || xa_is_value(entry)) 1757 return xas.xa_index; !! 1704 break; 1758 if (xas.xa_index == 0) 1705 if (xas.xa_index == 0) 1759 return 0; !! 1706 break; 1760 } 1707 } 1761 1708 1762 return index + max_scan; !! 1709 return xas.xa_index; 1763 } 1710 } 1764 EXPORT_SYMBOL(page_cache_next_miss); 1711 EXPORT_SYMBOL(page_cache_next_miss); 1765 1712 1766 /** 1713 /** 1767 * page_cache_prev_miss() - Find the previous 1714 * page_cache_prev_miss() - Find the previous gap in the page cache. 1768 * @mapping: Mapping. 1715 * @mapping: Mapping. 1769 * @index: Index. 1716 * @index: Index. 1770 * @max_scan: Maximum range to search. 1717 * @max_scan: Maximum range to search. 1771 * 1718 * 1772 * Search the range [max(index - max_scan + 1 1719 * Search the range [max(index - max_scan + 1, 0), index] for the 1773 * gap with the highest index. 1720 * gap with the highest index. 1774 * 1721 * 1775 * This function may be called under the rcu_ 1722 * This function may be called under the rcu_read_lock. However, this will 1776 * not atomically search a snapshot of the ca 1723 * not atomically search a snapshot of the cache at a single point in time. 1777 * For example, if a gap is created at index 1724 * For example, if a gap is created at index 10, then subsequently a gap is 1778 * created at index 5, page_cache_prev_miss() 1725 * created at index 5, page_cache_prev_miss() covering both indices may 1779 * return 5 if called under the rcu_read_lock 1726 * return 5 if called under the rcu_read_lock. 1780 * 1727 * 1781 * Return: The index of the gap if found, oth 1728 * Return: The index of the gap if found, otherwise an index outside the 1782 * range specified (in which case 'index - re 1729 * range specified (in which case 'index - return >= max_scan' will be true). 1783 * In the rare case of wrap-around, ULONG_MAX 1730 * In the rare case of wrap-around, ULONG_MAX will be returned. 
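/*
 * Usage sketch for page_cache_next_miss() above: probe whether a range
 * of the cache is fully populated, the way readahead decides whether
 * there is a hole worth filling.  Per the contract above, a return
 * value with 'miss - index >= nr' means no gap was found in the range.
 */
#include <linux/pagemap.h>
#include <linux/rcupdate.h>

static bool myfs_range_fully_cached(struct address_space *mapping,
				    pgoff_t index, unsigned long nr)
{
	pgoff_t miss;

	rcu_read_lock();
	miss = page_cache_next_miss(mapping, index, nr);
	rcu_read_unlock();

	return miss - index >= nr;
}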
1784 */ 1731 */ 1785 pgoff_t page_cache_prev_miss(struct address_s 1732 pgoff_t page_cache_prev_miss(struct address_space *mapping, 1786 pgoff_t index, u 1733 pgoff_t index, unsigned long max_scan) 1787 { 1734 { 1788 XA_STATE(xas, &mapping->i_pages, inde 1735 XA_STATE(xas, &mapping->i_pages, index); 1789 1736 1790 while (max_scan--) { 1737 while (max_scan--) { 1791 void *entry = xas_prev(&xas); 1738 void *entry = xas_prev(&xas); 1792 if (!entry || xa_is_value(ent 1739 if (!entry || xa_is_value(entry)) 1793 break; 1740 break; 1794 if (xas.xa_index == ULONG_MAX 1741 if (xas.xa_index == ULONG_MAX) 1795 break; 1742 break; 1796 } 1743 } 1797 1744 1798 return xas.xa_index; 1745 return xas.xa_index; 1799 } 1746 } 1800 EXPORT_SYMBOL(page_cache_prev_miss); 1747 EXPORT_SYMBOL(page_cache_prev_miss); 1801 1748 1802 /* 1749 /* 1803 * Lockless page cache protocol: !! 1750 * mapping_get_entry - Get a page cache entry. 1804 * On the lookup side: << 1805 * 1. Load the folio from i_pages << 1806 * 2. Increment the refcount if it's not zero << 1807 * 3. If the folio is not found by xas_reload << 1808 * << 1809 * On the removal side: << 1810 * A. Freeze the page (by zeroing the refcoun << 1811 * B. Remove the page from i_pages << 1812 * C. Return the page to the page allocator << 1813 * << 1814 * This means that any page may have its refe << 1815 * increased by a speculative page cache (or << 1816 * be allocated by another user before the RC << 1817 * Because the refcount temporarily acquired << 1818 * last refcount on the page, any page alloca << 1819 * folio_put(). << 1820 */ << 1821 << 1822 /* << 1823 * filemap_get_entry - Get a page cache entry << 1824 * @mapping: the address_space to search 1751 * @mapping: the address_space to search 1825 * @index: The page cache index. 1752 * @index: The page cache index. 1826 * 1753 * 1827 * Looks up the page cache entry at @mapping !! 1754 * Looks up the page cache slot at @mapping & @index. If there is a 1828 * it is returned with an increased refcount. !! 1755 * page cache page, the head page is returned with an increased refcount. 1829 * of a previously evicted folio, or a swap e << 1830 * it is returned without further action. << 1831 * 1756 * 1832 * Return: The folio, swap or shadow entry, % !! 1757 * If the slot holds a shadow entry of a previously evicted page, or a >> 1758 * swap entry from shmem/tmpfs, it is returned. >> 1759 * >> 1760 * Return: The head page or shadow entry, %NULL if nothing is found. 1833 */ 1761 */ 1834 void *filemap_get_entry(struct address_space !! 1762 static struct page *mapping_get_entry(struct address_space *mapping, >> 1763 pgoff_t index) 1835 { 1764 { 1836 XA_STATE(xas, &mapping->i_pages, inde 1765 XA_STATE(xas, &mapping->i_pages, index); 1837 struct folio *folio; !! 1766 struct page *page; 1838 1767 1839 rcu_read_lock(); 1768 rcu_read_lock(); 1840 repeat: 1769 repeat: 1841 xas_reset(&xas); 1770 xas_reset(&xas); 1842 folio = xas_load(&xas); !! 1771 page = xas_load(&xas); 1843 if (xas_retry(&xas, folio)) !! 1772 if (xas_retry(&xas, page)) 1844 goto repeat; 1773 goto repeat; 1845 /* 1774 /* 1846 * A shadow entry of a recently evict 1775 * A shadow entry of a recently evicted page, or a swap entry from 1847 * shmem/tmpfs. Return it without at 1776 * shmem/tmpfs. Return it without attempting to raise page count. 1848 */ 1777 */ 1849 if (!folio || xa_is_value(folio)) !! 1778 if (!page || xa_is_value(page)) 1850 goto out; 1779 goto out; 1851 1780 1852 if (!folio_try_get(folio)) !! 
1781 if (!page_cache_get_speculative(page)) 1853 goto repeat; 1782 goto repeat; 1854 1783 1855 if (unlikely(folio != xas_reload(&xas !! 1784 /* 1856 folio_put(folio); !! 1785 * Has the page moved or been split? >> 1786 * This is part of the lockless pagecache protocol. See >> 1787 * include/linux/pagemap.h for details. >> 1788 */ >> 1789 if (unlikely(page != xas_reload(&xas))) { >> 1790 put_page(page); 1857 goto repeat; 1791 goto repeat; 1858 } 1792 } 1859 out: 1793 out: 1860 rcu_read_unlock(); 1794 rcu_read_unlock(); 1861 1795 1862 return folio; !! 1796 return page; 1863 } 1797 } 1864 1798 1865 /** 1799 /** 1866 * __filemap_get_folio - Find and get a refer !! 1800 * pagecache_get_page - Find and get a reference to a page. 1867 * @mapping: The address_space to search. 1801 * @mapping: The address_space to search. 1868 * @index: The page index. 1802 * @index: The page index. 1869 * @fgp_flags: %FGP flags modify how the foli !! 1803 * @fgp_flags: %FGP flags modify how the page is returned. 1870 * @gfp: Memory allocation flags to use if %F !! 1804 * @gfp_mask: Memory allocation flags to use if %FGP_CREAT is specified. 1871 * 1805 * 1872 * Looks up the page cache entry at @mapping 1806 * Looks up the page cache entry at @mapping & @index. 1873 * 1807 * >> 1808 * @fgp_flags can be zero or more of these flags: >> 1809 * >> 1810 * * %FGP_ACCESSED - The page will be marked accessed. >> 1811 * * %FGP_LOCK - The page is returned locked. >> 1812 * * %FGP_HEAD - If the page is present and a THP, return the head page >> 1813 * rather than the exact page specified by the index. >> 1814 * * %FGP_ENTRY - If there is a shadow / swap / DAX entry, return it >> 1815 * instead of allocating a new page to replace it. >> 1816 * * %FGP_CREAT - If no page is present then a new page is allocated using >> 1817 * @gfp_mask and added to the page cache and the VM's LRU list. >> 1818 * The page is returned locked and with an increased refcount. >> 1819 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the >> 1820 * page is already in cache. If the page was allocated, unlock it before >> 1821 * returning so the caller can do the same dance. >> 1822 * * %FGP_WRITE - The page will be written >> 1823 * * %FGP_NOFS - __GFP_FS will get cleared in gfp mask >> 1824 * * %FGP_NOWAIT - Don't get blocked by page lock >> 1825 * 1874 * If %FGP_LOCK or %FGP_CREAT are specified t 1826 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even 1875 * if the %GFP flags specified for %FGP_CREAT 1827 * if the %GFP flags specified for %FGP_CREAT are atomic. 1876 * 1828 * 1877 * If this function returns a folio, it is re !! 1829 * If there is a page cache page, it is returned with an increased refcount. 1878 * 1830 * 1879 * Return: The found folio or an ERR_PTR() ot !! 1831 * Return: The found page or %NULL otherwise. 1880 */ 1832 */ 1881 struct folio *__filemap_get_folio(struct addr !! 1833 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, 1882 fgf_t fgp_flags, gfp_t gfp) !! 1834 int fgp_flags, gfp_t gfp_mask) 1883 { 1835 { 1884 struct folio *folio; !! 1836 struct page *page; 1885 1837 1886 repeat: 1838 repeat: 1887 folio = filemap_get_entry(mapping, in !! 1839 page = mapping_get_entry(mapping, index); 1888 if (xa_is_value(folio)) !! 1840 if (xa_is_value(page)) { 1889 folio = NULL; !! 1841 if (fgp_flags & FGP_ENTRY) 1890 if (!folio) !! 
1842 return page; >> 1843 page = NULL; >> 1844 } >> 1845 if (!page) 1891 goto no_page; 1846 goto no_page; 1892 1847 1893 if (fgp_flags & FGP_LOCK) { 1848 if (fgp_flags & FGP_LOCK) { 1894 if (fgp_flags & FGP_NOWAIT) { 1849 if (fgp_flags & FGP_NOWAIT) { 1895 if (!folio_trylock(fo !! 1850 if (!trylock_page(page)) { 1896 folio_put(fol !! 1851 put_page(page); 1897 return ERR_PT !! 1852 return NULL; 1898 } 1853 } 1899 } else { 1854 } else { 1900 folio_lock(folio); !! 1855 lock_page(page); 1901 } 1856 } 1902 1857 1903 /* Has the page been truncate 1858 /* Has the page been truncated? */ 1904 if (unlikely(folio->mapping ! !! 1859 if (unlikely(page->mapping != mapping)) { 1905 folio_unlock(folio); !! 1860 unlock_page(page); 1906 folio_put(folio); !! 1861 put_page(page); 1907 goto repeat; 1862 goto repeat; 1908 } 1863 } 1909 VM_BUG_ON_FOLIO(!folio_contai !! 1864 VM_BUG_ON_PAGE(!thp_contains(page, index), page); 1910 } 1865 } 1911 1866 1912 if (fgp_flags & FGP_ACCESSED) 1867 if (fgp_flags & FGP_ACCESSED) 1913 folio_mark_accessed(folio); !! 1868 mark_page_accessed(page); 1914 else if (fgp_flags & FGP_WRITE) { 1869 else if (fgp_flags & FGP_WRITE) { 1915 /* Clear idle flag for buffer 1870 /* Clear idle flag for buffer write */ 1916 if (folio_test_idle(folio)) !! 1871 if (page_is_idle(page)) 1917 folio_clear_idle(foli !! 1872 clear_page_idle(page); 1918 } 1873 } >> 1874 if (!(fgp_flags & FGP_HEAD)) >> 1875 page = find_subpage(page, index); 1919 1876 1920 if (fgp_flags & FGP_STABLE) << 1921 folio_wait_stable(folio); << 1922 no_page: 1877 no_page: 1923 if (!folio && (fgp_flags & FGP_CREAT) !! 1878 if (!page && (fgp_flags & FGP_CREAT)) { 1924 unsigned int min_order = mapp << 1925 unsigned int order = max(min_ << 1926 int err; 1879 int err; 1927 index = mapping_align_index(m << 1928 << 1929 if ((fgp_flags & FGP_WRITE) & 1880 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping)) 1930 gfp |= __GFP_WRITE; !! 1881 gfp_mask |= __GFP_WRITE; 1931 if (fgp_flags & FGP_NOFS) 1882 if (fgp_flags & FGP_NOFS) 1932 gfp &= ~__GFP_FS; !! 1883 gfp_mask &= ~__GFP_FS; 1933 if (fgp_flags & FGP_NOWAIT) { << 1934 gfp &= ~GFP_KERNEL; << 1935 gfp |= GFP_NOWAIT | _ << 1936 } << 1937 if (WARN_ON_ONCE(!(fgp_flags << 1938 fgp_flags |= FGP_LOCK << 1939 1884 1940 if (order > mapping_max_folio !! 1885 page = __page_cache_alloc(gfp_mask); 1941 order = mapping_max_f !! 1886 if (!page) 1942 /* If we're not aligned, allo !! 1887 return NULL; 1943 if (index & ((1UL << order) - << 1944 order = __ffs(index); << 1945 1888 1946 do { !! 1889 if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP)))) 1947 gfp_t alloc_gfp = gfp !! 1890 fgp_flags |= FGP_LOCK; 1948 1891 1949 err = -ENOMEM; !! 1892 /* Init accessed so avoid atomic mark_page_accessed later */ 1950 if (order > min_order !! 1893 if (fgp_flags & FGP_ACCESSED) 1951 alloc_gfp |= !! 1894 __SetPageReferenced(page); 1952 folio = filemap_alloc << 1953 if (!folio) << 1954 continue; << 1955 << 1956 /* Init accessed so a << 1957 if (fgp_flags & FGP_A << 1958 __folio_set_r << 1959 1895 1960 err = filemap_add_fol !! 1896 err = add_to_page_cache_lru(page, mapping, index, gfp_mask); 1961 if (!err) !! 1897 if (unlikely(err)) { 1962 break; !! 1898 put_page(page); 1963 folio_put(folio); !! 1899 page = NULL; 1964 folio = NULL; !! 1900 if (err == -EEXIST) 1965 } while (order-- > min_order) !! 1901 goto repeat; >> 1902 } 1966 1903 1967 if (err == -EEXIST) << 1968 goto repeat; << 1969 if (err) << 1970 return ERR_PTR(err); << 1971 /* 1904 /* 1972 * filemap_add_folio locks th !! 
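/*
 * Usage sketch of the FGP flags documented above, using the page-based
 * naming: the lookup-or-create pattern a ->write_begin() helper
 * typically wants, with the page returned locked (NULL on failure).
 */
#include <linux/pagemap.h>

static struct page *myfs_grab_write_page(struct address_space *mapping,
					  loff_t pos)
{
	pgoff_t index = pos >> PAGE_SHIFT;

	return pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_WRITE | FGP_CREAT,
				  mapping_gfp_mask(mapping));
}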
1905 * add_to_page_cache_lru locks the page, and for mmap we expect 1973 * we expect an unlocked page !! 1906 * an unlocked page. 1974 */ 1907 */ 1975 if (folio && (fgp_flags & FGP !! 1908 if (page && (fgp_flags & FGP_FOR_MMAP)) 1976 folio_unlock(folio); !! 1909 unlock_page(page); 1977 } 1910 } 1978 1911 1979 if (!folio) !! 1912 return page; 1980 return ERR_PTR(-ENOENT); << 1981 return folio; << 1982 } 1913 } 1983 EXPORT_SYMBOL(__filemap_get_folio); !! 1914 EXPORT_SYMBOL(pagecache_get_page); 1984 1915 1985 static inline struct folio *find_get_entry(st !! 1916 static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max, 1986 xa_mark_t mark) 1917 xa_mark_t mark) 1987 { 1918 { 1988 struct folio *folio; !! 1919 struct page *page; 1989 1920 1990 retry: 1921 retry: 1991 if (mark == XA_PRESENT) 1922 if (mark == XA_PRESENT) 1992 folio = xas_find(xas, max); !! 1923 page = xas_find(xas, max); 1993 else 1924 else 1994 folio = xas_find_marked(xas, !! 1925 page = xas_find_marked(xas, max, mark); 1995 1926 1996 if (xas_retry(xas, folio)) !! 1927 if (xas_retry(xas, page)) 1997 goto retry; 1928 goto retry; 1998 /* 1929 /* 1999 * A shadow entry of a recently evict 1930 * A shadow entry of a recently evicted page, a swap 2000 * entry from shmem/tmpfs or a DAX en 1931 * entry from shmem/tmpfs or a DAX entry. Return it 2001 * without attempting to raise page c 1932 * without attempting to raise page count. 2002 */ 1933 */ 2003 if (!folio || xa_is_value(folio)) !! 1934 if (!page || xa_is_value(page)) 2004 return folio; !! 1935 return page; 2005 1936 2006 if (!folio_try_get(folio)) !! 1937 if (!page_cache_get_speculative(page)) 2007 goto reset; 1938 goto reset; 2008 1939 2009 if (unlikely(folio != xas_reload(xas) !! 1940 /* Has the page moved or been split? */ 2010 folio_put(folio); !! 1941 if (unlikely(page != xas_reload(xas))) { >> 1942 put_page(page); 2011 goto reset; 1943 goto reset; 2012 } 1944 } 2013 1945 2014 return folio; !! 1946 return page; 2015 reset: 1947 reset: 2016 xas_reset(xas); 1948 xas_reset(xas); 2017 goto retry; 1949 goto retry; 2018 } 1950 } 2019 1951 2020 /** 1952 /** 2021 * find_get_entries - gang pagecache lookup 1953 * find_get_entries - gang pagecache lookup 2022 * @mapping: The address_space to search 1954 * @mapping: The address_space to search 2023 * @start: The starting page cache index 1955 * @start: The starting page cache index 2024 * @end: The final page index (inclusi 1956 * @end: The final page index (inclusive). 2025 * @fbatch: Where the resulting entries a !! 1957 * @pvec: Where the resulting entries are placed. 2026 * @indices: The cache indices correspondi 1958 * @indices: The cache indices corresponding to the entries in @entries 2027 * 1959 * 2028 * find_get_entries() will search for and ret 1960 * find_get_entries() will search for and return a batch of entries in 2029 * the mapping. The entries are placed in @f !! 1961 * the mapping. The entries are placed in @pvec. find_get_entries() 2030 * takes a reference on any actual folios it !! 1962 * takes a reference on any actual pages it returns. 2031 * 1963 * 2032 * The entries have ascending indexes. The i !! 1964 * The search returns a group of mapping-contiguous page cache entries 2033 * due to not-present entries or large folios !! 1965 * with ascending indexes. There may be holes in the indices due to >> 1966 * not-present pages. 2034 * 1967 * 2035 * Any shadow entries of evicted folios, or s !! 
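/*
 * Illustrative sketch only (not part of mm/filemap.c): a typical in-kernel
 * caller of the page-based lookup above.  The "myfs_" name is a placeholder
 * and the declarations come from <linux/pagemap.h>.  With FGP_LOCK | FGP_CREAT
 * the page comes back locked, with an elevated refcount, and is allocated and
 * added to the page cache and LRU if it was not already present.  The newer
 * folio-based __filemap_get_folio() on the left-hand side is used the same
 * way, except that it reports failure with an ERR_PTR() rather than NULL.
 */
static int myfs_get_and_init_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;

	page = pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				  mapping_gfp_mask(mapping));
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page)) {
		/* Freshly created page: give it defined contents. */
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	}

	unlock_page(page);
	put_page(page);
	return 0;
}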
1968 * Any shadow entries of evicted pages, or swap entries from 2036 * shmem/tmpfs, are included in the returned 1969 * shmem/tmpfs, are included in the returned array. 2037 * 1970 * 2038 * Return: The number of entries which were f !! 1971 * If it finds a Transparent Huge Page, head or tail, find_get_entries() >> 1972 * stops at that page: the caller is likely to have a better way to handle >> 1973 * the compound page as a whole, and then skip its extent, than repeatedly >> 1974 * calling find_get_entries() to return all its tails. >> 1975 * >> 1976 * Return: the number of pages and shadow entries which were found. 2039 */ 1977 */ 2040 unsigned find_get_entries(struct address_spac !! 1978 unsigned find_get_entries(struct address_space *mapping, pgoff_t start, 2041 pgoff_t end, struct folio_bat !! 1979 pgoff_t end, struct pagevec *pvec, pgoff_t *indices) 2042 { 1980 { 2043 XA_STATE(xas, &mapping->i_pages, *sta !! 1981 XA_STATE(xas, &mapping->i_pages, start); 2044 struct folio *folio; !! 1982 struct page *page; >> 1983 unsigned int ret = 0; >> 1984 unsigned nr_entries = PAGEVEC_SIZE; 2045 1985 2046 rcu_read_lock(); 1986 rcu_read_lock(); 2047 while ((folio = find_get_entry(&xas, !! 1987 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { 2048 indices[fbatch->nr] = xas.xa_ !! 1988 /* 2049 if (!folio_batch_add(fbatch, !! 1989 * Terminate early on finding a THP, to allow the caller to 2050 break; !! 1990 * handle it all at once; but continue if this is hugetlbfs. 2051 } !! 1991 */ >> 1992 if (!xa_is_value(page) && PageTransHuge(page) && >> 1993 !PageHuge(page)) { >> 1994 page = find_subpage(page, xas.xa_index); >> 1995 nr_entries = ret + 1; >> 1996 } 2052 1997 2053 if (folio_batch_count(fbatch)) { !! 1998 indices[ret] = xas.xa_index; 2054 unsigned long nr; !! 1999 pvec->pages[ret] = page; 2055 int idx = folio_batch_count(f !! 2000 if (++ret == nr_entries) 2056 !! 2001 break; 2057 folio = fbatch->folios[idx]; << 2058 if (!xa_is_value(folio)) << 2059 nr = folio_nr_pages(f << 2060 else << 2061 nr = 1 << xa_get_orde << 2062 *start = round_down(indices[i << 2063 } 2002 } 2064 rcu_read_unlock(); 2003 rcu_read_unlock(); 2065 2004 2066 return folio_batch_count(fbatch); !! 2005 pvec->nr = ret; >> 2006 return ret; 2067 } 2007 } 2068 2008 2069 /** 2009 /** 2070 * find_lock_entries - Find a batch of pageca 2010 * find_lock_entries - Find a batch of pagecache entries. 2071 * @mapping: The address_space to search. 2011 * @mapping: The address_space to search. 2072 * @start: The starting page cache index 2012 * @start: The starting page cache index. 2073 * @end: The final page index (inclusi 2013 * @end: The final page index (inclusive). 2074 * @fbatch: Where the resulting entries a !! 2014 * @pvec: Where the resulting entries are placed. 2075 * @indices: The cache indices of the entr !! 2015 * @indices: The cache indices of the entries in @pvec. 2076 * 2016 * 2077 * find_lock_entries() will return a batch of 2017 * find_lock_entries() will return a batch of entries from @mapping. 2078 * Swap, shadow and DAX entries are included. !! 2018 * Swap, shadow and DAX entries are included. Pages are returned 2079 * locked and with an incremented refcount. !! 2019 * locked and with an incremented refcount. Pages which are locked by 2080 * by somebody else or under writeback are sk !! 2020 * somebody else or under writeback are skipped. Only the head page of 2081 * partially outside the range are not return !! 2021 * a THP is returned. Pages which are partially outside the range are >> 2022 * not returned. 
2082 * 2023 * 2083 * The entries have ascending indexes. The i 2024 * The entries have ascending indexes. The indices may not be consecutive 2084 * due to not-present entries, large folios, !! 2025 * due to not-present entries, THP pages, pages which could not be locked 2085 * locked or folios under writeback. !! 2026 * or pages under writeback. 2086 * 2027 * 2087 * Return: The number of entries which were f 2028 * Return: The number of entries which were found. 2088 */ 2029 */ 2089 unsigned find_lock_entries(struct address_spa !! 2030 unsigned find_lock_entries(struct address_space *mapping, pgoff_t start, 2090 pgoff_t end, struct folio_bat !! 2031 pgoff_t end, struct pagevec *pvec, pgoff_t *indices) 2091 { 2032 { 2092 XA_STATE(xas, &mapping->i_pages, *sta !! 2033 XA_STATE(xas, &mapping->i_pages, start); 2093 struct folio *folio; !! 2034 struct page *page; 2094 2035 2095 rcu_read_lock(); 2036 rcu_read_lock(); 2096 while ((folio = find_get_entry(&xas, !! 2037 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { 2097 unsigned long base; !! 2038 if (!xa_is_value(page)) { 2098 unsigned long nr; !! 2039 if (page->index < start) 2099 << 2100 if (!xa_is_value(folio)) { << 2101 nr = folio_nr_pages(f << 2102 base = folio->index; << 2103 /* Omit large folio w << 2104 if (base < *start) << 2105 goto put; 2040 goto put; 2106 /* Omit large folio w !! 2041 VM_BUG_ON_PAGE(page->index != xas.xa_index, page); 2107 if (base + nr - 1 > e !! 2042 if (page->index + thp_nr_pages(page) - 1 > end) 2108 goto put; 2043 goto put; 2109 if (!folio_trylock(fo !! 2044 if (!trylock_page(page)) 2110 goto put; 2045 goto put; 2111 if (folio->mapping != !! 2046 if (page->mapping != mapping || PageWriteback(page)) 2112 folio_test_writeb << 2113 goto unlock; 2047 goto unlock; 2114 VM_BUG_ON_FOLIO(!foli !! 2048 VM_BUG_ON_PAGE(!thp_contains(page, xas.xa_index), 2115 folio !! 2049 page); 2116 } else { << 2117 nr = 1 << xas_get_ord << 2118 base = xas.xa_index & << 2119 /* Omit order>0 value << 2120 if (base < *start) << 2121 continue; << 2122 /* Omit order>0 value << 2123 if (base + nr - 1 > e << 2124 break; << 2125 } 2050 } 2126 !! 2051 indices[pvec->nr] = xas.xa_index; 2127 /* Update start now so that l !! 2052 if (!pagevec_add(pvec, page)) 2128 *start = base + nr; << 2129 indices[fbatch->nr] = xas.xa_ << 2130 if (!folio_batch_add(fbatch, << 2131 break; 2053 break; 2132 continue; !! 2054 goto next; 2133 unlock: 2055 unlock: 2134 folio_unlock(folio); !! 2056 unlock_page(page); 2135 put: 2057 put: 2136 folio_put(folio); !! 2058 put_page(page); >> 2059 next: >> 2060 if (!xa_is_value(page) && PageTransHuge(page)) { >> 2061 unsigned int nr_pages = thp_nr_pages(page); >> 2062 >> 2063 /* Final THP may cross MAX_LFS_FILESIZE on 32-bit */ >> 2064 xas_set(&xas, page->index + nr_pages); >> 2065 if (xas.xa_index < nr_pages) >> 2066 break; >> 2067 } 2137 } 2068 } 2138 rcu_read_unlock(); 2069 rcu_read_unlock(); 2139 2070 2140 return folio_batch_count(fbatch); !! 2071 return pagevec_count(pvec); 2141 } 2072 } 2142 2073 2143 /** 2074 /** 2144 * filemap_get_folios - Get a batch of folios !! 2075 * find_get_pages_range - gang pagecache lookup 2145 * @mapping: The address_space to search 2076 * @mapping: The address_space to search 2146 * @start: The starting page index 2077 * @start: The starting page index 2147 * @end: The final page index (inclusi 2078 * @end: The final page index (inclusive) 2148 * @fbatch: The batch to fill. !! 
2079 * @nr_pages: The maximum number of pages >> 2080 * @pages: Where the resulting pages are placed 2149 * 2081 * 2150 * Search for and return a batch of folios in !! 2082 * find_get_pages_range() will search for and return a group of up to @nr_pages 2151 * index @start and up to index @end (inclusi !! 2083 * pages in the mapping starting at index @start and up to index @end 2152 * in @fbatch with an elevated reference coun !! 2084 * (inclusive). The pages are placed at @pages. find_get_pages_range() takes 2153 * !! 2085 * a reference against the returned pages. 2154 * Return: The number of folios which were fo !! 2086 * 2155 * We also update @start to index the next fo !! 2087 * The search returns a group of mapping-contiguous pages with ascending 2156 */ !! 2088 * indexes. There may be holes in the indices due to not-present pages. 2157 unsigned filemap_get_folios(struct address_sp !! 2089 * We also update @start to index the next page for the traversal. 2158 pgoff_t end, struct folio_bat !! 2090 * >> 2091 * Return: the number of pages which were found. If this number is >> 2092 * smaller than @nr_pages, the end of specified range has been >> 2093 * reached. >> 2094 */ >> 2095 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, >> 2096 pgoff_t end, unsigned int nr_pages, >> 2097 struct page **pages) 2159 { 2098 { 2160 return filemap_get_folios_tag(mapping !! 2099 XA_STATE(xas, &mapping->i_pages, *start); >> 2100 struct page *page; >> 2101 unsigned ret = 0; >> 2102 >> 2103 if (unlikely(!nr_pages)) >> 2104 return 0; >> 2105 >> 2106 rcu_read_lock(); >> 2107 while ((page = find_get_entry(&xas, end, XA_PRESENT))) { >> 2108 /* Skip over shadow, swap and DAX entries */ >> 2109 if (xa_is_value(page)) >> 2110 continue; >> 2111 >> 2112 pages[ret] = find_subpage(page, xas.xa_index); >> 2113 if (++ret == nr_pages) { >> 2114 *start = xas.xa_index + 1; >> 2115 goto out; >> 2116 } >> 2117 } >> 2118 >> 2119 /* >> 2120 * We come here when there is no page beyond @end. We take care to not >> 2121 * overflow the index @start as it confuses some of the callers. This >> 2122 * breaks the iteration when there is a page at index -1 but that is >> 2123 * already broken anyway. >> 2124 */ >> 2125 if (end == (pgoff_t)-1) >> 2126 *start = (pgoff_t)-1; >> 2127 else >> 2128 *start = end + 1; >> 2129 out: >> 2130 rcu_read_unlock(); >> 2131 >> 2132 return ret; 2161 } 2133 } 2162 EXPORT_SYMBOL(filemap_get_folios); << 2163 2134 2164 /** 2135 /** 2165 * filemap_get_folios_contig - Get a batch of !! 2136 * find_get_pages_contig - gang contiguous pagecache lookup 2166 * @mapping: The address_space to search 2137 * @mapping: The address_space to search 2167 * @start: The starting page index !! 2138 * @index: The starting page index 2168 * @end: The final page index (inclusi !! 2139 * @nr_pages: The maximum number of pages 2169 * @fbatch: The batch to fill !! 2140 * @pages: Where the resulting pages are placed 2170 * 2141 * 2171 * filemap_get_folios_contig() works exactly !! 2142 * find_get_pages_contig() works exactly like find_get_pages(), except 2172 * except the returned folios are guaranteed !! 2143 * that the returned number of pages are guaranteed to be contiguous. 2173 * not return all contiguous folios if the ba << 2174 * 2144 * 2175 * Return: The number of folios found. !! 2145 * Return: the number of pages which were found. 2176 * Also update @start to be positioned for tr << 2177 */ 2146 */ 2178 !! 
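/*
 * Illustrative sketch only (not part of mm/filemap.c): batched traversal of
 * a range with the array-based lookup documented above.  "myfs_" is a
 * placeholder name.  find_get_pages_range() advances @start itself, so the
 * loop simply repeats until a lookup returns nothing; every returned page
 * carries a reference that the caller must drop.
 */
static void myfs_scan_cached_pages(struct address_space *mapping,
				   pgoff_t start, pgoff_t end)
{
	struct page *pages[PAGEVEC_SIZE];
	unsigned int i, nr;

	while ((nr = find_get_pages_range(mapping, &start, end,
					  PAGEVEC_SIZE, pages))) {
		for (i = 0; i < nr; i++) {
			/* ... inspect pages[i] here ... */
			put_page(pages[i]);
		}
		cond_resched();
	}
}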
2147 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, 2179 unsigned filemap_get_folios_contig(struct add !! 2148 unsigned int nr_pages, struct page **pages) 2180 pgoff_t *start, pgoff_t end, << 2181 { 2149 { 2182 XA_STATE(xas, &mapping->i_pages, *sta !! 2150 XA_STATE(xas, &mapping->i_pages, index); 2183 unsigned long nr; !! 2151 struct page *page; 2184 struct folio *folio; !! 2152 unsigned int ret = 0; 2185 2153 2186 rcu_read_lock(); !! 2154 if (unlikely(!nr_pages)) >> 2155 return 0; 2187 2156 2188 for (folio = xas_load(&xas); folio && !! 2157 rcu_read_lock(); 2189 folio = xas_next(&xas !! 2158 for (page = xas_load(&xas); page; page = xas_next(&xas)) { 2190 if (xas_retry(&xas, folio)) !! 2159 if (xas_retry(&xas, page)) 2191 continue; 2160 continue; 2192 /* 2161 /* 2193 * If the entry has been swap 2162 * If the entry has been swapped out, we can stop looking. 2194 * No current caller is looki 2163 * No current caller is looking for DAX entries. 2195 */ 2164 */ 2196 if (xa_is_value(folio)) !! 2165 if (xa_is_value(page)) 2197 goto update_start; !! 2166 break; 2198 << 2199 /* If we landed in the middle << 2200 if (xa_is_sibling(folio)) << 2201 goto update_start; << 2202 2167 2203 if (!folio_try_get(folio)) !! 2168 if (!page_cache_get_speculative(page)) 2204 goto retry; 2169 goto retry; 2205 2170 2206 if (unlikely(folio != xas_rel !! 2171 /* Has the page moved or been split? */ 2207 goto put_folio; !! 2172 if (unlikely(page != xas_reload(&xas))) >> 2173 goto put_page; 2208 2174 2209 if (!folio_batch_add(fbatch, !! 2175 pages[ret] = find_subpage(page, xas.xa_index); 2210 nr = folio_nr_pages(f !! 2176 if (++ret == nr_pages) 2211 *start = folio->index !! 2177 break; 2212 goto out; << 2213 } << 2214 continue; 2178 continue; 2215 put_folio: !! 2179 put_page: 2216 folio_put(folio); !! 2180 put_page(page); 2217 << 2218 retry: 2181 retry: 2219 xas_reset(&xas); 2182 xas_reset(&xas); 2220 } 2183 } 2221 << 2222 update_start: << 2223 nr = folio_batch_count(fbatch); << 2224 << 2225 if (nr) { << 2226 folio = fbatch->folios[nr - 1 << 2227 *start = folio_next_index(fol << 2228 } << 2229 out: << 2230 rcu_read_unlock(); 2184 rcu_read_unlock(); 2231 return folio_batch_count(fbatch); !! 2185 return ret; 2232 } 2186 } 2233 EXPORT_SYMBOL(filemap_get_folios_contig); !! 2187 EXPORT_SYMBOL(find_get_pages_contig); 2234 2188 2235 /** 2189 /** 2236 * filemap_get_folios_tag - Get a batch of fo !! 2190 * find_get_pages_range_tag - Find and return head pages matching @tag. 2237 * @mapping: The address_space to search !! 2191 * @mapping: the address_space to search 2238 * @start: The starting page index !! 2192 * @index: the starting page index 2239 * @end: The final page index (inclusi !! 2193 * @end: The final page index (inclusive) 2240 * @tag: The tag index !! 2194 * @tag: the tag index 2241 * @fbatch: The batch to fill !! 2195 * @nr_pages: the maximum number of pages 2242 * !! 2196 * @pages: where the resulting pages are placed 2243 * The first folio may start before @start; i !! 2197 * 2244 * @start. The final folio may extend beyond !! 2198 * Like find_get_pages(), except we only return head pages which are tagged 2245 * contain @end. The folios have ascending i !! 2199 * with @tag. @index is updated to the index immediately after the last 2246 * between the folios if there are indices wh !! 2200 * page we return, ready for the next iteration. 2247 * page cache. 
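/*
 * Illustrative sketch only (not part of mm/filemap.c): using the contiguous
 * lookup above to grab an index-contiguous run of cached pages.  The lookup
 * stops at the first hole (or non-page entry), so fewer than
 * ARRAY_SIZE(pages) entries may come back.  "myfs_" is a placeholder name.
 */
static unsigned int myfs_peek_contig_run(struct address_space *mapping,
					 pgoff_t index)
{
	struct page *pages[16];
	unsigned int i, nr;

	nr = find_get_pages_contig(mapping, index, ARRAY_SIZE(pages), pages);
	for (i = 0; i < nr; i++) {
		/* pages[i] is the page at index + i, with a reference held. */
		put_page(pages[i]);
	}
	return nr;
}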
If folios are added to or rem << 2248 * while this is running, they may or may not << 2249 * Only returns folios that are tagged with @ << 2250 * 2201 * 2251 * Return: The number of folios found. !! 2202 * Return: the number of pages which were found. 2252 * Also update @start to index the next folio << 2253 */ 2203 */ 2254 unsigned filemap_get_folios_tag(struct addres !! 2204 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, 2255 pgoff_t end, xa_mark_ !! 2205 pgoff_t end, xa_mark_t tag, unsigned int nr_pages, >> 2206 struct page **pages) 2256 { 2207 { 2257 XA_STATE(xas, &mapping->i_pages, *sta !! 2208 XA_STATE(xas, &mapping->i_pages, *index); 2258 struct folio *folio; !! 2209 struct page *page; >> 2210 unsigned ret = 0; >> 2211 >> 2212 if (unlikely(!nr_pages)) >> 2213 return 0; 2259 2214 2260 rcu_read_lock(); 2215 rcu_read_lock(); 2261 while ((folio = find_get_entry(&xas, !! 2216 while ((page = find_get_entry(&xas, end, tag))) { 2262 /* 2217 /* 2263 * Shadow entries should neve 2218 * Shadow entries should never be tagged, but this iteration 2264 * is lockless so there is a 2219 * is lockless so there is a window for page reclaim to evict 2265 * a page we saw tagged. Skip !! 2220 * a page we saw tagged. Skip over it. 2266 */ 2221 */ 2267 if (xa_is_value(folio)) !! 2222 if (xa_is_value(page)) 2268 continue; 2223 continue; 2269 if (!folio_batch_add(fbatch, !! 2224 2270 unsigned long nr = fo !! 2225 pages[ret] = page; 2271 *start = folio->index !! 2226 if (++ret == nr_pages) { >> 2227 *index = page->index + thp_nr_pages(page); 2272 goto out; 2228 goto out; 2273 } 2229 } 2274 } 2230 } >> 2231 2275 /* 2232 /* 2276 * We come here when there is no page !! 2233 * We come here when we got to @end. We take care to not overflow the 2277 * overflow the index @start as it co !! 2234 * index @index as it confuses some of the callers. This breaks the 2278 * breaks the iteration when there is !! 2235 * iteration when there is a page at index -1 but that is already 2279 * already broke anyway. !! 2236 * broken anyway. 2280 */ 2237 */ 2281 if (end == (pgoff_t)-1) 2238 if (end == (pgoff_t)-1) 2282 *start = (pgoff_t)-1; !! 2239 *index = (pgoff_t)-1; 2283 else 2240 else 2284 *start = end + 1; !! 2241 *index = end + 1; 2285 out: 2242 out: 2286 rcu_read_unlock(); 2243 rcu_read_unlock(); 2287 2244 2288 return folio_batch_count(fbatch); !! 2245 return ret; 2289 } 2246 } 2290 EXPORT_SYMBOL(filemap_get_folios_tag); !! 2247 EXPORT_SYMBOL(find_get_pages_range_tag); 2291 2248 2292 /* 2249 /* 2293 * CD/DVDs are error prone. When a medium err 2250 * CD/DVDs are error prone. When a medium error occurs, the driver may fail 2294 * a _large_ part of the i/o request. Imagine 2251 * a _large_ part of the i/o request. 
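/*
 * Illustrative sketch only (not part of mm/filemap.c): the classic
 * writeback-style walk over dirty-tagged pages using the tagged lookup
 * above, in the spirit of write_cache_pages() but heavily abbreviated
 * (no cyclic ranges, no tag-for-sync pass, no error handling).  "myfs_"
 * is a placeholder name.
 */
static void myfs_write_dirty_range(struct address_space *mapping,
				   pgoff_t index, pgoff_t end)
{
	struct page *pages[PAGEVEC_SIZE];
	unsigned int i, nr;

	while ((nr = find_get_pages_range_tag(mapping, &index, end,
					      PAGECACHE_TAG_DIRTY,
					      PAGEVEC_SIZE, pages))) {
		for (i = 0; i < nr; i++) {
			struct page *page = pages[i];

			lock_page(page);
			/*
			 * Recheck under the page lock: the page may have
			 * been cleaned or truncated since the tagged lookup.
			 */
			if (page->mapping == mapping && PageDirty(page)) {
				/* ... start writeback of this page here ... */
			}
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}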
Imagine the worst scenario: 2295 * 2252 * 2296 * ---R_________________________________ 2253 * ---R__________________________________________B__________ 2297 * ^ reading here 2254 * ^ reading here ^ bad block(assume 4k) 2298 * 2255 * 2299 * read(R) => miss => readahead(R...B) => med 2256 * read(R) => miss => readahead(R...B) => media error => frustrating retries 2300 * => failing the whole request => read(R) => 2257 * => failing the whole request => read(R) => read(R+1) => 2301 * readahead(R+1...B+1) => bang => read(R+2) 2258 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => 2302 * readahead(R+3...B+2) => bang => read(R+3) 2259 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => 2303 * readahead(R+4...B+3) => bang => read(R+4) 2260 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... 2304 * 2261 * 2305 * It is going insane. Fix it by quickly scal 2262 * It is going insane. Fix it by quickly scaling down the readahead size. 2306 */ 2263 */ 2307 static void shrink_readahead_size_eio(struct 2264 static void shrink_readahead_size_eio(struct file_ra_state *ra) 2308 { 2265 { 2309 ra->ra_pages /= 4; 2266 ra->ra_pages /= 4; 2310 } 2267 } 2311 2268 2312 /* 2269 /* 2313 * filemap_get_read_batch - Get a batch of fo !! 2270 * filemap_get_read_batch - Get a batch of pages for read 2314 * 2271 * 2315 * Get a batch of folios which represent a co !! 2272 * Get a batch of pages which represent a contiguous range of bytes 2316 * the file. No exceptional entries will be !! 2273 * in the file. No tail pages will be returned. If @index is in the 2317 * the middle of a folio, the entire folio wi !! 2274 * middle of a THP, the entire THP will be returned. The last page in 2318 * folio in the batch may have the readahead !! 2275 * the batch may have Readahead set or be not Uptodate so that the 2319 * clear so that the caller can take the appr !! 2276 * caller can take the appropriate action. 2320 */ 2277 */ 2321 static void filemap_get_read_batch(struct add 2278 static void filemap_get_read_batch(struct address_space *mapping, 2322 pgoff_t index, pgoff_t max, s !! 2279 pgoff_t index, pgoff_t max, struct pagevec *pvec) 2323 { 2280 { 2324 XA_STATE(xas, &mapping->i_pages, inde 2281 XA_STATE(xas, &mapping->i_pages, index); 2325 struct folio *folio; !! 2282 struct page *head; 2326 2283 2327 rcu_read_lock(); 2284 rcu_read_lock(); 2328 for (folio = xas_load(&xas); folio; f !! 2285 for (head = xas_load(&xas); head; head = xas_next(&xas)) { 2329 if (xas_retry(&xas, folio)) !! 2286 if (xas_retry(&xas, head)) 2330 continue; 2287 continue; 2331 if (xas.xa_index > max || xa_ !! 2288 if (xas.xa_index > max || xa_is_value(head)) 2332 break; << 2333 if (xa_is_sibling(folio)) << 2334 break; 2289 break; 2335 if (!folio_try_get(folio)) !! 2290 if (!page_cache_get_speculative(head)) 2336 goto retry; 2291 goto retry; 2337 2292 2338 if (unlikely(folio != xas_rel !! 2293 /* Has the page moved or been split? */ 2339 goto put_folio; !! 2294 if (unlikely(head != xas_reload(&xas))) >> 2295 goto put_page; 2340 2296 2341 if (!folio_batch_add(fbatch, !! 2297 if (!pagevec_add(pvec, head)) 2342 break; 2298 break; 2343 if (!folio_test_uptodate(foli !! 2299 if (!PageUptodate(head)) 2344 break; 2300 break; 2345 if (folio_test_readahead(foli !! 2301 if (PageReadahead(head)) 2346 break; 2302 break; 2347 xas_advance(&xas, folio_next_ !! 2303 xas.xa_index = head->index + thp_nr_pages(head) - 1; >> 2304 xas.xa_offset = (xas.xa_index >> xas.xa_shift) & XA_CHUNK_MASK; 2348 continue; 2305 continue; 2349 put_folio: !! 
2306 put_page: 2350 folio_put(folio); !! 2307 put_page(head); 2351 retry: 2308 retry: 2352 xas_reset(&xas); 2309 xas_reset(&xas); 2353 } 2310 } 2354 rcu_read_unlock(); 2311 rcu_read_unlock(); 2355 } 2312 } 2356 2313 2357 static int filemap_read_folio(struct file *fi !! 2314 static int filemap_read_page(struct file *file, struct address_space *mapping, 2358 struct folio *folio) !! 2315 struct page *page) 2359 { 2316 { 2360 bool workingset = folio_test_workings << 2361 unsigned long pflags; << 2362 int error; 2317 int error; 2363 2318 >> 2319 /* >> 2320 * A previous I/O error may have been due to temporary failures, >> 2321 * eg. multipath errors. PG_error will be set again if readpage >> 2322 * fails. >> 2323 */ >> 2324 ClearPageError(page); 2364 /* Start the actual read. The read wi 2325 /* Start the actual read. The read will unlock the page. */ 2365 if (unlikely(workingset)) !! 2326 error = mapping->a_ops->readpage(file, page); 2366 psi_memstall_enter(&pflags); << 2367 error = filler(file, folio); << 2368 if (unlikely(workingset)) << 2369 psi_memstall_leave(&pflags); << 2370 if (error) 2327 if (error) 2371 return error; 2328 return error; 2372 2329 2373 error = folio_wait_locked_killable(fo !! 2330 error = wait_on_page_locked_killable(page); 2374 if (error) 2331 if (error) 2375 return error; 2332 return error; 2376 if (folio_test_uptodate(folio)) !! 2333 if (PageUptodate(page)) 2377 return 0; 2334 return 0; 2378 if (file) !! 2335 shrink_readahead_size_eio(&file->f_ra); 2379 shrink_readahead_size_eio(&fi << 2380 return -EIO; 2336 return -EIO; 2381 } 2337 } 2382 2338 2383 static bool filemap_range_uptodate(struct add 2339 static bool filemap_range_uptodate(struct address_space *mapping, 2384 loff_t pos, size_t count, str !! 2340 loff_t pos, struct iov_iter *iter, struct page *page) 2385 bool need_uptodate) << 2386 { 2341 { 2387 if (folio_test_uptodate(folio)) !! 2342 int count; >> 2343 >> 2344 if (PageUptodate(page)) 2388 return true; 2345 return true; 2389 /* pipes can't handle partially uptod 2346 /* pipes can't handle partially uptodate pages */ 2390 if (need_uptodate) !! 2347 if (iov_iter_is_pipe(iter)) 2391 return false; 2348 return false; 2392 if (!mapping->a_ops->is_partially_upt 2349 if (!mapping->a_ops->is_partially_uptodate) 2393 return false; 2350 return false; 2394 if (mapping->host->i_blkbits >= folio !! 2351 if (mapping->host->i_blkbits >= (PAGE_SHIFT + thp_order(page))) 2395 return false; 2352 return false; 2396 2353 2397 if (folio_pos(folio) > pos) { !! 2354 count = iter->count; 2398 count -= folio_pos(folio) - p !! 2355 if (page_offset(page) > pos) { >> 2356 count -= page_offset(page) - pos; 2399 pos = 0; 2357 pos = 0; 2400 } else { 2358 } else { 2401 pos -= folio_pos(folio); !! 2359 pos -= page_offset(page); 2402 } 2360 } 2403 2361 2404 return mapping->a_ops->is_partially_u !! 2362 return mapping->a_ops->is_partially_uptodate(page, pos, count); 2405 } 2363 } 2406 2364 2407 static int filemap_update_page(struct kiocb * 2365 static int filemap_update_page(struct kiocb *iocb, 2408 struct address_space *mapping !! 2366 struct address_space *mapping, struct iov_iter *iter, 2409 struct folio *folio, bool nee !! 2367 struct page *page) 2410 { 2368 { 2411 int error; 2369 int error; 2412 2370 2413 if (iocb->ki_flags & IOCB_NOWAIT) { !! 
2371 if (!trylock_page(page)) { 2414 if (!filemap_invalidate_trylo << 2415 return -EAGAIN; << 2416 } else { << 2417 filemap_invalidate_lock_share << 2418 } << 2419 << 2420 if (!folio_trylock(folio)) { << 2421 error = -EAGAIN; << 2422 if (iocb->ki_flags & (IOCB_NO 2372 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) 2423 goto unlock_mapping; !! 2373 return -EAGAIN; 2424 if (!(iocb->ki_flags & IOCB_W 2374 if (!(iocb->ki_flags & IOCB_WAITQ)) { 2425 filemap_invalidate_un !! 2375 put_and_wait_on_page_locked(page, TASK_KILLABLE); 2426 /* << 2427 * This is where we u << 2428 * previously submitt << 2429 */ << 2430 folio_put_wait_locked << 2431 return AOP_TRUNCATED_ 2376 return AOP_TRUNCATED_PAGE; 2432 } 2377 } 2433 error = __folio_lock_async(fo !! 2378 error = __lock_page_async(page, iocb->ki_waitq); 2434 if (error) 2379 if (error) 2435 goto unlock_mapping; !! 2380 return error; 2436 } 2381 } 2437 2382 2438 error = AOP_TRUNCATED_PAGE; !! 2383 if (!page->mapping) 2439 if (!folio->mapping) !! 2384 goto truncated; 2440 goto unlock; << 2441 2385 2442 error = 0; 2386 error = 0; 2443 if (filemap_range_uptodate(mapping, i !! 2387 if (filemap_range_uptodate(mapping, iocb->ki_pos, iter, page)) 2444 need_uptod << 2445 goto unlock; 2388 goto unlock; 2446 2389 2447 error = -EAGAIN; 2390 error = -EAGAIN; 2448 if (iocb->ki_flags & (IOCB_NOIO | IOC 2391 if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ)) 2449 goto unlock; 2392 goto unlock; 2450 2393 2451 error = filemap_read_folio(iocb->ki_f !! 2394 error = filemap_read_page(iocb->ki_filp, mapping, page); 2452 folio); << 2453 goto unlock_mapping; << 2454 unlock: << 2455 folio_unlock(folio); << 2456 unlock_mapping: << 2457 filemap_invalidate_unlock_shared(mapp << 2458 if (error == AOP_TRUNCATED_PAGE) 2395 if (error == AOP_TRUNCATED_PAGE) 2459 folio_put(folio); !! 2396 put_page(page); >> 2397 return error; >> 2398 truncated: >> 2399 unlock_page(page); >> 2400 put_page(page); >> 2401 return AOP_TRUNCATED_PAGE; >> 2402 unlock: >> 2403 unlock_page(page); 2460 return error; 2404 return error; 2461 } 2405 } 2462 2406 2463 static int filemap_create_folio(struct file * !! 2407 static int filemap_create_page(struct file *file, 2464 struct address_space *mapping !! 2408 struct address_space *mapping, pgoff_t index, 2465 struct folio_batch *fbatch) !! 2409 struct pagevec *pvec) 2466 { 2410 { 2467 struct folio *folio; !! 2411 struct page *page; 2468 int error; 2412 int error; 2469 unsigned int min_order = mapping_min_ << 2470 pgoff_t index; << 2471 2413 2472 folio = filemap_alloc_folio(mapping_g !! 2414 page = page_cache_alloc(mapping); 2473 if (!folio) !! 2415 if (!page) 2474 return -ENOMEM; 2416 return -ENOMEM; 2475 2417 2476 /* !! 2418 error = add_to_page_cache_lru(page, mapping, index, 2477 * Protect against truncate / hole pu << 2478 * here assures we cannot instantiate << 2479 * pagecache folios after evicting pa << 2480 * and before actually freeing blocks << 2481 * release invalidate_lock after inse << 2482 * the page cache as the locked folio << 2483 * synchronize with hole punching. Bu << 2484 * such as filemap_update_page() fill << 2485 * pages or ->readahead() that need t << 2486 * while mapping blocks for IO so let << 2487 * well to keep locking rules simple. 
<< 2488 */ << 2489 filemap_invalidate_lock_shared(mappin << 2490 index = (pos >> (PAGE_SHIFT + min_ord << 2491 error = filemap_add_folio(mapping, fo << 2492 mapping_gfp_constrain 2419 mapping_gfp_constraint(mapping, GFP_KERNEL)); 2493 if (error == -EEXIST) 2420 if (error == -EEXIST) 2494 error = AOP_TRUNCATED_PAGE; 2421 error = AOP_TRUNCATED_PAGE; 2495 if (error) 2422 if (error) 2496 goto error; 2423 goto error; 2497 2424 2498 error = filemap_read_folio(file, mapp !! 2425 error = filemap_read_page(file, mapping, page); 2499 if (error) 2426 if (error) 2500 goto error; 2427 goto error; 2501 2428 2502 filemap_invalidate_unlock_shared(mapp !! 2429 pagevec_add(pvec, page); 2503 folio_batch_add(fbatch, folio); << 2504 return 0; 2430 return 0; 2505 error: 2431 error: 2506 filemap_invalidate_unlock_shared(mapp !! 2432 put_page(page); 2507 folio_put(folio); << 2508 return error; 2433 return error; 2509 } 2434 } 2510 2435 2511 static int filemap_readahead(struct kiocb *io 2436 static int filemap_readahead(struct kiocb *iocb, struct file *file, 2512 struct address_space *mapping !! 2437 struct address_space *mapping, struct page *page, 2513 pgoff_t last_index) 2438 pgoff_t last_index) 2514 { 2439 { 2515 DEFINE_READAHEAD(ractl, file, &file-> << 2516 << 2517 if (iocb->ki_flags & IOCB_NOIO) 2440 if (iocb->ki_flags & IOCB_NOIO) 2518 return -EAGAIN; 2441 return -EAGAIN; 2519 page_cache_async_ra(&ractl, folio, la !! 2442 page_cache_async_readahead(mapping, &file->f_ra, file, page, >> 2443 page->index, last_index - page->index); 2520 return 0; 2444 return 0; 2521 } 2445 } 2522 2446 2523 static int filemap_get_pages(struct kiocb *io !! 2447 static int filemap_get_pages(struct kiocb *iocb, struct iov_iter *iter, 2524 struct folio_batch *fbatch, b !! 2448 struct pagevec *pvec) 2525 { 2449 { 2526 struct file *filp = iocb->ki_filp; 2450 struct file *filp = iocb->ki_filp; 2527 struct address_space *mapping = filp- 2451 struct address_space *mapping = filp->f_mapping; 2528 struct file_ra_state *ra = &filp->f_r 2452 struct file_ra_state *ra = &filp->f_ra; 2529 pgoff_t index = iocb->ki_pos >> PAGE_ 2453 pgoff_t index = iocb->ki_pos >> PAGE_SHIFT; 2530 pgoff_t last_index; 2454 pgoff_t last_index; 2531 struct folio *folio; !! 2455 struct page *page; 2532 unsigned int flags; << 2533 int err = 0; 2456 int err = 0; 2534 2457 2535 /* "last_index" is the index of the p !! 2458 last_index = DIV_ROUND_UP(iocb->ki_pos + iter->count, PAGE_SIZE); 2536 last_index = DIV_ROUND_UP(iocb->ki_po << 2537 retry: 2459 retry: 2538 if (fatal_signal_pending(current)) 2460 if (fatal_signal_pending(current)) 2539 return -EINTR; 2461 return -EINTR; 2540 2462 2541 filemap_get_read_batch(mapping, index !! 2463 filemap_get_read_batch(mapping, index, last_index, pvec); 2542 if (!folio_batch_count(fbatch)) { !! 2464 if (!pagevec_count(pvec)) { 2543 if (iocb->ki_flags & IOCB_NOI 2465 if (iocb->ki_flags & IOCB_NOIO) 2544 return -EAGAIN; 2466 return -EAGAIN; 2545 if (iocb->ki_flags & IOCB_NOW << 2546 flags = memalloc_noio << 2547 page_cache_sync_readahead(map 2467 page_cache_sync_readahead(mapping, ra, filp, index, 2548 last_index - 2468 last_index - index); 2549 if (iocb->ki_flags & IOCB_NOW !! 2469 filemap_get_read_batch(mapping, index, last_index, pvec); 2550 memalloc_noio_restore << 2551 filemap_get_read_batch(mappin << 2552 } 2470 } 2553 if (!folio_batch_count(fbatch)) { !! 
2471 if (!pagevec_count(pvec)) { 2554 if (iocb->ki_flags & (IOCB_NO 2472 if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ)) 2555 return -EAGAIN; 2473 return -EAGAIN; 2556 err = filemap_create_folio(fi !! 2474 err = filemap_create_page(filp, mapping, >> 2475 iocb->ki_pos >> PAGE_SHIFT, pvec); 2557 if (err == AOP_TRUNCATED_PAGE 2476 if (err == AOP_TRUNCATED_PAGE) 2558 goto retry; 2477 goto retry; 2559 return err; 2478 return err; 2560 } 2479 } 2561 2480 2562 folio = fbatch->folios[folio_batch_co !! 2481 page = pvec->pages[pagevec_count(pvec) - 1]; 2563 if (folio_test_readahead(folio)) { !! 2482 if (PageReadahead(page)) { 2564 err = filemap_readahead(iocb, !! 2483 err = filemap_readahead(iocb, filp, mapping, page, last_index); 2565 if (err) 2484 if (err) 2566 goto err; 2485 goto err; 2567 } 2486 } 2568 if (!folio_test_uptodate(folio)) { !! 2487 if (!PageUptodate(page)) { 2569 if ((iocb->ki_flags & IOCB_WA !! 2488 if ((iocb->ki_flags & IOCB_WAITQ) && pagevec_count(pvec) > 1) 2570 folio_batch_count(fbatch) << 2571 iocb->ki_flags |= IOC 2489 iocb->ki_flags |= IOCB_NOWAIT; 2572 err = filemap_update_page(ioc !! 2490 err = filemap_update_page(iocb, mapping, iter, page); 2573 nee << 2574 if (err) 2491 if (err) 2575 goto err; 2492 goto err; 2576 } 2493 } 2577 2494 2578 trace_mm_filemap_get_pages(mapping, i << 2579 return 0; 2495 return 0; 2580 err: 2496 err: 2581 if (err < 0) 2497 if (err < 0) 2582 folio_put(folio); !! 2498 put_page(page); 2583 if (likely(--fbatch->nr)) !! 2499 if (likely(--pvec->nr)) 2584 return 0; 2500 return 0; 2585 if (err == AOP_TRUNCATED_PAGE) 2501 if (err == AOP_TRUNCATED_PAGE) 2586 goto retry; 2502 goto retry; 2587 return err; 2503 return err; 2588 } 2504 } 2589 2505 2590 static inline bool pos_same_folio(loff_t pos1 << 2591 { << 2592 unsigned int shift = folio_shift(foli << 2593 << 2594 return (pos1 >> shift == pos2 >> shif << 2595 } << 2596 << 2597 /** 2506 /** 2598 * filemap_read - Read data from the page cac 2507 * filemap_read - Read data from the page cache. 2599 * @iocb: The iocb to read. 2508 * @iocb: The iocb to read. 2600 * @iter: Destination for the data. 2509 * @iter: Destination for the data. 2601 * @already_read: Number of bytes already rea 2510 * @already_read: Number of bytes already read by the caller. 2602 * 2511 * 2603 * Copies data from the page cache. If the d 2512 * Copies data from the page cache. If the data is not currently present, 2604 * uses the readahead and read_folio address_ !! 2513 * uses the readahead and readpage address_space operations to fetch it. 2605 * 2514 * 2606 * Return: Total number of bytes copied, incl 2515 * Return: Total number of bytes copied, including those already read by 2607 * the caller. If an error happens before an 2516 * the caller. If an error happens before any bytes are copied, returns 2608 * a negative error number. 2517 * a negative error number. 2609 */ 2518 */ 2610 ssize_t filemap_read(struct kiocb *iocb, stru 2519 ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter, 2611 ssize_t already_read) 2520 ssize_t already_read) 2612 { 2521 { 2613 struct file *filp = iocb->ki_filp; 2522 struct file *filp = iocb->ki_filp; 2614 struct file_ra_state *ra = &filp->f_r 2523 struct file_ra_state *ra = &filp->f_ra; 2615 struct address_space *mapping = filp- 2524 struct address_space *mapping = filp->f_mapping; 2616 struct inode *inode = mapping->host; 2525 struct inode *inode = mapping->host; 2617 struct folio_batch fbatch; !! 
2526 struct pagevec pvec; 2618 int i, error = 0; 2527 int i, error = 0; 2619 bool writably_mapped; 2528 bool writably_mapped; 2620 loff_t isize, end_offset; 2529 loff_t isize, end_offset; 2621 loff_t last_pos = ra->prev_pos; << 2622 2530 2623 if (unlikely(iocb->ki_pos >= inode->i 2531 if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes)) 2624 return 0; 2532 return 0; 2625 if (unlikely(!iov_iter_count(iter))) 2533 if (unlikely(!iov_iter_count(iter))) 2626 return 0; 2534 return 0; 2627 2535 2628 iov_iter_truncate(iter, inode->i_sb-> !! 2536 iov_iter_truncate(iter, inode->i_sb->s_maxbytes); 2629 folio_batch_init(&fbatch); !! 2537 pagevec_init(&pvec); 2630 2538 2631 do { 2539 do { 2632 cond_resched(); 2540 cond_resched(); 2633 2541 2634 /* 2542 /* 2635 * If we've already successfu 2543 * If we've already successfully copied some data, then we 2636 * can no longer safely retur 2544 * can no longer safely return -EIOCBQUEUED. Hence mark 2637 * an async read NOWAIT at th 2545 * an async read NOWAIT at that point. 2638 */ 2546 */ 2639 if ((iocb->ki_flags & IOCB_WA 2547 if ((iocb->ki_flags & IOCB_WAITQ) && already_read) 2640 iocb->ki_flags |= IOC 2548 iocb->ki_flags |= IOCB_NOWAIT; 2641 2549 2642 if (unlikely(iocb->ki_pos >= !! 2550 error = filemap_get_pages(iocb, iter, &pvec); 2643 break; << 2644 << 2645 error = filemap_get_pages(ioc << 2646 if (error < 0) 2551 if (error < 0) 2647 break; 2552 break; 2648 2553 2649 /* 2554 /* 2650 * i_size must be checked aft 2555 * i_size must be checked after we know the pages are Uptodate. 2651 * 2556 * 2652 * Checking i_size after the 2557 * Checking i_size after the check allows us to calculate 2653 * the correct value for "nr" 2558 * the correct value for "nr", which means the zero-filled 2654 * part of the page is not co 2559 * part of the page is not copied back to userspace (unless 2655 * another truncate extends t 2560 * another truncate extends the file - this is desired though). 2656 */ 2561 */ 2657 isize = i_size_read(inode); 2562 isize = i_size_read(inode); 2658 if (unlikely(iocb->ki_pos >= 2563 if (unlikely(iocb->ki_pos >= isize)) 2659 goto put_folios; !! 2564 goto put_pages; 2660 end_offset = min_t(loff_t, is 2565 end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count); 2661 2566 2662 /* 2567 /* 2663 * Once we start copying data 2568 * Once we start copying data, we don't want to be touching any 2664 * cachelines that might be c 2569 * cachelines that might be contended: 2665 */ 2570 */ 2666 writably_mapped = mapping_wri 2571 writably_mapped = mapping_writably_mapped(mapping); 2667 2572 2668 /* 2573 /* 2669 * When a read accesses the s !! 2574 * When a sequential read accesses a page several times, only 2670 * mark it as accessed the fi 2575 * mark it as accessed the first time. 2671 */ 2576 */ 2672 if (!pos_same_folio(iocb->ki_ !! 2577 if (iocb->ki_pos >> PAGE_SHIFT != 2673 fbatch.fo !! 2578 ra->prev_pos >> PAGE_SHIFT) 2674 folio_mark_accessed(f !! 2579 mark_page_accessed(pvec.pages[0]); 2675 !! 2580 2676 for (i = 0; i < folio_batch_c !! 2581 for (i = 0; i < pagevec_count(&pvec); i++) { 2677 struct folio *folio = !! 2582 struct page *page = pvec.pages[i]; 2678 size_t fsize = folio_ !! 2583 size_t page_size = thp_size(page); 2679 size_t offset = iocb- !! 2584 size_t offset = iocb->ki_pos & (page_size - 1); 2680 size_t bytes = min_t( 2585 size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos, 2681 !! 2586 page_size - offset); 2682 size_t copied; 2587 size_t copied; 2683 2588 2684 if (end_offset < foli !! 
2589 if (end_offset < page_offset(page)) 2685 break; 2590 break; 2686 if (i > 0) 2591 if (i > 0) 2687 folio_mark_ac !! 2592 mark_page_accessed(page); 2688 /* 2593 /* 2689 * If users can be wr !! 2594 * If users can be writing to this page using arbitrary 2690 * virtual addresses, !! 2595 * virtual addresses, take care about potential aliasing 2691 * before reading the !! 2596 * before reading the page on the kernel side. 2692 */ 2597 */ 2693 if (writably_mapped) !! 2598 if (writably_mapped) { 2694 flush_dcache_ !! 2599 int j; >> 2600 >> 2601 for (j = 0; j < thp_nr_pages(page); j++) >> 2602 flush_dcache_page(page + j); >> 2603 } 2695 2604 2696 copied = copy_folio_t !! 2605 copied = copy_page_to_iter(page, offset, bytes, iter); 2697 2606 2698 already_read += copie 2607 already_read += copied; 2699 iocb->ki_pos += copie 2608 iocb->ki_pos += copied; 2700 last_pos = iocb->ki_p !! 2609 ra->prev_pos = iocb->ki_pos; 2701 2610 2702 if (copied < bytes) { 2611 if (copied < bytes) { 2703 error = -EFAU 2612 error = -EFAULT; 2704 break; 2613 break; 2705 } 2614 } 2706 } 2615 } 2707 put_folios: !! 2616 put_pages: 2708 for (i = 0; i < folio_batch_c !! 2617 for (i = 0; i < pagevec_count(&pvec); i++) 2709 folio_put(fbatch.foli !! 2618 put_page(pvec.pages[i]); 2710 folio_batch_init(&fbatch); !! 2619 pagevec_reinit(&pvec); 2711 } while (iov_iter_count(iter) && iocb 2620 } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error); 2712 2621 2713 file_accessed(filp); 2622 file_accessed(filp); 2714 ra->prev_pos = last_pos; !! 2623 2715 return already_read ? already_read : 2624 return already_read ? already_read : error; 2716 } 2625 } 2717 EXPORT_SYMBOL_GPL(filemap_read); 2626 EXPORT_SYMBOL_GPL(filemap_read); 2718 2627 2719 int kiocb_write_and_wait(struct kiocb *iocb, << 2720 { << 2721 struct address_space *mapping = iocb- << 2722 loff_t pos = iocb->ki_pos; << 2723 loff_t end = pos + count - 1; << 2724 << 2725 if (iocb->ki_flags & IOCB_NOWAIT) { << 2726 if (filemap_range_needs_write << 2727 return -EAGAIN; << 2728 return 0; << 2729 } << 2730 << 2731 return filemap_write_and_wait_range(m << 2732 } << 2733 EXPORT_SYMBOL_GPL(kiocb_write_and_wait); << 2734 << 2735 int filemap_invalidate_pages(struct address_s << 2736 loff_t pos, loff << 2737 { << 2738 int ret; << 2739 << 2740 if (nowait) { << 2741 /* we could block if there ar << 2742 if (filemap_range_has_page(ma << 2743 return -EAGAIN; << 2744 } else { << 2745 ret = filemap_write_and_wait_ << 2746 if (ret) << 2747 return ret; << 2748 } << 2749 << 2750 /* << 2751 * After a write we want buffered rea << 2752 * the new data. We invalidate clean << 2753 * about to write. We do this *befor << 2754 * without clobbering -EIOCBQUEUED fr << 2755 */ << 2756 return invalidate_inode_pages2_range( << 2757 << 2758 } << 2759 << 2760 int kiocb_invalidate_pages(struct kiocb *iocb << 2761 { << 2762 struct address_space *mapping = iocb- << 2763 << 2764 return filemap_invalidate_pages(mappi << 2765 iocb- << 2766 iocb- << 2767 } << 2768 EXPORT_SYMBOL_GPL(kiocb_invalidate_pages); << 2769 << 2770 /** 2628 /** 2771 * generic_file_read_iter - generic filesyste 2629 * generic_file_read_iter - generic filesystem read routine 2772 * @iocb: kernel I/O control block 2630 * @iocb: kernel I/O control block 2773 * @iter: destination for the data read 2631 * @iter: destination for the data read 2774 * 2632 * 2775 * This is the "read_iter()" routine for all 2633 * This is the "read_iter()" routine for all filesystems 2776 * that can use the page cache directly. 
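/*
 * Illustrative sketch only (not part of mm/filemap.c): a filesystem
 * ->read_iter that handles O_DIRECT itself and then lets filemap_read()
 * copy whatever remains through the page cache, passing along the bytes
 * already read so the return value stays cumulative (this mirrors the
 * structure of generic_file_read_iter() below).  "myfs_" is a placeholder
 * name and the direct I/O branch is elided.
 */
static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	ssize_t ret = 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * ... issue the filesystem's own direct I/O here and
		 * accumulate the bytes copied into "ret" ...
		 */
		if (ret < 0 || !iov_iter_count(to))
			return ret;
	}

	/* Buffered path: read the remainder through the page cache. */
	return filemap_read(iocb, to, ret);
}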
2634 * that can use the page cache directly. 2777 * 2635 * 2778 * The IOCB_NOWAIT flag in iocb->ki_flags ind 2636 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall 2779 * be returned when no data can be read witho 2637 * be returned when no data can be read without waiting for I/O requests 2780 * to complete; it doesn't prevent readahead. 2638 * to complete; it doesn't prevent readahead. 2781 * 2639 * 2782 * The IOCB_NOIO flag in iocb->ki_flags indic 2640 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O 2783 * requests shall be made for the read or for 2641 * requests shall be made for the read or for readahead. When no data 2784 * can be read, -EAGAIN shall be returned. W 2642 * can be read, -EAGAIN shall be returned. When readahead would be 2785 * triggered, a partial, possibly empty read 2643 * triggered, a partial, possibly empty read shall be returned. 2786 * 2644 * 2787 * Return: 2645 * Return: 2788 * * number of bytes copied, even for partial 2646 * * number of bytes copied, even for partial reads 2789 * * negative error code (or 0 if IOCB_NOIO) 2647 * * negative error code (or 0 if IOCB_NOIO) if nothing was read 2790 */ 2648 */ 2791 ssize_t 2649 ssize_t 2792 generic_file_read_iter(struct kiocb *iocb, st 2650 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) 2793 { 2651 { 2794 size_t count = iov_iter_count(iter); 2652 size_t count = iov_iter_count(iter); 2795 ssize_t retval = 0; 2653 ssize_t retval = 0; 2796 2654 2797 if (!count) 2655 if (!count) 2798 return 0; /* skip atime */ 2656 return 0; /* skip atime */ 2799 2657 2800 if (iocb->ki_flags & IOCB_DIRECT) { 2658 if (iocb->ki_flags & IOCB_DIRECT) { 2801 struct file *file = iocb->ki_ 2659 struct file *file = iocb->ki_filp; 2802 struct address_space *mapping 2660 struct address_space *mapping = file->f_mapping; 2803 struct inode *inode = mapping 2661 struct inode *inode = mapping->host; >> 2662 loff_t size; >> 2663 >> 2664 size = i_size_read(inode); >> 2665 if (iocb->ki_flags & IOCB_NOWAIT) { >> 2666 if (filemap_range_needs_writeback(mapping, iocb->ki_pos, >> 2667 iocb->ki_pos + count - 1)) >> 2668 return -EAGAIN; >> 2669 } else { >> 2670 retval = filemap_write_and_wait_range(mapping, >> 2671 iocb->ki_pos, >> 2672 iocb->ki_pos + count - 1); >> 2673 if (retval < 0) >> 2674 return retval; >> 2675 } 2804 2676 2805 retval = kiocb_write_and_wait << 2806 if (retval < 0) << 2807 return retval; << 2808 file_accessed(file); 2677 file_accessed(file); 2809 2678 2810 retval = mapping->a_ops->dire 2679 retval = mapping->a_ops->direct_IO(iocb, iter); 2811 if (retval >= 0) { 2680 if (retval >= 0) { 2812 iocb->ki_pos += retva 2681 iocb->ki_pos += retval; 2813 count -= retval; 2682 count -= retval; 2814 } 2683 } 2815 if (retval != -EIOCBQUEUED) 2684 if (retval != -EIOCBQUEUED) 2816 iov_iter_revert(iter, 2685 iov_iter_revert(iter, count - iov_iter_count(iter)); 2817 2686 2818 /* 2687 /* 2819 * Btrfs can have a short DIO 2688 * Btrfs can have a short DIO read if we encounter 2820 * compressed extents, so if 2689 * compressed extents, so if there was an error, or if 2821 * we've already read everyth 2690 * we've already read everything we wanted to, or if 2822 * there was a short read bec 2691 * there was a short read because we hit EOF, go ahead 2823 * and return. Otherwise fal 2692 * and return. Otherwise fallthrough to buffered io for 2824 * the rest of the read. Buf 2693 * the rest of the read. 
Buffered reads will not work for 2825 * DAX files, so don't bother 2694 * DAX files, so don't bother trying. 2826 */ 2695 */ 2827 if (retval < 0 || !count || I !! 2696 if (retval < 0 || !count || iocb->ki_pos >= size || 2828 return retval; !! 2697 IS_DAX(inode)) 2829 if (iocb->ki_pos >= i_size_re << 2830 return retval; 2698 return retval; 2831 } 2699 } 2832 2700 2833 return filemap_read(iocb, iter, retva 2701 return filemap_read(iocb, iter, retval); 2834 } 2702 } 2835 EXPORT_SYMBOL(generic_file_read_iter); 2703 EXPORT_SYMBOL(generic_file_read_iter); 2836 2704 2837 /* !! 2705 static inline loff_t page_seek_hole_data(struct xa_state *xas, 2838 * Splice subpages from a folio into a pipe. !! 2706 struct address_space *mapping, struct page *page, 2839 */ << 2840 size_t splice_folio_into_pipe(struct pipe_ino << 2841 struct folio *f << 2842 { << 2843 struct page *page; << 2844 size_t spliced = 0, offset = offset_i << 2845 << 2846 page = folio_page(folio, offset / PAG << 2847 size = min(size, folio_size(folio) - << 2848 offset %= PAGE_SIZE; << 2849 << 2850 while (spliced < size && << 2851 !pipe_full(pipe->head, pipe->t << 2852 struct pipe_buffer *buf = pip << 2853 size_t part = min_t(size_t, P << 2854 << 2855 *buf = (struct pipe_buffer) { << 2856 .ops = &page_cache << 2857 .page = page, << 2858 .offset = offset, << 2859 .len = part, << 2860 }; << 2861 folio_get(folio); << 2862 pipe->head++; << 2863 page++; << 2864 spliced += part; << 2865 offset = 0; << 2866 } << 2867 << 2868 return spliced; << 2869 } << 2870 << 2871 /** << 2872 * filemap_splice_read - Splice data from a << 2873 * @in: The file to read from << 2874 * @ppos: Pointer to the file position to rea << 2875 * @pipe: The pipe to splice into << 2876 * @len: The amount to splice << 2877 * @flags: The SPLICE_F_* flags << 2878 * << 2879 * This function gets folios from a file's pa << 2880 * pipe. Readahead will be called as necessa << 2881 * be used for blockdevs also. << 2882 * << 2883 * Return: On success, the number of bytes re << 2884 * will be updated if appropriate; 0 will be << 2885 * to be read; -EAGAIN will be returned if th << 2886 * other negative error code will be returned << 2887 * if the pipe has insufficient space, we rea << 2888 * hole. 
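/*
 * Illustrative sketch only (not part of mm/filemap.c): most page-cache
 * based filesystems simply wire generic_file_read_iter() straight into
 * their file_operations.  The "myfs_" name is a placeholder and the table
 * is trimmed to the entries relevant here.
 */
const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
};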
<< 2889 */ << 2890 ssize_t filemap_splice_read(struct file *in, << 2891 struct pipe_inode << 2892 size_t len, unsig << 2893 { << 2894 struct folio_batch fbatch; << 2895 struct kiocb iocb; << 2896 size_t total_spliced = 0, used, npage << 2897 loff_t isize, end_offset; << 2898 bool writably_mapped; << 2899 int i, error = 0; << 2900 << 2901 if (unlikely(*ppos >= in->f_mapping-> << 2902 return 0; << 2903 << 2904 init_sync_kiocb(&iocb, in); << 2905 iocb.ki_pos = *ppos; << 2906 << 2907 /* Work out how much data we can actu << 2908 used = pipe_occupancy(pipe->head, pip << 2909 npages = max_t(ssize_t, pipe->max_usa << 2910 len = min_t(size_t, len, npages * PAG << 2911 << 2912 folio_batch_init(&fbatch); << 2913 << 2914 do { << 2915 cond_resched(); << 2916 << 2917 if (*ppos >= i_size_read(in-> << 2918 break; << 2919 << 2920 iocb.ki_pos = *ppos; << 2921 error = filemap_get_pages(&io << 2922 if (error < 0) << 2923 break; << 2924 << 2925 /* << 2926 * i_size must be checked aft << 2927 * << 2928 * Checking i_size after the << 2929 * the correct value for "nr" << 2930 * part of the page is not co << 2931 * another truncate extends t << 2932 */ << 2933 isize = i_size_read(in->f_map << 2934 if (unlikely(*ppos >= isize)) << 2935 break; << 2936 end_offset = min_t(loff_t, is << 2937 << 2938 /* << 2939 * Once we start copying data << 2940 * cachelines that might be c << 2941 */ << 2942 writably_mapped = mapping_wri << 2943 << 2944 for (i = 0; i < folio_batch_c << 2945 struct folio *folio = << 2946 size_t n; << 2947 << 2948 if (folio_pos(folio) << 2949 goto out; << 2950 folio_mark_accessed(f << 2951 << 2952 /* << 2953 * If users can be wr << 2954 * virtual addresses, << 2955 * before reading the << 2956 */ << 2957 if (writably_mapped) << 2958 flush_dcache_ << 2959 << 2960 n = min_t(loff_t, len << 2961 n = splice_folio_into << 2962 if (!n) << 2963 goto out; << 2964 len -= n; << 2965 total_spliced += n; << 2966 *ppos += n; << 2967 in->f_ra.prev_pos = * << 2968 if (pipe_full(pipe->h << 2969 goto out; << 2970 } << 2971 << 2972 folio_batch_release(&fbatch); << 2973 } while (len); << 2974 << 2975 out: << 2976 folio_batch_release(&fbatch); << 2977 file_accessed(in); << 2978 << 2979 return total_spliced ? total_spliced << 2980 } << 2981 EXPORT_SYMBOL(filemap_splice_read); << 2982 << 2983 static inline loff_t folio_seek_hole_data(str << 2984 struct address_space *mapping << 2985 loff_t start, loff_t end, boo 2707 loff_t start, loff_t end, bool seek_data) 2986 { 2708 { 2987 const struct address_space_operations 2709 const struct address_space_operations *ops = mapping->a_ops; 2988 size_t offset, bsz = i_blocksize(mapp 2710 size_t offset, bsz = i_blocksize(mapping->host); 2989 2711 2990 if (xa_is_value(folio) || folio_test_ !! 2712 if (xa_is_value(page) || PageUptodate(page)) 2991 return seek_data ? start : en 2713 return seek_data ? start : end; 2992 if (!ops->is_partially_uptodate) 2714 if (!ops->is_partially_uptodate) 2993 return seek_data ? end : star 2715 return seek_data ? end : start; 2994 2716 2995 xas_pause(xas); 2717 xas_pause(xas); 2996 rcu_read_unlock(); 2718 rcu_read_unlock(); 2997 folio_lock(folio); !! 2719 lock_page(page); 2998 if (unlikely(folio->mapping != mappin !! 2720 if (unlikely(page->mapping != mapping)) 2999 goto unlock; 2721 goto unlock; 3000 2722 3001 offset = offset_in_folio(folio, start !! 2723 offset = offset_in_thp(page, start) & ~(bsz - 1); 3002 2724 3003 do { 2725 do { 3004 if (ops->is_partially_uptodat !! 
2726 if (ops->is_partially_uptodate(page, offset, bsz) == seek_data) 3005 << 3006 break; 2727 break; 3007 start = (start + bsz) & ~(bsz 2728 start = (start + bsz) & ~(bsz - 1); 3008 offset += bsz; 2729 offset += bsz; 3009 } while (offset < folio_size(folio)); !! 2730 } while (offset < thp_size(page)); 3010 unlock: 2731 unlock: 3011 folio_unlock(folio); !! 2732 unlock_page(page); 3012 rcu_read_lock(); 2733 rcu_read_lock(); 3013 return start; 2734 return start; 3014 } 2735 } 3015 2736 3016 static inline size_t seek_folio_size(struct x !! 2737 static inline >> 2738 unsigned int seek_page_size(struct xa_state *xas, struct page *page) 3017 { 2739 { 3018 if (xa_is_value(folio)) !! 2740 if (xa_is_value(page)) 3019 return PAGE_SIZE << xas_get_o !! 2741 return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index); 3020 return folio_size(folio); !! 2742 return thp_size(page); 3021 } 2743 } 3022 2744 3023 /** 2745 /** 3024 * mapping_seek_hole_data - Seek for SEEK_DAT 2746 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache. 3025 * @mapping: Address space to search. 2747 * @mapping: Address space to search. 3026 * @start: First byte to consider. 2748 * @start: First byte to consider. 3027 * @end: Limit of search (exclusive). 2749 * @end: Limit of search (exclusive). 3028 * @whence: Either SEEK_HOLE or SEEK_DATA. 2750 * @whence: Either SEEK_HOLE or SEEK_DATA. 3029 * 2751 * 3030 * If the page cache knows which blocks conta 2752 * If the page cache knows which blocks contain holes and which blocks 3031 * contain data, your filesystem can use this 2753 * contain data, your filesystem can use this function to implement 3032 * SEEK_HOLE and SEEK_DATA. This is useful f 2754 * SEEK_HOLE and SEEK_DATA. This is useful for filesystems which are 3033 * entirely memory-based such as tmpfs, and f 2755 * entirely memory-based such as tmpfs, and filesystems which support 3034 * unwritten extents. 2756 * unwritten extents. 3035 * 2757 * 3036 * Return: The requested offset on success, o 2758 * Return: The requested offset on success, or -ENXIO if @whence specifies 3037 * SEEK_DATA and there is no data after @star 2759 * SEEK_DATA and there is no data after @start. There is an implicit hole 3038 * after @end - 1, so SEEK_HOLE returns @end 2760 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start 3039 * and @end contain data. 2761 * and @end contain data. 3040 */ 2762 */ 3041 loff_t mapping_seek_hole_data(struct address_ 2763 loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start, 3042 loff_t end, int whence) 2764 loff_t end, int whence) 3043 { 2765 { 3044 XA_STATE(xas, &mapping->i_pages, star 2766 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); 3045 pgoff_t max = (end - 1) >> PAGE_SHIFT 2767 pgoff_t max = (end - 1) >> PAGE_SHIFT; 3046 bool seek_data = (whence == SEEK_DATA 2768 bool seek_data = (whence == SEEK_DATA); 3047 struct folio *folio; !! 2769 struct page *page; 3048 2770 3049 if (end <= start) 2771 if (end <= start) 3050 return -ENXIO; 2772 return -ENXIO; 3051 2773 3052 rcu_read_lock(); 2774 rcu_read_lock(); 3053 while ((folio = find_get_entry(&xas, !! 2775 while ((page = find_get_entry(&xas, max, XA_PRESENT))) { 3054 loff_t pos = (u64)xas.xa_inde 2776 loff_t pos = (u64)xas.xa_index << PAGE_SHIFT; 3055 size_t seek_size; !! 
2777 unsigned int seek_size; 3056 2778 3057 if (start < pos) { 2779 if (start < pos) { 3058 if (!seek_data) 2780 if (!seek_data) 3059 goto unlock; 2781 goto unlock; 3060 start = pos; 2782 start = pos; 3061 } 2783 } 3062 2784 3063 seek_size = seek_folio_size(& !! 2785 seek_size = seek_page_size(&xas, page); 3064 pos = round_up((u64)pos + 1, !! 2786 pos = round_up(pos + 1, seek_size); 3065 start = folio_seek_hole_data( !! 2787 start = page_seek_hole_data(&xas, mapping, page, start, pos, 3066 seek_data); 2788 seek_data); 3067 if (start < pos) 2789 if (start < pos) 3068 goto unlock; 2790 goto unlock; 3069 if (start >= end) 2791 if (start >= end) 3070 break; 2792 break; 3071 if (seek_size > PAGE_SIZE) 2793 if (seek_size > PAGE_SIZE) 3072 xas_set(&xas, pos >> 2794 xas_set(&xas, pos >> PAGE_SHIFT); 3073 if (!xa_is_value(folio)) !! 2795 if (!xa_is_value(page)) 3074 folio_put(folio); !! 2796 put_page(page); 3075 } 2797 } 3076 if (seek_data) 2798 if (seek_data) 3077 start = -ENXIO; 2799 start = -ENXIO; 3078 unlock: 2800 unlock: 3079 rcu_read_unlock(); 2801 rcu_read_unlock(); 3080 if (folio && !xa_is_value(folio)) !! 2802 if (page && !xa_is_value(page)) 3081 folio_put(folio); !! 2803 put_page(page); 3082 if (start > end) 2804 if (start > end) 3083 return end; 2805 return end; 3084 return start; 2806 return start; 3085 } 2807 } 3086 2808 3087 #ifdef CONFIG_MMU 2809 #ifdef CONFIG_MMU 3088 #define MMAP_LOTSAMISS (100) 2810 #define MMAP_LOTSAMISS (100) 3089 /* 2811 /* 3090 * lock_folio_maybe_drop_mmap - lock the page !! 2812 * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock 3091 * @vmf - the vm_fault for this fault. 2813 * @vmf - the vm_fault for this fault. 3092 * @folio - the folio to lock. !! 2814 * @page - the page to lock. 3093 * @fpin - the pointer to the file we may pin 2815 * @fpin - the pointer to the file we may pin (or is already pinned). 3094 * 2816 * 3095 * This works similar to lock_folio_or_retry !! 2817 * This works similar to lock_page_or_retry in that it can drop the mmap_lock. 3096 * mmap_lock. It differs in that it actually !! 2818 * It differs in that it actually returns the page locked if it returns 1 and 0 3097 * if it returns 1 and 0 if it couldn't lock !! 2819 * if it couldn't lock the page. If we did have to drop the mmap_lock then fpin 3098 * to drop the mmap_lock then fpin will point !! 2820 * will point to the pinned file and needs to be fput()'ed at a later point. 3099 * needs to be fput()'ed at a later point. << 3100 */ 2821 */ 3101 static int lock_folio_maybe_drop_mmap(struct !! 2822 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, 3102 struct f 2823 struct file **fpin) 3103 { 2824 { 3104 if (folio_trylock(folio)) !! 2825 if (trylock_page(page)) 3105 return 1; 2826 return 1; 3106 2827 3107 /* 2828 /* 3108 * NOTE! This will make us return wit 2829 * NOTE! This will make us return with VM_FAULT_RETRY, but with 3109 * the fault lock still held. That's !! 2830 * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT 3110 * is supposed to work. We have way t 2831 * is supposed to work. We have way too many special cases.. 3111 */ 2832 */ 3112 if (vmf->flags & FAULT_FLAG_RETRY_NOW 2833 if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) 3113 return 0; 2834 return 0; 3114 2835 3115 *fpin = maybe_unlock_mmap_for_io(vmf, 2836 *fpin = maybe_unlock_mmap_for_io(vmf, *fpin); 3116 if (vmf->flags & FAULT_FLAG_KILLABLE) 2837 if (vmf->flags & FAULT_FLAG_KILLABLE) { 3117 if (__folio_lock_killable(fol !! 
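/*
 * Illustrative sketch only (not part of mm/filemap.c): an ->llseek
 * implementation for a purely page-cache backed file, modelled on how
 * tmpfs uses mapping_seek_hole_data() above.  SEEK_HOLE/SEEK_DATA are
 * answered from the page cache; everything else falls back to
 * generic_file_llseek().  "myfs_" is a placeholder name.
 */
static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek(file, offset, whence);
	if (offset < 0)
		return -ENXIO;

	inode_lock(inode);
	offset = mapping_seek_hole_data(file->f_mapping, offset,
					i_size_read(inode), whence);
	if (offset >= 0)
		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	inode_unlock(inode);
	return offset;
}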
2838 if (__lock_page_killable(page)) { 3118 /* 2839 /* 3119 * We didn't have the !! 2840 * We didn't have the right flags to drop the mmap_lock, 3120 * fault lock, but al !! 2841 * but all fault_handlers only check for fatal signals 3121 * for fatal signals !! 2842 * if we return VM_FAULT_RETRY, so we need to drop the 3122 * so we need to drop !! 2843 * mmap_lock here and return 0 if we don't have a fpin. 3123 * return 0 if we don << 3124 */ 2844 */ 3125 if (*fpin == NULL) 2845 if (*fpin == NULL) 3126 release_fault !! 2846 mmap_read_unlock(vmf->vma->vm_mm); 3127 return 0; 2847 return 0; 3128 } 2848 } 3129 } else 2849 } else 3130 __folio_lock(folio); !! 2850 __lock_page(page); 3131 << 3132 return 1; 2851 return 1; 3133 } 2852 } 3134 2853 >> 2854 3135 /* 2855 /* 3136 * Synchronous readahead happens when we don' 2856 * Synchronous readahead happens when we don't even find a page in the page 3137 * cache at all. We don't want to perform IO 2857 * cache at all. We don't want to perform IO under the mmap sem, so if we have 3138 * to drop the mmap sem we return the file th 2858 * to drop the mmap sem we return the file that was pinned in order for us to do 3139 * that. If we didn't pin a file then we ret 2859 * that. If we didn't pin a file then we return NULL. The file that is 3140 * returned needs to be fput()'ed when we're 2860 * returned needs to be fput()'ed when we're done with it. 3141 */ 2861 */ 3142 static struct file *do_sync_mmap_readahead(st 2862 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) 3143 { 2863 { 3144 struct file *file = vmf->vma->vm_file 2864 struct file *file = vmf->vma->vm_file; 3145 struct file_ra_state *ra = &file->f_r 2865 struct file_ra_state *ra = &file->f_ra; 3146 struct address_space *mapping = file- 2866 struct address_space *mapping = file->f_mapping; 3147 DEFINE_READAHEAD(ractl, file, ra, map 2867 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); 3148 struct file *fpin = NULL; 2868 struct file *fpin = NULL; 3149 unsigned long vm_flags = vmf->vma->vm << 3150 unsigned int mmap_miss; 2869 unsigned int mmap_miss; 3151 2870 3152 #ifdef CONFIG_TRANSPARENT_HUGEPAGE << 3153 /* Use the readahead code, even if re << 3154 if ((vm_flags & VM_HUGEPAGE) && HPAGE << 3155 fpin = maybe_unlock_mmap_for_ << 3156 ractl._index &= ~((unsigned l << 3157 ra->size = HPAGE_PMD_NR; << 3158 /* << 3159 * Fetch two PMD folios, so w << 3160 * readahead, unless we've be << 3161 */ << 3162 if (!(vm_flags & VM_RAND_READ << 3163 ra->size *= 2; << 3164 ra->async_size = HPAGE_PMD_NR << 3165 page_cache_ra_order(&ractl, r << 3166 return fpin; << 3167 } << 3168 #endif << 3169 << 3170 /* If we don't want any read-ahead, d 2871 /* If we don't want any read-ahead, don't bother */ 3171 if (vm_flags & VM_RAND_READ) !! 2872 if (vmf->vma->vm_flags & VM_RAND_READ) 3172 return fpin; 2873 return fpin; 3173 if (!ra->ra_pages) 2874 if (!ra->ra_pages) 3174 return fpin; 2875 return fpin; 3175 2876 3176 if (vm_flags & VM_SEQ_READ) { !! 
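/*
 * The fpin contract stated above, seen from the caller's side (sketch only;
 * the fault path further down in this file is the in-tree consumer):
 *
 *	struct file *fpin = NULL;
 *
 *	if (!lock_page_maybe_drop_mmap(vmf, page, &fpin))
 *		goto out_retry;		// mmap_lock may already be gone
 *	...
 * out_retry:
 *	if (fpin)
 *		fput(fpin);		// drop the file reference we pinned
 *	return ret | VM_FAULT_RETRY;
 */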
2877 if (vmf->vma->vm_flags & VM_SEQ_READ) { 3177 fpin = maybe_unlock_mmap_for_ 2878 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3178 page_cache_sync_ra(&ractl, ra 2879 page_cache_sync_ra(&ractl, ra->ra_pages); 3179 return fpin; 2880 return fpin; 3180 } 2881 } 3181 2882 3182 /* Avoid banging the cache line if no 2883 /* Avoid banging the cache line if not needed */ 3183 mmap_miss = READ_ONCE(ra->mmap_miss); 2884 mmap_miss = READ_ONCE(ra->mmap_miss); 3184 if (mmap_miss < MMAP_LOTSAMISS * 10) 2885 if (mmap_miss < MMAP_LOTSAMISS * 10) 3185 WRITE_ONCE(ra->mmap_miss, ++m 2886 WRITE_ONCE(ra->mmap_miss, ++mmap_miss); 3186 2887 3187 /* 2888 /* 3188 * Do we miss much more than hit in t 2889 * Do we miss much more than hit in this file? If so, 3189 * stop bothering with read-ahead. It 2890 * stop bothering with read-ahead. It will only hurt. 3190 */ 2891 */ 3191 if (mmap_miss > MMAP_LOTSAMISS) 2892 if (mmap_miss > MMAP_LOTSAMISS) 3192 return fpin; 2893 return fpin; 3193 2894 3194 /* 2895 /* 3195 * mmap read-around 2896 * mmap read-around 3196 */ 2897 */ 3197 fpin = maybe_unlock_mmap_for_io(vmf, 2898 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3198 ra->start = max_t(long, 0, vmf->pgoff 2899 ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2); 3199 ra->size = ra->ra_pages; 2900 ra->size = ra->ra_pages; 3200 ra->async_size = ra->ra_pages / 4; 2901 ra->async_size = ra->ra_pages / 4; 3201 ractl._index = ra->start; 2902 ractl._index = ra->start; 3202 page_cache_ra_order(&ractl, ra, 0); !! 2903 do_page_cache_ra(&ractl, ra->size, ra->async_size); 3203 return fpin; 2904 return fpin; 3204 } 2905 } 3205 2906 3206 /* 2907 /* 3207 * Asynchronous readahead happens when we fin 2908 * Asynchronous readahead happens when we find the page and PG_readahead, 3208 * so we want to possibly extend the readahea 2909 * so we want to possibly extend the readahead further. We return the file that 3209 * was pinned if we have to drop the mmap_loc 2910 * was pinned if we have to drop the mmap_lock in order to do IO. 3210 */ 2911 */ 3211 static struct file *do_async_mmap_readahead(s 2912 static struct file *do_async_mmap_readahead(struct vm_fault *vmf, 3212 s !! 2913 struct page *page) 3213 { 2914 { 3214 struct file *file = vmf->vma->vm_file 2915 struct file *file = vmf->vma->vm_file; 3215 struct file_ra_state *ra = &file->f_r 2916 struct file_ra_state *ra = &file->f_ra; 3216 DEFINE_READAHEAD(ractl, file, ra, fil !! 2917 struct address_space *mapping = file->f_mapping; 3217 struct file *fpin = NULL; 2918 struct file *fpin = NULL; 3218 unsigned int mmap_miss; 2919 unsigned int mmap_miss; >> 2920 pgoff_t offset = vmf->pgoff; 3219 2921 3220 /* If we don't want any read-ahead, d 2922 /* If we don't want any read-ahead, don't bother */ 3221 if (vmf->vma->vm_flags & VM_RAND_READ 2923 if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) 3222 return fpin; 2924 return fpin; 3223 << 3224 mmap_miss = READ_ONCE(ra->mmap_miss); 2925 mmap_miss = READ_ONCE(ra->mmap_miss); 3225 if (mmap_miss) 2926 if (mmap_miss) 3226 WRITE_ONCE(ra->mmap_miss, --m 2927 WRITE_ONCE(ra->mmap_miss, --mmap_miss); 3227 !! 2928 if (PageReadahead(page)) { 3228 if (folio_test_readahead(folio)) { << 3229 fpin = maybe_unlock_mmap_for_ 2929 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3230 page_cache_async_ra(&ractl, f !! 
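/*
 * Worked example of the read-around window set up above (numbers are
 * illustrative only): with ra->ra_pages == 32 and a fault at pgoff 100,
 * ra->start = max(0, 100 - 32/2) = 84, ra->size = 32 and ra->async_size = 8,
 * so pages 84..115 are read and a readahead marker is left 8 pages before
 * the end of the window so the next batch can be started before the
 * application actually reaches it.
 */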
2930 page_cache_async_readahead(mapping, ra, file, >> 2931 page, offset, ra->ra_pages); 3231 } 2932 } 3232 return fpin; 2933 return fpin; 3233 } 2934 } 3234 2935 3235 static vm_fault_t filemap_fault_recheck_pte_n << 3236 { << 3237 struct vm_area_struct *vma = vmf->vma << 3238 vm_fault_t ret = 0; << 3239 pte_t *ptep; << 3240 << 3241 /* << 3242 * We might have COW'ed a pagecache f << 3243 * anon folio mapped. The original pa << 3244 * might have been evicted. During a << 3245 * the PTE, such as done in do_numa_p << 3246 * temporarily clear the PTE under PT << 3247 * "none" when not holding the PT loc << 3248 * << 3249 * Not rechecking the PTE under PT lo << 3250 * major fault in an mlock'ed region. << 3251 * scenario while holding the PT lock << 3252 * scenarios. Recheck the PTE without << 3253 * the number of times we hold PT loc << 3254 */ << 3255 if (!(vma->vm_flags & VM_LOCKED)) << 3256 return 0; << 3257 << 3258 if (!(vmf->flags & FAULT_FLAG_ORIG_PT << 3259 return 0; << 3260 << 3261 ptep = pte_offset_map_nolock(vma->vm_ << 3262 &vmf->pt << 3263 if (unlikely(!ptep)) << 3264 return VM_FAULT_NOPAGE; << 3265 << 3266 if (unlikely(!pte_none(ptep_get_lockl << 3267 ret = VM_FAULT_NOPAGE; << 3268 } else { << 3269 spin_lock(vmf->ptl); << 3270 if (unlikely(!pte_none(ptep_g << 3271 ret = VM_FAULT_NOPAGE << 3272 spin_unlock(vmf->ptl); << 3273 } << 3274 pte_unmap(ptep); << 3275 return ret; << 3276 } << 3277 << 3278 /** 2936 /** 3279 * filemap_fault - read in file data for page 2937 * filemap_fault - read in file data for page fault handling 3280 * @vmf: struct vm_fault containing de 2938 * @vmf: struct vm_fault containing details of the fault 3281 * 2939 * 3282 * filemap_fault() is invoked via the vma ope 2940 * filemap_fault() is invoked via the vma operations vector for a 3283 * mapped memory region to read in file data 2941 * mapped memory region to read in file data during a page fault. 3284 * 2942 * 3285 * The goto's are kind of ugly, but this stre 2943 * The goto's are kind of ugly, but this streamlines the normal case of having 3286 * it in the page cache, and handles the spec 2944 * it in the page cache, and handles the special cases reasonably without 3287 * having a lot of duplicated code. 2945 * having a lot of duplicated code. 3288 * 2946 * 3289 * vma->vm_mm->mmap_lock must be held on entr 2947 * vma->vm_mm->mmap_lock must be held on entry. 3290 * 2948 * 3291 * If our return value has VM_FAULT_RETRY set 2949 * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock 3292 * may be dropped before doing I/O or by lock !! 2950 * may be dropped before doing I/O or by lock_page_maybe_drop_mmap(). 3293 * 2951 * 3294 * If our return value does not have VM_FAULT 2952 * If our return value does not have VM_FAULT_RETRY set, the mmap_lock 3295 * has not been released. 2953 * has not been released. 3296 * 2954 * 3297 * We never return with VM_FAULT_RETRY and a 2955 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set. 3298 * 2956 * 3299 * Return: bitwise-OR of %VM_FAULT_ codes. 2957 * Return: bitwise-OR of %VM_FAULT_ codes. 
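 *
 * Most filesystems reach this handler through generic_file_mmap(), which
 * installs generic_file_vm_ops below.  A hypothetical wrapper that takes a
 * filesystem lock around the generic handler might look like (myfs_fault
 * and MYFS_I are made-up names, shown only as a sketch):
 *
 *	static vm_fault_t myfs_fault(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		down_read(&MYFS_I(inode)->i_mmap_sem);
 *		ret = filemap_fault(vmf);
 *		up_read(&MYFS_I(inode)->i_mmap_sem);
 *		return ret;
 *	}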
3300 */ 2958 */ 3301 vm_fault_t filemap_fault(struct vm_fault *vmf 2959 vm_fault_t filemap_fault(struct vm_fault *vmf) 3302 { 2960 { 3303 int error; 2961 int error; 3304 struct file *file = vmf->vma->vm_file 2962 struct file *file = vmf->vma->vm_file; 3305 struct file *fpin = NULL; 2963 struct file *fpin = NULL; 3306 struct address_space *mapping = file- 2964 struct address_space *mapping = file->f_mapping; 3307 struct inode *inode = mapping->host; 2965 struct inode *inode = mapping->host; 3308 pgoff_t max_idx, index = vmf->pgoff; !! 2966 pgoff_t offset = vmf->pgoff; 3309 struct folio *folio; !! 2967 pgoff_t max_off; >> 2968 struct page *page; 3310 vm_fault_t ret = 0; 2969 vm_fault_t ret = 0; 3311 bool mapping_locked = false; << 3312 2970 3313 max_idx = DIV_ROUND_UP(i_size_read(in !! 2971 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 3314 if (unlikely(index >= max_idx)) !! 2972 if (unlikely(offset >= max_off)) 3315 return VM_FAULT_SIGBUS; 2973 return VM_FAULT_SIGBUS; 3316 2974 3317 trace_mm_filemap_fault(mapping, index << 3318 << 3319 /* 2975 /* 3320 * Do we have something in the page c 2976 * Do we have something in the page cache already? 3321 */ 2977 */ 3322 folio = filemap_get_folio(mapping, in !! 2978 page = find_get_page(mapping, offset); 3323 if (likely(!IS_ERR(folio))) { !! 2979 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { 3324 /* 2980 /* 3325 * We found the page, so try !! 2981 * We found the page, so try async readahead before 3326 * the lock. !! 2982 * waiting for the lock. 3327 */ 2983 */ 3328 if (!(vmf->flags & FAULT_FLAG !! 2984 fpin = do_async_mmap_readahead(vmf, page); 3329 fpin = do_async_mmap_ !! 2985 } else if (!page) { 3330 if (unlikely(!folio_test_upto << 3331 filemap_invalidate_lo << 3332 mapping_locked = true << 3333 } << 3334 } else { << 3335 ret = filemap_fault_recheck_p << 3336 if (unlikely(ret)) << 3337 return ret; << 3338 << 3339 /* No page in the page cache 2986 /* No page in the page cache at all */ 3340 count_vm_event(PGMAJFAULT); 2987 count_vm_event(PGMAJFAULT); 3341 count_memcg_event_mm(vmf->vma 2988 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); 3342 ret = VM_FAULT_MAJOR; 2989 ret = VM_FAULT_MAJOR; 3343 fpin = do_sync_mmap_readahead 2990 fpin = do_sync_mmap_readahead(vmf); 3344 retry_find: 2991 retry_find: 3345 /* !! 2992 page = pagecache_get_page(mapping, offset, 3346 * See comment in filemap_cre << 3347 * invalidate_lock << 3348 */ << 3349 if (!mapping_locked) { << 3350 filemap_invalidate_lo << 3351 mapping_locked = true << 3352 } << 3353 folio = __filemap_get_folio(m << 3354 FGP 2993 FGP_CREAT|FGP_FOR_MMAP, 3355 vmf 2994 vmf->gfp_mask); 3356 if (IS_ERR(folio)) { !! 2995 if (!page) { 3357 if (fpin) 2996 if (fpin) 3358 goto out_retr 2997 goto out_retry; 3359 filemap_invalidate_un << 3360 return VM_FAULT_OOM; 2998 return VM_FAULT_OOM; 3361 } 2999 } 3362 } 3000 } 3363 3001 3364 if (!lock_folio_maybe_drop_mmap(vmf, !! 3002 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) 3365 goto out_retry; 3003 goto out_retry; 3366 3004 3367 /* Did it get truncated? */ 3005 /* Did it get truncated? */ 3368 if (unlikely(folio->mapping != mappin !! 3006 if (unlikely(compound_head(page)->mapping != mapping)) { 3369 folio_unlock(folio); !! 3007 unlock_page(page); 3370 folio_put(folio); !! 3008 put_page(page); 3371 goto retry_find; 3009 goto retry_find; 3372 } 3010 } 3373 VM_BUG_ON_FOLIO(!folio_contains(folio !! 3011 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); 3374 3012 3375 /* 3013 /* 3376 * We have a locked folio in the page !! 
3014 * We have a locked page in the page cache, now we need to check 3377 * that it's up-to-date. If not, it i !! 3015 * that it's up-to-date. If not, it is going to be due to an error. 3378 * or because readahead was otherwise << 3379 */ 3016 */ 3380 if (unlikely(!folio_test_uptodate(fol !! 3017 if (unlikely(!PageUptodate(page))) 3381 /* << 3382 * If the invalidate lock is << 3383 * and uptodate and now it is << 3384 * didn't hold the page lock << 3385 * everything, get the invali << 3386 */ << 3387 if (!mapping_locked) { << 3388 folio_unlock(folio); << 3389 folio_put(folio); << 3390 goto retry_find; << 3391 } << 3392 << 3393 /* << 3394 * OK, the folio is really no << 3395 * VMA has the VM_RAND_READ f << 3396 * arose. Let's read it in di << 3397 */ << 3398 goto page_not_uptodate; 3018 goto page_not_uptodate; 3399 } << 3400 3019 3401 /* 3020 /* 3402 * We've made it this far and we had 3021 * We've made it this far and we had to drop our mmap_lock, now is the 3403 * time to return to the upper layer 3022 * time to return to the upper layer and have it re-find the vma and 3404 * redo the fault. 3023 * redo the fault. 3405 */ 3024 */ 3406 if (fpin) { 3025 if (fpin) { 3407 folio_unlock(folio); !! 3026 unlock_page(page); 3408 goto out_retry; 3027 goto out_retry; 3409 } 3028 } 3410 if (mapping_locked) << 3411 filemap_invalidate_unlock_sha << 3412 3029 3413 /* 3030 /* 3414 * Found the page and have a referenc 3031 * Found the page and have a reference on it. 3415 * We must recheck i_size under page 3032 * We must recheck i_size under page lock. 3416 */ 3033 */ 3417 max_idx = DIV_ROUND_UP(i_size_read(in !! 3034 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 3418 if (unlikely(index >= max_idx)) { !! 3035 if (unlikely(offset >= max_off)) { 3419 folio_unlock(folio); !! 3036 unlock_page(page); 3420 folio_put(folio); !! 3037 put_page(page); 3421 return VM_FAULT_SIGBUS; 3038 return VM_FAULT_SIGBUS; 3422 } 3039 } 3423 3040 3424 vmf->page = folio_file_page(folio, in !! 3041 vmf->page = page; 3425 return ret | VM_FAULT_LOCKED; 3042 return ret | VM_FAULT_LOCKED; 3426 3043 3427 page_not_uptodate: 3044 page_not_uptodate: 3428 /* 3045 /* 3429 * Umm, take care of errors if the pa 3046 * Umm, take care of errors if the page isn't up-to-date. 3430 * Try to re-read it _once_. We do th 3047 * Try to re-read it _once_. We do this synchronously, 3431 * because there really aren't any pe 3048 * because there really aren't any performance issues here 3432 * and we need to check for errors. 3049 * and we need to check for errors. 3433 */ 3050 */ 3434 fpin = maybe_unlock_mmap_for_io(vmf, 3051 fpin = maybe_unlock_mmap_for_io(vmf, fpin); 3435 error = filemap_read_folio(file, mapp !! 3052 error = filemap_read_page(file, mapping, page); 3436 if (fpin) 3053 if (fpin) 3437 goto out_retry; 3054 goto out_retry; 3438 folio_put(folio); !! 3055 put_page(page); 3439 3056 3440 if (!error || error == AOP_TRUNCATED_ 3057 if (!error || error == AOP_TRUNCATED_PAGE) 3441 goto retry_find; 3058 goto retry_find; 3442 filemap_invalidate_unlock_shared(mapp << 3443 3059 3444 return VM_FAULT_SIGBUS; 3060 return VM_FAULT_SIGBUS; 3445 3061 3446 out_retry: 3062 out_retry: 3447 /* 3063 /* 3448 * We dropped the mmap_lock, we need 3064 * We dropped the mmap_lock, we need to return to the fault handler to 3449 * re-find the vma and come back and 3065 * re-find the vma and come back and find our hopefully still populated 3450 * page. 3066 * page. 3451 */ 3067 */ 3452 if (!IS_ERR(folio)) !! 3068 if (page) 3453 folio_put(folio); !! 
3069 put_page(page); 3454 if (mapping_locked) << 3455 filemap_invalidate_unlock_sha << 3456 if (fpin) 3070 if (fpin) 3457 fput(fpin); 3071 fput(fpin); 3458 return ret | VM_FAULT_RETRY; 3072 return ret | VM_FAULT_RETRY; 3459 } 3073 } 3460 EXPORT_SYMBOL(filemap_fault); 3074 EXPORT_SYMBOL(filemap_fault); 3461 3075 3462 static bool filemap_map_pmd(struct vm_fault * !! 3076 static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page) 3463 pgoff_t start) << 3464 { 3077 { 3465 struct mm_struct *mm = vmf->vma->vm_m 3078 struct mm_struct *mm = vmf->vma->vm_mm; 3466 3079 3467 /* Huge page is mapped? No need to pr 3080 /* Huge page is mapped? No need to proceed. */ 3468 if (pmd_trans_huge(*vmf->pmd)) { 3081 if (pmd_trans_huge(*vmf->pmd)) { 3469 folio_unlock(folio); !! 3082 unlock_page(page); 3470 folio_put(folio); !! 3083 put_page(page); 3471 return true; 3084 return true; 3472 } 3085 } 3473 3086 3474 if (pmd_none(*vmf->pmd) && folio_test !! 3087 if (pmd_none(*vmf->pmd) && PageTransHuge(page)) { 3475 struct page *page = folio_fil !! 3088 vm_fault_t ret = do_set_pmd(vmf, page); 3476 vm_fault_t ret = do_set_pmd(v !! 3089 if (!ret) { 3477 if (!ret) { !! 3090 /* The page is mapped successfully, reference consumed. */ 3478 /* The page is mapped !! 3091 unlock_page(page); 3479 folio_unlock(folio); !! 3092 return true; 3480 return true; !! 3093 } >> 3094 } >> 3095 >> 3096 if (pmd_none(*vmf->pmd)) { >> 3097 vmf->ptl = pmd_lock(mm, vmf->pmd); >> 3098 if (likely(pmd_none(*vmf->pmd))) { >> 3099 mm_inc_nr_ptes(mm); >> 3100 pmd_populate(mm, vmf->pmd, vmf->prealloc_pte); >> 3101 vmf->prealloc_pte = NULL; 3481 } 3102 } >> 3103 spin_unlock(vmf->ptl); 3482 } 3104 } 3483 3105 3484 if (pmd_none(*vmf->pmd) && vmf->preal !! 3106 /* See comment in handle_pte_fault() */ 3485 pmd_install(mm, vmf->pmd, &vm !! 3107 if (pmd_devmap_trans_unstable(vmf->pmd)) { >> 3108 unlock_page(page); >> 3109 put_page(page); >> 3110 return true; >> 3111 } 3486 3112 3487 return false; 3113 return false; 3488 } 3114 } 3489 3115 3490 static struct folio *next_uptodate_folio(stru !! 3116 static struct page *next_uptodate_page(struct page *page, 3491 struct address_space *mapping !! 3117 struct address_space *mapping, >> 3118 struct xa_state *xas, pgoff_t end_pgoff) 3492 { 3119 { 3493 struct folio *folio = xas_next_entry( << 3494 unsigned long max_idx; 3120 unsigned long max_idx; 3495 3121 3496 do { 3122 do { 3497 if (!folio) !! 3123 if (!page) 3498 return NULL; 3124 return NULL; 3499 if (xas_retry(xas, folio)) !! 3125 if (xas_retry(xas, page)) 3500 continue; 3126 continue; 3501 if (xa_is_value(folio)) !! 3127 if (xa_is_value(page)) 3502 continue; 3128 continue; 3503 if (folio_test_locked(folio)) !! 3129 if (PageLocked(page)) 3504 continue; 3130 continue; 3505 if (!folio_try_get(folio)) !! 3131 if (!page_cache_get_speculative(page)) 3506 continue; 3132 continue; 3507 /* Has the page moved or been 3133 /* Has the page moved or been split? */ 3508 if (unlikely(folio != xas_rel !! 3134 if (unlikely(page != xas_reload(xas))) >> 3135 goto skip; >> 3136 if (!PageUptodate(page) || PageReadahead(page)) 3509 goto skip; 3137 goto skip; 3510 if (!folio_test_uptodate(foli !! 3138 if (PageHWPoison(page)) 3511 goto skip; 3139 goto skip; 3512 if (!folio_trylock(folio)) !! 3140 if (!trylock_page(page)) 3513 goto skip; 3141 goto skip; 3514 if (folio->mapping != mapping !! 3142 if (page->mapping != mapping) 3515 goto unlock; 3143 goto unlock; 3516 if (!folio_test_uptodate(foli !! 
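/*
 * What VM_FAULT_RETRY means to the caller, in sketch form (the arch fault
 * handlers live outside this file and details vary per architecture):
 *
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault & VM_FAULT_RETRY) {
 *		// the fault path released mmap_lock for us
 *		flags |= FAULT_FLAG_TRIED;	// only one retry round
 *		mmap_read_lock(mm);
 *		goto retry;			// look the vma up again
 *	}
 */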
3144 if (!PageUptodate(page)) 3517 goto unlock; 3145 goto unlock; 3518 max_idx = DIV_ROUND_UP(i_size 3146 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); 3519 if (xas->xa_index >= max_idx) 3147 if (xas->xa_index >= max_idx) 3520 goto unlock; 3148 goto unlock; 3521 return folio; !! 3149 return page; 3522 unlock: 3150 unlock: 3523 folio_unlock(folio); !! 3151 unlock_page(page); 3524 skip: 3152 skip: 3525 folio_put(folio); !! 3153 put_page(page); 3526 } while ((folio = xas_next_entry(xas, !! 3154 } while ((page = xas_next_entry(xas, end_pgoff)) != NULL); 3527 3155 3528 return NULL; 3156 return NULL; 3529 } 3157 } 3530 3158 3531 /* !! 3159 static inline struct page *first_map_page(struct address_space *mapping, 3532 * Map page range [start_page, start_page + n !! 3160 struct xa_state *xas, 3533 * start_page is gotten from start by folio_p !! 3161 pgoff_t end_pgoff) 3534 */ << 3535 static vm_fault_t filemap_map_folio_range(str << 3536 struct folio *folio, << 3537 unsigned long addr, u << 3538 unsigned long *rss, u << 3539 { 3162 { 3540 vm_fault_t ret = 0; !! 3163 return next_uptodate_page(xas_find(xas, end_pgoff), 3541 struct page *page = folio_page(folio, !! 3164 mapping, xas, end_pgoff); 3542 unsigned int count = 0; << 3543 pte_t *old_ptep = vmf->pte; << 3544 << 3545 do { << 3546 if (PageHWPoison(page + count << 3547 goto skip; << 3548 << 3549 /* << 3550 * If there are too many foli << 3551 * in a file, they will proba << 3552 * In such situation, read-ah << 3553 * Don't decrease mmap_miss i << 3554 * we can stop read-ahead. << 3555 */ << 3556 if (!folio_test_workingset(fo << 3557 (*mmap_miss)++; << 3558 << 3559 /* << 3560 * NOTE: If there're PTE mark << 3561 * handled in the specific fa << 3562 * fault-around logic. << 3563 */ << 3564 if (!pte_none(ptep_get(&vmf-> << 3565 goto skip; << 3566 << 3567 count++; << 3568 continue; << 3569 skip: << 3570 if (count) { << 3571 set_pte_range(vmf, fo << 3572 *rss += count; << 3573 folio_ref_add(folio, << 3574 if (in_range(vmf->add << 3575 ret = VM_FAUL << 3576 } << 3577 << 3578 count++; << 3579 page += count; << 3580 vmf->pte += count; << 3581 addr += count * PAGE_SIZE; << 3582 count = 0; << 3583 } while (--nr_pages > 0); << 3584 << 3585 if (count) { << 3586 set_pte_range(vmf, folio, pag << 3587 *rss += count; << 3588 folio_ref_add(folio, count); << 3589 if (in_range(vmf->address, ad << 3590 ret = VM_FAULT_NOPAGE << 3591 } << 3592 << 3593 vmf->pte = old_ptep; << 3594 << 3595 return ret; << 3596 } 3165 } 3597 3166 3598 static vm_fault_t filemap_map_order0_folio(st !! 3167 static inline struct page *next_map_page(struct address_space *mapping, 3599 struct folio *folio, unsigned !! 3168 struct xa_state *xas, 3600 unsigned long *rss, unsigned !! 3169 pgoff_t end_pgoff) 3601 { 3170 { 3602 vm_fault_t ret = 0; !! 3171 return next_uptodate_page(xas_next_entry(xas, end_pgoff), 3603 struct page *page = &folio->page; !! 3172 mapping, xas, end_pgoff); 3604 << 3605 if (PageHWPoison(page)) << 3606 return ret; << 3607 << 3608 /* See comment of filemap_map_folio_r << 3609 if (!folio_test_workingset(folio)) << 3610 (*mmap_miss)++; << 3611 << 3612 /* << 3613 * NOTE: If there're PTE markers, we' << 3614 * handled in the specific fault path << 3615 * the fault-around logic. 
<< 3616 */ << 3617 if (!pte_none(ptep_get(vmf->pte))) << 3618 return ret; << 3619 << 3620 if (vmf->address == addr) << 3621 ret = VM_FAULT_NOPAGE; << 3622 << 3623 set_pte_range(vmf, folio, page, 1, ad << 3624 (*rss)++; << 3625 folio_ref_inc(folio); << 3626 << 3627 return ret; << 3628 } 3173 } 3629 3174 3630 vm_fault_t filemap_map_pages(struct vm_fault 3175 vm_fault_t filemap_map_pages(struct vm_fault *vmf, 3631 pgoff_t start_pg 3176 pgoff_t start_pgoff, pgoff_t end_pgoff) 3632 { 3177 { 3633 struct vm_area_struct *vma = vmf->vma 3178 struct vm_area_struct *vma = vmf->vma; 3634 struct file *file = vma->vm_file; 3179 struct file *file = vma->vm_file; 3635 struct address_space *mapping = file- 3180 struct address_space *mapping = file->f_mapping; 3636 pgoff_t file_end, last_pgoff = start_ !! 3181 pgoff_t last_pgoff = start_pgoff; 3637 unsigned long addr; 3182 unsigned long addr; 3638 XA_STATE(xas, &mapping->i_pages, star 3183 XA_STATE(xas, &mapping->i_pages, start_pgoff); 3639 struct folio *folio; !! 3184 struct page *head, *page; >> 3185 unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss); 3640 vm_fault_t ret = 0; 3186 vm_fault_t ret = 0; 3641 unsigned long rss = 0; << 3642 unsigned int nr_pages = 0, mmap_miss << 3643 3187 3644 rcu_read_lock(); 3188 rcu_read_lock(); 3645 folio = next_uptodate_folio(&xas, map !! 3189 head = first_map_page(mapping, &xas, end_pgoff); 3646 if (!folio) !! 3190 if (!head) 3647 goto out; 3191 goto out; 3648 3192 3649 if (filemap_map_pmd(vmf, folio, start !! 3193 if (filemap_map_pmd(vmf, head)) { 3650 ret = VM_FAULT_NOPAGE; 3194 ret = VM_FAULT_NOPAGE; 3651 goto out; 3195 goto out; 3652 } 3196 } 3653 3197 3654 addr = vma->vm_start + ((start_pgoff 3198 addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT); 3655 vmf->pte = pte_offset_map_lock(vma->v 3199 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); 3656 if (!vmf->pte) { << 3657 folio_unlock(folio); << 3658 folio_put(folio); << 3659 goto out; << 3660 } << 3661 << 3662 file_end = DIV_ROUND_UP(i_size_read(m << 3663 if (end_pgoff > file_end) << 3664 end_pgoff = file_end; << 3665 << 3666 folio_type = mm_counter_file(folio); << 3667 do { 3200 do { 3668 unsigned long end; !! 3201 page = find_subpage(head, xas.xa_index); >> 3202 if (PageHWPoison(page)) >> 3203 goto unlock; >> 3204 >> 3205 if (mmap_miss > 0) >> 3206 mmap_miss--; 3669 3207 3670 addr += (xas.xa_index - last_ 3208 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT; 3671 vmf->pte += xas.xa_index - la 3209 vmf->pte += xas.xa_index - last_pgoff; 3672 last_pgoff = xas.xa_index; 3210 last_pgoff = xas.xa_index; 3673 end = folio_next_index(folio) << 3674 nr_pages = min(end, end_pgoff << 3675 3211 3676 if (!folio_test_large(folio)) !! 3212 if (!pte_none(*vmf->pte)) 3677 ret |= filemap_map_or !! 3213 goto unlock; 3678 folio !! 3214 3679 else !! 3215 /* We're about to handle the fault */ 3680 ret |= filemap_map_fo !! 3216 if (vmf->address == addr) 3681 xas.x !! 3217 ret = VM_FAULT_NOPAGE; 3682 nr_pa !! 3218 3683 !! 3219 do_set_pte(vmf, page, addr); 3684 folio_unlock(folio); !! 3220 /* no need to invalidate: a not-present page won't be cached */ 3685 folio_put(folio); !! 3221 update_mmu_cache(vma, addr, vmf->pte); 3686 } while ((folio = next_uptodate_folio !! 3222 unlock_page(head); 3687 add_mm_counter(vma->vm_mm, folio_type !! 
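/*
 * The lockless lookup loop above leans on the usual RCU page-cache pattern:
 * take a speculative reference, then confirm the slot still points at the
 * same page before trusting it.  Stripped-down sketch (value-entry and
 * retry handling omitted):
 *
 *	rcu_read_lock();
 *	page = xas_load(&xas);
 *	if (!page_cache_get_speculative(page))
 *		goto retry;			// page was being freed
 *	if (unlikely(page != xas_reload(&xas))) {
 *		put_page(page);			// raced with removal or split
 *		goto retry;
 *	}
 *	rcu_read_unlock();
 */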
3223 continue; >> 3224 unlock: >> 3225 unlock_page(head); >> 3226 put_page(head); >> 3227 } while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL); 3688 pte_unmap_unlock(vmf->pte, vmf->ptl); 3228 pte_unmap_unlock(vmf->pte, vmf->ptl); 3689 trace_mm_filemap_map_pages(mapping, s << 3690 out: 3229 out: 3691 rcu_read_unlock(); 3230 rcu_read_unlock(); 3692 !! 3231 WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss); 3693 mmap_miss_saved = READ_ONCE(file->f_r << 3694 if (mmap_miss >= mmap_miss_saved) << 3695 WRITE_ONCE(file->f_ra.mmap_mi << 3696 else << 3697 WRITE_ONCE(file->f_ra.mmap_mi << 3698 << 3699 return ret; 3232 return ret; 3700 } 3233 } 3701 EXPORT_SYMBOL(filemap_map_pages); 3234 EXPORT_SYMBOL(filemap_map_pages); 3702 3235 3703 vm_fault_t filemap_page_mkwrite(struct vm_fau 3236 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) 3704 { 3237 { 3705 struct address_space *mapping = vmf-> 3238 struct address_space *mapping = vmf->vma->vm_file->f_mapping; 3706 struct folio *folio = page_folio(vmf- !! 3239 struct page *page = vmf->page; 3707 vm_fault_t ret = VM_FAULT_LOCKED; 3240 vm_fault_t ret = VM_FAULT_LOCKED; 3708 3241 3709 sb_start_pagefault(mapping->host->i_s 3242 sb_start_pagefault(mapping->host->i_sb); 3710 file_update_time(vmf->vma->vm_file); 3243 file_update_time(vmf->vma->vm_file); 3711 folio_lock(folio); !! 3244 lock_page(page); 3712 if (folio->mapping != mapping) { !! 3245 if (page->mapping != mapping) { 3713 folio_unlock(folio); !! 3246 unlock_page(page); 3714 ret = VM_FAULT_NOPAGE; 3247 ret = VM_FAULT_NOPAGE; 3715 goto out; 3248 goto out; 3716 } 3249 } 3717 /* 3250 /* 3718 * We mark the folio dirty already he !! 3251 * We mark the page dirty already here so that when freeze is in 3719 * progress, we are guaranteed that w 3252 * progress, we are guaranteed that writeback during freezing will 3720 * see the dirty folio and writeprote !! 3253 * see the dirty page and writeprotect it again. 3721 */ 3254 */ 3722 folio_mark_dirty(folio); !! 3255 set_page_dirty(page); 3723 folio_wait_stable(folio); !! 3256 wait_for_stable_page(page); 3724 out: 3257 out: 3725 sb_end_pagefault(mapping->host->i_sb) 3258 sb_end_pagefault(mapping->host->i_sb); 3726 return ret; 3259 return ret; 3727 } 3260 } 3728 3261 3729 const struct vm_operations_struct generic_fil 3262 const struct vm_operations_struct generic_file_vm_ops = { 3730 .fault = filemap_fault, 3263 .fault = filemap_fault, 3731 .map_pages = filemap_map_pages, 3264 .map_pages = filemap_map_pages, 3732 .page_mkwrite = filemap_page_mkwrit 3265 .page_mkwrite = filemap_page_mkwrite, 3733 }; 3266 }; 3734 3267 3735 /* This is used for a general mmap of a disk 3268 /* This is used for a general mmap of a disk file */ 3736 3269 3737 int generic_file_mmap(struct file *file, stru 3270 int generic_file_mmap(struct file *file, struct vm_area_struct *vma) 3738 { 3271 { 3739 struct address_space *mapping = file- 3272 struct address_space *mapping = file->f_mapping; 3740 3273 3741 if (!mapping->a_ops->read_folio) !! 3274 if (!mapping->a_ops->readpage) 3742 return -ENOEXEC; 3275 return -ENOEXEC; 3743 file_accessed(file); 3276 file_accessed(file); 3744 vma->vm_ops = &generic_file_vm_ops; 3277 vma->vm_ops = &generic_file_vm_ops; 3745 return 0; 3278 return 0; 3746 } 3279 } 3747 3280 3748 /* 3281 /* 3749 * This is for filesystems which do not imple 3282 * This is for filesystems which do not implement ->writepage. 
3750 */ 3283 */ 3751 int generic_file_readonly_mmap(struct file *f 3284 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 3752 { 3285 { 3753 if (vma_is_shared_maywrite(vma)) !! 3286 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) 3754 return -EINVAL; 3287 return -EINVAL; 3755 return generic_file_mmap(file, vma); 3288 return generic_file_mmap(file, vma); 3756 } 3289 } 3757 #else 3290 #else 3758 vm_fault_t filemap_page_mkwrite(struct vm_fau 3291 vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf) 3759 { 3292 { 3760 return VM_FAULT_SIGBUS; 3293 return VM_FAULT_SIGBUS; 3761 } 3294 } 3762 int generic_file_mmap(struct file *file, stru 3295 int generic_file_mmap(struct file *file, struct vm_area_struct *vma) 3763 { 3296 { 3764 return -ENOSYS; 3297 return -ENOSYS; 3765 } 3298 } 3766 int generic_file_readonly_mmap(struct file *f 3299 int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma) 3767 { 3300 { 3768 return -ENOSYS; 3301 return -ENOSYS; 3769 } 3302 } 3770 #endif /* CONFIG_MMU */ 3303 #endif /* CONFIG_MMU */ 3771 3304 3772 EXPORT_SYMBOL(filemap_page_mkwrite); 3305 EXPORT_SYMBOL(filemap_page_mkwrite); 3773 EXPORT_SYMBOL(generic_file_mmap); 3306 EXPORT_SYMBOL(generic_file_mmap); 3774 EXPORT_SYMBOL(generic_file_readonly_mmap); 3307 EXPORT_SYMBOL(generic_file_readonly_mmap); 3775 3308 3776 static struct folio *do_read_cache_folio(stru !! 3309 static struct page *wait_on_page_read(struct page *page) 3777 pgoff_t index, filler_t fille << 3778 { 3310 { 3779 struct folio *folio; !! 3311 if (!IS_ERR(page)) { 3780 int err; !! 3312 wait_on_page_locked(page); >> 3313 if (!PageUptodate(page)) { >> 3314 put_page(page); >> 3315 page = ERR_PTR(-EIO); >> 3316 } >> 3317 } >> 3318 return page; >> 3319 } 3781 3320 3782 if (!filler) !! 3321 static struct page *do_read_cache_page(struct address_space *mapping, 3783 filler = mapping->a_ops->read !! 3322 pgoff_t index, >> 3323 int (*filler)(void *, struct page *), >> 3324 void *data, >> 3325 gfp_t gfp) >> 3326 { >> 3327 struct page *page; >> 3328 int err; 3784 repeat: 3329 repeat: 3785 folio = filemap_get_folio(mapping, in !! 3330 page = find_get_page(mapping, index); 3786 if (IS_ERR(folio)) { !! 3331 if (!page) { 3787 folio = filemap_alloc_folio(g !! 3332 page = __page_cache_alloc(gfp); 3788 m !! 3333 if (!page) 3789 if (!folio) << 3790 return ERR_PTR(-ENOME 3334 return ERR_PTR(-ENOMEM); 3791 index = mapping_align_index(m !! 3335 err = add_to_page_cache_lru(page, mapping, index, gfp); 3792 err = filemap_add_folio(mappi << 3793 if (unlikely(err)) { 3336 if (unlikely(err)) { 3794 folio_put(folio); !! 3337 put_page(page); 3795 if (err == -EEXIST) 3338 if (err == -EEXIST) 3796 goto repeat; 3339 goto repeat; 3797 /* Presumably ENOMEM 3340 /* Presumably ENOMEM for xarray node */ 3798 return ERR_PTR(err); 3341 return ERR_PTR(err); 3799 } 3342 } 3800 3343 3801 goto filler; !! 3344 filler: >> 3345 if (filler) >> 3346 err = filler(data, page); >> 3347 else >> 3348 err = mapping->a_ops->readpage(data, page); >> 3349 >> 3350 if (err < 0) { >> 3351 put_page(page); >> 3352 return ERR_PTR(err); >> 3353 } >> 3354 >> 3355 page = wait_on_page_read(page); >> 3356 if (IS_ERR(page)) >> 3357 return page; >> 3358 goto out; 3802 } 3359 } 3803 if (folio_test_uptodate(folio)) !! 3360 if (PageUptodate(page)) 3804 goto out; 3361 goto out; 3805 3362 3806 if (!folio_trylock(folio)) { !! 3363 /* 3807 folio_put_wait_locked(folio, !! 3364 * Page is not up to date and may be locked due to one of the following 3808 goto repeat; !! 
3365 * case a: Page is being filled and the page lock is held 3809 } !! 3366 * case b: Read/write error clearing the page uptodate status >> 3367 * case c: Truncation in progress (page locked) >> 3368 * case d: Reclaim in progress >> 3369 * >> 3370 * Case a, the page will be up to date when the page is unlocked. >> 3371 * There is no need to serialise on the page lock here as the page >> 3372 * is pinned so the lock gives no additional protection. Even if the >> 3373 * page is truncated, the data is still valid if PageUptodate as >> 3374 * it's a race vs truncate race. >> 3375 * Case b, the page will not be up to date >> 3376 * Case c, the page may be truncated but in itself, the data may still >> 3377 * be valid after IO completes as it's a read vs truncate race. The >> 3378 * operation must restart if the page is not uptodate on unlock but >> 3379 * otherwise serialising on page lock to stabilise the mapping gives >> 3380 * no additional guarantees to the caller as the page lock is >> 3381 * released before return. >> 3382 * Case d, similar to truncation. If reclaim holds the page lock, it >> 3383 * will be a race with remove_mapping that determines if the mapping >> 3384 * is valid on unlock but otherwise the data is valid and there is >> 3385 * no need to serialise with page lock. >> 3386 * >> 3387 * As the page lock gives no additional guarantee, we optimistically >> 3388 * wait on the page to be unlocked and check if it's up to date and >> 3389 * use the page if it is. Otherwise, the page lock is required to >> 3390 * distinguish between the different cases. The motivation is that we >> 3391 * avoid spurious serialisations and wakeups when multiple processes >> 3392 * wait on the same page for IO to complete. >> 3393 */ >> 3394 wait_on_page_locked(page); >> 3395 if (PageUptodate(page)) >> 3396 goto out; >> 3397 >> 3398 /* Distinguish between all the cases under the safety of the lock */ >> 3399 lock_page(page); 3810 3400 3811 /* Folio was truncated from mapping * !! 3401 /* Case c or d, restart the operation */ 3812 if (!folio->mapping) { !! 3402 if (!page->mapping) { 3813 folio_unlock(folio); !! 3403 unlock_page(page); 3814 folio_put(folio); !! 3404 put_page(page); 3815 goto repeat; 3405 goto repeat; 3816 } 3406 } 3817 3407 3818 /* Someone else locked and filled the 3408 /* Someone else locked and filled the page in a very small window */ 3819 if (folio_test_uptodate(folio)) { !! 3409 if (PageUptodate(page)) { 3820 folio_unlock(folio); !! 3410 unlock_page(page); 3821 goto out; 3411 goto out; 3822 } 3412 } 3823 3413 3824 filler: !! 3414 /* 3825 err = filemap_read_folio(file, filler !! 3415 * A previous I/O error may have been due to temporary 3826 if (err) { !! 3416 * failures. 3827 folio_put(folio); !! 3417 * Clear page error before actual read, PG_error will be 3828 if (err == AOP_TRUNCATED_PAGE !! 3418 * set again if read page fails. 3829 goto repeat; !! 3419 */ 3830 return ERR_PTR(err); !! 3420 ClearPageError(page); 3831 } !! 3421 goto filler; 3832 3422 3833 out: 3423 out: 3834 folio_mark_accessed(folio); !! 3424 mark_page_accessed(page); 3835 return folio; !! 3425 return page; 3836 } << 3837 << 3838 /** << 3839 * read_cache_folio - Read into page cache, f << 3840 * @mapping: The address_space to read from. << 3841 * @index: The index to read. << 3842 * @filler: Function to perform the read, or << 3843 * @file: Passed to filler function, may be N << 3844 * << 3845 * Read one page into the page cache. 
If it << 3846 * will contain @index, but it may not be the << 3847 * << 3848 * If the filler function returns an error, i << 3849 * caller. << 3850 * << 3851 * Context: May sleep. Expects mapping->inva << 3852 * Return: An uptodate folio on success, ERR_ << 3853 */ << 3854 struct folio *read_cache_folio(struct address << 3855 filler_t filler, struct file << 3856 { << 3857 return do_read_cache_folio(mapping, i << 3858 mapping_gfp_mask(mapp << 3859 } 3426 } 3860 EXPORT_SYMBOL(read_cache_folio); << 3861 3427 3862 /** 3428 /** 3863 * mapping_read_folio_gfp - Read into page ca !! 3429 * read_cache_page - read into page cache, fill it if needed 3864 * @mapping: The address_space for the fol !! 3430 * @mapping: the page's address_space 3865 * @index: The index that the allocated !! 3431 * @index: the page index 3866 * @gfp: The page allocator flags to u !! 3432 * @filler: function to perform the read 3867 * !! 3433 * @data: first arg to filler(data, page) function, often left as NULL 3868 * This is the same as "read_cache_folio(mapp << 3869 * any new memory allocations done using the << 3870 * 3434 * 3871 * The most likely error from this function i !! 3435 * Read into the page cache. If a page already exists, and PageUptodate() is 3872 * possible and so is EINTR. If ->read_folio !! 3436 * not set, try to fill the page and wait for it to become unlocked. 3873 * that will be returned to the caller. << 3874 * 3437 * 3875 * The function expects mapping->invalidate_l !! 3438 * If the page does not get brought uptodate, return -EIO. 3876 * 3439 * 3877 * Return: Uptodate folio on success, ERR_PTR !! 3440 * Return: up to date page on success, ERR_PTR() on failure. 3878 */ 3441 */ 3879 struct folio *mapping_read_folio_gfp(struct a << 3880 pgoff_t index, gfp_t gfp) << 3881 { << 3882 return do_read_cache_folio(mapping, i << 3883 } << 3884 EXPORT_SYMBOL(mapping_read_folio_gfp); << 3885 << 3886 static struct page *do_read_cache_page(struct << 3887 pgoff_t index, filler_t *fill << 3888 { << 3889 struct folio *folio; << 3890 << 3891 folio = do_read_cache_folio(mapping, << 3892 if (IS_ERR(folio)) << 3893 return &folio->page; << 3894 return folio_file_page(folio, index); << 3895 } << 3896 << 3897 struct page *read_cache_page(struct address_s 3442 struct page *read_cache_page(struct address_space *mapping, 3898 pgoff_t index, filler !! 3443 pgoff_t index, >> 3444 int (*filler)(void *, struct page *), >> 3445 void *data) 3899 { 3446 { 3900 return do_read_cache_page(mapping, in !! 3447 return do_read_cache_page(mapping, index, filler, data, 3901 mapping_gfp_mask(mapp 3448 mapping_gfp_mask(mapping)); 3902 } 3449 } 3903 EXPORT_SYMBOL(read_cache_page); 3450 EXPORT_SYMBOL(read_cache_page); 3904 3451 3905 /** 3452 /** 3906 * read_cache_page_gfp - read into page cache 3453 * read_cache_page_gfp - read into page cache, using specified page allocation flags. 3907 * @mapping: the page's address_space 3454 * @mapping: the page's address_space 3908 * @index: the page index 3455 * @index: the page index 3909 * @gfp: the page allocator flags to u 3456 * @gfp: the page allocator flags to use if allocating 3910 * 3457 * 3911 * This is the same as "read_mapping_page(map 3458 * This is the same as "read_mapping_page(mapping, index, NULL)", but with 3912 * any new page allocations done using the sp 3459 * any new page allocations done using the specified allocation flags. 3913 * 3460 * 3914 * If the page does not get brought uptodate, 3461 * If the page does not get brought uptodate, return -EIO. 
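 *
 * A typical call might look like this (sketch only; mapping_gfp_constraint()
 * just masks the mapping's default gfp flags):
 *
 *	page = read_cache_page_gfp(mapping, index,
 *			mapping_gfp_constraint(mapping, GFP_NOFS));
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...use the uptodate page...
 *	put_page(page);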
3915 * 3462 * 3916 * The function expects mapping->invalidate_l << 3917 * << 3918 * Return: up to date page on success, ERR_PT 3463 * Return: up to date page on success, ERR_PTR() on failure. 3919 */ 3464 */ 3920 struct page *read_cache_page_gfp(struct addre 3465 struct page *read_cache_page_gfp(struct address_space *mapping, 3921 pgoff_t index 3466 pgoff_t index, 3922 gfp_t gfp) 3467 gfp_t gfp) 3923 { 3468 { 3924 return do_read_cache_page(mapping, in 3469 return do_read_cache_page(mapping, index, NULL, NULL, gfp); 3925 } 3470 } 3926 EXPORT_SYMBOL(read_cache_page_gfp); 3471 EXPORT_SYMBOL(read_cache_page_gfp); 3927 3472 >> 3473 int pagecache_write_begin(struct file *file, struct address_space *mapping, >> 3474 loff_t pos, unsigned len, unsigned flags, >> 3475 struct page **pagep, void **fsdata) >> 3476 { >> 3477 const struct address_space_operations *aops = mapping->a_ops; >> 3478 >> 3479 return aops->write_begin(file, mapping, pos, len, flags, >> 3480 pagep, fsdata); >> 3481 } >> 3482 EXPORT_SYMBOL(pagecache_write_begin); >> 3483 >> 3484 int pagecache_write_end(struct file *file, struct address_space *mapping, >> 3485 loff_t pos, unsigned len, unsigned copied, >> 3486 struct page *page, void *fsdata) >> 3487 { >> 3488 const struct address_space_operations *aops = mapping->a_ops; >> 3489 >> 3490 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); >> 3491 } >> 3492 EXPORT_SYMBOL(pagecache_write_end); >> 3493 3928 /* 3494 /* 3929 * Warn about a page cache invalidation failu 3495 * Warn about a page cache invalidation failure during a direct I/O write. 3930 */ 3496 */ 3931 static void dio_warn_stale_pagecache(struct f !! 3497 void dio_warn_stale_pagecache(struct file *filp) 3932 { 3498 { 3933 static DEFINE_RATELIMIT_STATE(_rs, 86 3499 static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST); 3934 char pathname[128]; 3500 char pathname[128]; 3935 char *path; 3501 char *path; 3936 3502 3937 errseq_set(&filp->f_mapping->wb_err, 3503 errseq_set(&filp->f_mapping->wb_err, -EIO); 3938 if (__ratelimit(&_rs)) { 3504 if (__ratelimit(&_rs)) { 3939 path = file_path(filp, pathna 3505 path = file_path(filp, pathname, sizeof(pathname)); 3940 if (IS_ERR(path)) 3506 if (IS_ERR(path)) 3941 path = "(unknown)"; 3507 path = "(unknown)"; 3942 pr_crit("Page cache invalidat 3508 pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n"); 3943 pr_crit("File: %s PID: %d Com 3509 pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid, 3944 current->comm); 3510 current->comm); 3945 } 3511 } 3946 } 3512 } 3947 3513 3948 void kiocb_invalidate_post_direct_write(struc << 3949 { << 3950 struct address_space *mapping = iocb- << 3951 << 3952 if (mapping->nrpages && << 3953 invalidate_inode_pages2_range(map << 3954 iocb->ki_pos >> PAGE_ << 3955 (iocb->ki_pos + count << 3956 dio_warn_stale_pagecache(iocb << 3957 } << 3958 << 3959 ssize_t 3514 ssize_t 3960 generic_file_direct_write(struct kiocb *iocb, 3515 generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) 3961 { 3516 { 3962 struct address_space *mapping = iocb- !! 3517 struct file *file = iocb->ki_filp; 3963 size_t write_len = iov_iter_count(fro !! 3518 struct address_space *mapping = file->f_mapping; 3964 ssize_t written; !! 
3519 struct inode *inode = mapping->host; >> 3520 loff_t pos = iocb->ki_pos; >> 3521 ssize_t written; >> 3522 size_t write_len; >> 3523 pgoff_t end; >> 3524 >> 3525 write_len = iov_iter_count(from); >> 3526 end = (pos + write_len - 1) >> PAGE_SHIFT; >> 3527 >> 3528 if (iocb->ki_flags & IOCB_NOWAIT) { >> 3529 /* If there are pages to writeback, return */ >> 3530 if (filemap_range_has_page(file->f_mapping, pos, >> 3531 pos + write_len - 1)) >> 3532 return -EAGAIN; >> 3533 } else { >> 3534 written = filemap_write_and_wait_range(mapping, pos, >> 3535 pos + write_len - 1); >> 3536 if (written) >> 3537 goto out; >> 3538 } 3965 3539 3966 /* 3540 /* >> 3541 * After a write we want buffered reads to be sure to go to disk to get >> 3542 * the new data. We invalidate clean cached page from the region we're >> 3543 * about to write. We do this *before* the write so that we can return >> 3544 * without clobbering -EIOCBQUEUED from ->direct_IO(). >> 3545 */ >> 3546 written = invalidate_inode_pages2_range(mapping, >> 3547 pos >> PAGE_SHIFT, end); >> 3548 /* 3967 * If a page can not be invalidated, 3549 * If a page can not be invalidated, return 0 to fall back 3968 * to buffered write. 3550 * to buffered write. 3969 */ 3551 */ 3970 written = kiocb_invalidate_pages(iocb << 3971 if (written) { 3552 if (written) { 3972 if (written == -EBUSY) 3553 if (written == -EBUSY) 3973 return 0; 3554 return 0; 3974 return written; !! 3555 goto out; 3975 } 3556 } 3976 3557 3977 written = mapping->a_ops->direct_IO(i 3558 written = mapping->a_ops->direct_IO(iocb, from); 3978 3559 3979 /* 3560 /* 3980 * Finally, try again to invalidate c 3561 * Finally, try again to invalidate clean pages which might have been 3981 * cached by non-direct readahead, or 3562 * cached by non-direct readahead, or faulted in by get_user_pages() 3982 * if the source of the write was an 3563 * if the source of the write was an mmap'ed region of the file 3983 * we're writing. Either one is a pr 3564 * we're writing. Either one is a pretty crazy thing to do, 3984 * so we don't support it 100%. If t 3565 * so we don't support it 100%. If this invalidation 3985 * fails, tough, the write still work 3566 * fails, tough, the write still worked... 3986 * 3567 * 3987 * Most of the time we do not need th 3568 * Most of the time we do not need this since dio_complete() will do 3988 * the invalidation for us. However t 3569 * the invalidation for us. However there are some file systems that 3989 * do not end up with dio_complete() 3570 * do not end up with dio_complete() being called, so let's not break 3990 * them by removing it completely. 3571 * them by removing it completely. 3991 * 3572 * 3992 * Noticeable example is a blkdev_dir 3573 * Noticeable example is a blkdev_direct_IO(). 3993 * 3574 * 3994 * Skip invalidation for async writes 3575 * Skip invalidation for async writes or if mapping has no pages. 3995 */ 3576 */ 3996 if (written > 0) { !! 3577 if (written > 0 && mapping->nrpages && 3997 struct inode *inode = mapping !! 3578 invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end)) 3998 loff_t pos = iocb->ki_pos; !! 3579 dio_warn_stale_pagecache(file); 3999 3580 4000 kiocb_invalidate_post_direct_ !! 
3581 if (written > 0) { 4001 pos += written; 3582 pos += written; 4002 write_len -= written; 3583 write_len -= written; 4003 if (pos > i_size_read(inode) 3584 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { 4004 i_size_write(inode, p 3585 i_size_write(inode, pos); 4005 mark_inode_dirty(inod 3586 mark_inode_dirty(inode); 4006 } 3587 } 4007 iocb->ki_pos = pos; 3588 iocb->ki_pos = pos; 4008 } 3589 } 4009 if (written != -EIOCBQUEUED) 3590 if (written != -EIOCBQUEUED) 4010 iov_iter_revert(from, write_l 3591 iov_iter_revert(from, write_len - iov_iter_count(from)); >> 3592 out: 4011 return written; 3593 return written; 4012 } 3594 } 4013 EXPORT_SYMBOL(generic_file_direct_write); 3595 EXPORT_SYMBOL(generic_file_direct_write); 4014 3596 4015 ssize_t generic_perform_write(struct kiocb *i !! 3597 /* >> 3598 * Find or create a page at the given pagecache position. Return the locked >> 3599 * page. This function is specifically for buffered writes. >> 3600 */ >> 3601 struct page *grab_cache_page_write_begin(struct address_space *mapping, >> 3602 pgoff_t index, unsigned flags) >> 3603 { >> 3604 struct page *page; >> 3605 int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT; >> 3606 >> 3607 if (flags & AOP_FLAG_NOFS) >> 3608 fgp_flags |= FGP_NOFS; >> 3609 >> 3610 page = pagecache_get_page(mapping, index, fgp_flags, >> 3611 mapping_gfp_mask(mapping)); >> 3612 if (page) >> 3613 wait_for_stable_page(page); >> 3614 >> 3615 return page; >> 3616 } >> 3617 EXPORT_SYMBOL(grab_cache_page_write_begin); >> 3618 >> 3619 ssize_t generic_perform_write(struct file *file, >> 3620 struct iov_iter *i, loff_t pos) 4016 { 3621 { 4017 struct file *file = iocb->ki_filp; << 4018 loff_t pos = iocb->ki_pos; << 4019 struct address_space *mapping = file- 3622 struct address_space *mapping = file->f_mapping; 4020 const struct address_space_operations 3623 const struct address_space_operations *a_ops = mapping->a_ops; 4021 size_t chunk = mapping_max_folio_size << 4022 long status = 0; 3624 long status = 0; 4023 ssize_t written = 0; 3625 ssize_t written = 0; >> 3626 unsigned int flags = 0; 4024 3627 4025 do { 3628 do { 4026 struct folio *folio; !! 3629 struct page *page; 4027 size_t offset; /* Of !! 3630 unsigned long offset; /* Offset into pagecache page */ 4028 size_t bytes; /* By !! 3631 unsigned long bytes; /* Bytes to write to page */ 4029 size_t copied; /* By 3632 size_t copied; /* Bytes copied from user */ 4030 void *fsdata = NULL; !! 3633 void *fsdata; 4031 3634 4032 bytes = iov_iter_count(i); !! 3635 offset = (pos & (PAGE_SIZE - 1)); 4033 retry: !! 3636 bytes = min_t(unsigned long, PAGE_SIZE - offset, 4034 offset = pos & (chunk - 1); !! 3637 iov_iter_count(i)); 4035 bytes = min(chunk - offset, b << 4036 balance_dirty_pages_ratelimit << 4037 3638 >> 3639 again: 4038 /* 3640 /* 4039 * Bring in the user page tha 3641 * Bring in the user page that we will copy from _first_. 4040 * Otherwise there's a nasty 3642 * Otherwise there's a nasty deadlock on copying from the 4041 * same page as we're writing 3643 * same page as we're writing to, without it being marked 4042 * up-to-date. 3644 * up-to-date. >> 3645 * >> 3646 * Not only is this an optimisation, but it is also required >> 3647 * to check that the address is actually valid, when atomic >> 3648 * usercopies are used, below. 4043 */ 3649 */ 4044 if (unlikely(fault_in_iov_ite !! 
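/*
 * Sketch of the ->write_begin/->write_end contract the copy loop above
 * relies on: ->write_begin hands back a locked pagecache page plus opaque
 * fsdata, and ->write_end must unlock and release that page and report how
 * many bytes it actually accepted.  A trivial filesystem can simply forward
 * to the simple_* helpers (myfs_write_begin is a made-up name):
 *
 *	static int myfs_write_begin(struct file *file, struct address_space *mapping,
 *				    loff_t pos, unsigned len, unsigned flags,
 *				    struct page **pagep, void **fsdata)
 *	{
 *		return simple_write_begin(file, mapping, pos, len, flags,
 *					  pagep, fsdata);
 *	}
 */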
3650 if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 4045 status = -EFAULT; 3651 status = -EFAULT; 4046 break; 3652 break; 4047 } 3653 } 4048 3654 4049 if (fatal_signal_pending(curr 3655 if (fatal_signal_pending(current)) { 4050 status = -EINTR; 3656 status = -EINTR; 4051 break; 3657 break; 4052 } 3658 } 4053 3659 4054 status = a_ops->write_begin(f !! 3660 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 4055 !! 3661 &page, &fsdata); 4056 if (unlikely(status < 0)) 3662 if (unlikely(status < 0)) 4057 break; 3663 break; 4058 3664 4059 offset = offset_in_folio(foli << 4060 if (bytes > folio_size(folio) << 4061 bytes = folio_size(fo << 4062 << 4063 if (mapping_writably_mapped(m 3665 if (mapping_writably_mapped(mapping)) 4064 flush_dcache_folio(fo !! 3666 flush_dcache_page(page); 4065 3667 4066 copied = copy_folio_from_iter !! 3668 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 4067 flush_dcache_folio(folio); !! 3669 flush_dcache_page(page); 4068 3670 4069 status = a_ops->write_end(fil 3671 status = a_ops->write_end(file, mapping, pos, bytes, copied, 4070 !! 3672 page, fsdata); 4071 if (unlikely(status != copied !! 3673 if (unlikely(status < 0)) 4072 iov_iter_revert(i, co !! 3674 break; 4073 if (unlikely(status < !! 3675 copied = status; 4074 break; !! 3676 4075 } << 4076 cond_resched(); 3677 cond_resched(); 4077 3678 4078 if (unlikely(status == 0)) { !! 3679 iov_iter_advance(i, copied); >> 3680 if (unlikely(copied == 0)) { 4079 /* 3681 /* 4080 * A short copy made !! 3682 * If we were unable to copy any data at all, we must 4081 * thing entirely. M !! 3683 * fall back to a single segment length write. 4082 * halfway through, m !! 3684 * 4083 * might be severe me !! 3685 * If we didn't fallback here, we could livelock >> 3686 * because not all segments in the iov can be copied at >> 3687 * once without a pagefault. 4084 */ 3688 */ 4085 if (chunk > PAGE_SIZE !! 3689 bytes = min_t(unsigned long, PAGE_SIZE - offset, 4086 chunk /= 2; !! 3690 iov_iter_single_seg_count(i)); 4087 if (copied) { !! 3691 goto again; 4088 bytes = copie << 4089 goto retry; << 4090 } << 4091 } else { << 4092 pos += status; << 4093 written += status; << 4094 } 3692 } >> 3693 pos += copied; >> 3694 written += copied; >> 3695 >> 3696 balance_dirty_pages_ratelimited(mapping); 4095 } while (iov_iter_count(i)); 3697 } while (iov_iter_count(i)); 4096 3698 4097 if (!written) !! 3699 return written ? written : status; 4098 return status; << 4099 iocb->ki_pos += written; << 4100 return written; << 4101 } 3700 } 4102 EXPORT_SYMBOL(generic_perform_write); 3701 EXPORT_SYMBOL(generic_perform_write); 4103 3702 4104 /** 3703 /** 4105 * __generic_file_write_iter - write data to 3704 * __generic_file_write_iter - write data to a file 4106 * @iocb: IO state structure (file, off 3705 * @iocb: IO state structure (file, offset, etc.) 4107 * @from: iov_iter with data to write 3706 * @from: iov_iter with data to write 4108 * 3707 * 4109 * This function does all the work needed for 3708 * This function does all the work needed for actually writing data to a 4110 * file. It does all basic checks, removes SU 3709 * file. It does all basic checks, removes SUID from the file, updates 4111 * modification times and calls proper subrou 3710 * modification times and calls proper subroutines depending on whether we 4112 * do direct IO or a standard buffered write. 3711 * do direct IO or a standard buffered write. 4113 * 3712 * 4114 * It expects i_rwsem to be grabbed unless we !! 
3713 * It expects i_mutex to be grabbed unless we work on a block device or similar 4115 * object which does not need locking at all. 3714 * object which does not need locking at all. 4116 * 3715 * 4117 * This function does *not* take care of sync 3716 * This function does *not* take care of syncing data in case of O_SYNC write. 4118 * A caller has to handle it. This is mainly 3717 * A caller has to handle it. This is mainly due to the fact that we want to 4119 * avoid syncing under i_rwsem. !! 3718 * avoid syncing under i_mutex. 4120 * 3719 * 4121 * Return: 3720 * Return: 4122 * * number of bytes written, even for trunca 3721 * * number of bytes written, even for truncated writes 4123 * * negative error code if no data has been 3722 * * negative error code if no data has been written at all 4124 */ 3723 */ 4125 ssize_t __generic_file_write_iter(struct kioc 3724 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 4126 { 3725 { 4127 struct file *file = iocb->ki_filp; 3726 struct file *file = iocb->ki_filp; 4128 struct address_space *mapping = file- 3727 struct address_space *mapping = file->f_mapping; 4129 struct inode *inode = mapping->host; !! 3728 struct inode *inode = mapping->host; 4130 ssize_t ret; !! 3729 ssize_t written = 0; >> 3730 ssize_t err; >> 3731 ssize_t status; >> 3732 >> 3733 /* We can write back this queue in page reclaim */ >> 3734 current->backing_dev_info = inode_to_bdi(inode); >> 3735 err = file_remove_privs(file); >> 3736 if (err) >> 3737 goto out; 4131 3738 4132 ret = file_remove_privs(file); !! 3739 err = file_update_time(file); 4133 if (ret) !! 3740 if (err) 4134 return ret; !! 3741 goto out; 4135 << 4136 ret = file_update_time(file); << 4137 if (ret) << 4138 return ret; << 4139 3742 4140 if (iocb->ki_flags & IOCB_DIRECT) { 3743 if (iocb->ki_flags & IOCB_DIRECT) { 4141 ret = generic_file_direct_wri !! 3744 loff_t pos, endbyte; >> 3745 >> 3746 written = generic_file_direct_write(iocb, from); 4142 /* 3747 /* 4143 * If the write stopped short 3748 * If the write stopped short of completing, fall back to 4144 * buffered writes. Some fil 3749 * buffered writes. Some filesystems do this for writes to 4145 * holes, for example. For D 3750 * holes, for example. For DAX files, a buffered write will 4146 * not succeed (even if it di 3751 * not succeed (even if it did, DAX does not handle dirty 4147 * page-cache pages correctly 3752 * page-cache pages correctly). 4148 */ 3753 */ 4149 if (ret < 0 || !iov_iter_coun !! 3754 if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) 4150 return ret; !! 3755 goto out; 4151 return direct_write_fallback( << 4152 generic_perfo << 4153 } << 4154 3756 4155 return generic_perform_write(iocb, fr !! 3757 status = generic_perform_write(file, from, pos = iocb->ki_pos); >> 3758 /* >> 3759 * If generic_perform_write() returned a synchronous error >> 3760 * then we want to return the number of bytes which were >> 3761 * direct-written, or the error code if that was zero. Note >> 3762 * that this differs from normal direct-io semantics, which >> 3763 * will return -EFOO even if some bytes were written. >> 3764 */ >> 3765 if (unlikely(status < 0)) { >> 3766 err = status; >> 3767 goto out; >> 3768 } >> 3769 /* >> 3770 * We need to ensure that the page cache pages are written to >> 3771 * disk and invalidated to preserve the expected O_DIRECT >> 3772 * semantics. 
/**
 * generic_file_write_iter - write data to a file
 * @iocb:	IO state structure
 * @from:	iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file for O_SYNC writes
 * and acquires i_rwsem as needed.
 * Return:
 * * negative error code if no data has been written at all or
 *   vfs_fsync_range() failed for a synchronous write
 * * number of bytes written, even for truncated writes
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
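/*
 * Illustrative sketch, not part of filemap.c: how a simple, shmem/ramfs-like
 * in-memory filesystem ("myfs", hypothetical) typically wires the generic
 * paths above into its file_operations.  The matching
 * address_space_operations must provide ->write_begin/->write_end (for
 * example the myfs_* pair sketched earlier, or simple_write_begin and
 * simple_write_end from fs/libfs.c).
 */
static const struct file_operations myfs_file_operations = {
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= noop_fsync,		/* data never hits a disk */
	.llseek		= generic_file_llseek,
	.splice_read	= filemap_splice_read,
	.splice_write	= iter_file_splice_write,
};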
/**
 * filemap_release_folio() - Release fs-specific metadata on a folio.
 * @folio: The folio which the kernel is trying to free.
 * @gfp: Memory allocation flags (and I/O mode).
 *
 * The address_space is trying to release any data attached to a folio
 * (presumably at folio->private).
 *
 * This will also be called if the private_2 flag is set on the folio,
 * indicating that the folio has other metadata associated with it.
 *
 * The @gfp argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block
 * (__GFP_RECLAIM & __GFP_FS).
 *
 * Return: %true if the release was successful, otherwise %false.
 */
bool filemap_release_folio(struct folio *folio, gfp_t gfp)
{
	struct address_space * const mapping = folio->mapping;

	BUG_ON(!folio_test_locked(folio));
	if (!folio_needs_release(folio))
		return true;
	if (folio_test_writeback(folio))
		return false;

	if (mapping && mapping->a_ops->release_folio)
		return mapping->a_ops->release_folio(folio, gfp);
	return try_to_free_buffers(folio);
}
EXPORT_SYMBOL(filemap_release_folio);

/**
 * filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache
 * @inode: The inode to flush
 * @flush: Set to write back rather than simply invalidate.
 * @start: First byte in the range.
 * @end: Last byte in range (inclusive), or LLONG_MAX for everything from start
 *       onwards.
 *
 * Invalidate all the folios on an inode that contribute to the specified
 * range, possibly writing them back first.  Whilst the operation is
 * undertaken, the invalidate lock is held to prevent new folios from being
 * installed.
 */
int filemap_invalidate_inode(struct inode *inode, bool flush,
			     loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t first = start >> PAGE_SHIFT;
	pgoff_t last = end >> PAGE_SHIFT;
	pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1;

	if (!mapping || !mapping->nrpages || end < start)
		goto out;

	/* Prevent new folios from being added to the inode. */
	filemap_invalidate_lock(mapping);

	if (!mapping->nrpages)
		goto unlock;

	unmap_mapping_pages(mapping, first, nr, false);

	/* Write back the data if we're asked to. */
	if (flush) {
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_ALL,
			.nr_to_write	= LONG_MAX,
			.range_start	= start,
			.range_end	= end,
		};

		filemap_fdatawrite_wbc(mapping, &wbc);
	}

	/* Wait for writeback to complete on all folios and discard. */
	invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE);

unlock:
	filemap_invalidate_unlock(mapping);
out:
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL_GPL(filemap_invalidate_inode);
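/*
 * Illustrative sketch, not part of filemap.c: hypothetical helpers a network
 * filesystem might use around filemap_invalidate_inode().  The first writes
 * dirty folios back (flush == true) before dropping the now-stale pagecache
 * for a byte range the server reported as changed; the second simply discards
 * the whole file's cache.
 */
static int myfs_handle_remote_change(struct inode *inode, loff_t start, loff_t end)
{
	return filemap_invalidate_inode(inode, true, start, end);
}

static int myfs_drop_whole_file_cache(struct inode *inode)
{
	/* Discard everything without writing it back first. */
	return filemap_invalidate_inode(inode, false, 0, LLONG_MAX);
}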
#ifdef CONFIG_CACHESTAT_SYSCALL
/**
 * filemap_cachestat() - compute the page cache statistics of a mapping
 * @mapping:	The mapping to compute the statistics for.
 * @first_index:	The starting page cache index.
 * @last_index:	The final page index (inclusive).
 * @cs:	the cachestat struct to write the result to.
 *
 * This will query the page cache statistics of a mapping in the
 * page range of [first_index, last_index] (inclusive). The statistics
 * queried include: number of dirty pages, number of pages marked for
 * writeback, and the number of (recently) evicted pages.
 */
static void filemap_cachestat(struct address_space *mapping,
		pgoff_t first_index, pgoff_t last_index, struct cachestat *cs)
{
	XA_STATE(xas, &mapping->i_pages, first_index);
	struct folio *folio;

	/* Flush stats (and potentially sleep) outside the RCU read section. */
	mem_cgroup_flush_stats_ratelimited(NULL);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_index) {
		int order;
		unsigned long nr_pages;
		pgoff_t folio_first_index, folio_last_index;

		/*
		 * Don't deref the folio. It is not pinned, and might
		 * get freed (and reused) underneath us.
		 *
		 * We *could* pin it, but that would be expensive for
		 * what should be a fast and lightweight syscall.
		 *
		 * Instead, derive all information of interest from
		 * the rcu-protected xarray.
		 */

		if (xas_retry(&xas, folio))
			continue;

		order = xas_get_order(&xas);
		nr_pages = 1 << order;
		folio_first_index = round_down(xas.xa_index, 1 << order);
		folio_last_index = folio_first_index + nr_pages - 1;

		/* Folios might straddle the range boundaries, only count covered pages */
		if (folio_first_index < first_index)
			nr_pages -= first_index - folio_first_index;

		if (folio_last_index > last_index)
			nr_pages -= folio_last_index - last_index;

		if (xa_is_value(folio)) {
			/* page is evicted */
			void *shadow = (void *)folio;
			bool workingset; /* not used */

			cs->nr_evicted += nr_pages;

#ifdef CONFIG_SWAP /* implies CONFIG_MMU */
			if (shmem_mapping(mapping)) {
				/* shmem file - in swap cache */
				swp_entry_t swp = radix_to_swp_entry(folio);

				/* swapin error results in poisoned entry */
				if (non_swap_entry(swp))
					goto resched;

				/*
				 * Getting a swap entry from the shmem
				 * inode means we beat
				 * shmem_unuse(). rcu_read_lock()
				 * ensures swapoff waits for us before
				 * freeing the swapper space. However,
				 * we can race with swapping and
				 * invalidation, so there might not be
				 * a shadow in the swapcache (yet).
				 */
				shadow = get_shadow_from_swap_cache(swp);
				if (!shadow)
					goto resched;
			}
#endif
			if (workingset_test_recent(shadow, true, &workingset, false))
				cs->nr_recently_evicted += nr_pages;

			goto resched;
		}

		/* page is in cache */
		cs->nr_cache += nr_pages;

		if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
			cs->nr_dirty += nr_pages;

		if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
			cs->nr_writeback += nr_pages;

resched:
		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();
}
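/*
 * For reference: the statistics filled in above are returned to userspace in
 * struct cachestat, declared together with struct cachestat_range in the
 * UAPI header include/uapi/linux/mman.h.  Reproduced here for context only;
 * the authoritative definitions live in that header.
 *
 *	struct cachestat_range {
 *		__u64 off;
 *		__u64 len;
 *	};
 *
 *	struct cachestat {
 *		__u64 nr_cache;
 *		__u64 nr_dirty;
 *		__u64 nr_writeback;
 *		__u64 nr_evicted;
 *		__u64 nr_recently_evicted;
 *	};
 */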
/*
 * The cachestat(2) system call.
 *
 * cachestat() returns the page cache statistics of a file in the
 * bytes range specified by `off` and `len`: number of cached pages,
 * number of dirty pages, number of pages marked for writeback,
 * number of evicted pages, and number of recently evicted pages.
 *
 * An evicted page is a page that is previously in the page cache
 * but has been evicted since. A page is recently evicted if its last
 * eviction was recent enough that its reentry to the cache would
 * indicate that it is actively being used by the system, and that
 * there is memory pressure on the system.
 *
 * `off` and `len` must be non-negative integers. If `len` > 0,
 * the queried range is [`off`, `off` + `len`]. If `len` == 0,
 * we will query in the range from `off` to the end of the file.
 *
 * The `flags` argument is unused for now, but is included for future
 * extensibility. User should pass 0 (i.e no flag specified).
 *
 * Currently, hugetlbfs is not supported.
 *
 * Because the status of a page can change after cachestat() checks it
 * but before it returns to the application, the returned values may
 * contain stale information.
 *
 * return values:
 *  zero        - success
 *  -EFAULT     - cstat or cstat_range points to an illegal address
 *  -EINVAL     - invalid flags
 *  -EBADF      - invalid file descriptor
 *  -EOPNOTSUPP - file descriptor is of a hugetlbfs file
 */
SYSCALL_DEFINE4(cachestat, unsigned int, fd,
		struct cachestat_range __user *, cstat_range,
		struct cachestat __user *, cstat, unsigned int, flags)
{
	struct fd f = fdget(fd);
	struct address_space *mapping;
	struct cachestat_range csr;
	struct cachestat cs;
	pgoff_t first_index, last_index;

	if (!fd_file(f))
		return -EBADF;

	if (copy_from_user(&csr, cstat_range,
			sizeof(struct cachestat_range))) {
		fdput(f);
		return -EFAULT;
	}

	/* hugetlbfs is not supported */
	if (is_file_hugepages(fd_file(f))) {
		fdput(f);
		return -EOPNOTSUPP;
	}

	if (flags != 0) {
		fdput(f);
		return -EINVAL;
	}

	first_index = csr.off >> PAGE_SHIFT;
	last_index =
		csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
	memset(&cs, 0, sizeof(struct cachestat));
	mapping = fd_file(f)->f_mapping;
	filemap_cachestat(mapping, first_index, last_index, &cs);
	fdput(f);

	if (copy_to_user(cstat, &cs, sizeof(struct cachestat)))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_CACHESTAT_SYSCALL */
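/*
 * Illustrative userspace sketch, not part of filemap.c: querying the page
 * cache statistics of a file via cachestat(2).  Assumptions: struct cachestat
 * and struct cachestat_range come from <linux/mman.h>, and __NR_cachestat is
 * 451 on the asm-generic syscall table (check your architecture, or use a
 * libc wrapper if one is available).
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/mman.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		struct cachestat_range range = { .off = 0, .len = 0 };	// whole file
 *		struct cachestat cs;
 *		int fd;
 *
 *		if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0 ||
 *		    syscall(__NR_cachestat, fd, &range, &cs, 0)) {
 *			perror("cachestat");
 *			return 1;
 *		}
 *		printf("cached %llu dirty %llu writeback %llu evicted %llu recent %llu\n",
 *		       (unsigned long long)cs.nr_cache,
 *		       (unsigned long long)cs.nr_dirty,
 *		       (unsigned long long)cs.nr_writeback,
 *		       (unsigned long long)cs.nr_evicted,
 *		       (unsigned long long)cs.nr_recently_evicted);
 *		return 0;
 *	}
 */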