TOMOYO Linux Cross Reference
Linux/mm/filemap.c

Diff markup: /mm/filemap.c (Architecture sparc) vs. /mm/filemap.c (Architecture sparc64). The two versions are identical, since mm/filemap.c is architecture-independent C, so a single listing follows.


// SPDX-License-Identifier: GPL-2.0-only
/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/error-injection.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
#include <linux/ramfs.h>
#include <linux/page_idle.h>
#include <linux/migrate.h>
#include <linux/pipe_fs_i.h>
#include <linux/splice.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

#include "swap.h"

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem              (truncate_pagecache)
 *    ->private_lock            (__free_pte->block_dirty_folio)
 *      ->swap_lock             (exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_rwsem
 *    ->invalidate_lock         (acquired by fs in truncate path)
 *      ->i_mmap_rwsem          (truncate->unmap_mapping_range)
 *
 *  ->mmap_lock
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock   (various, mainly in memory.c)
 *        ->i_pages lock        (arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_lock
 *    ->invalidate_lock         (filemap_fault)
 *      ->lock_page             (filemap_fault, access_process_vm)
 *
 *  ->i_rwsem                   (generic_perform_write)
 *    ->mmap_lock               (fault_in_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock                   (fs/fs-writeback.c)
 *    ->i_pages lock            (__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock           (vma_merge)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock     (anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock               (try_to_unmap_one)
 *    ->private_lock            (try_to_unmap_one)
 *    ->i_pages lock            (try_to_unmap_one)
 *    ->lruvec->lru_lock        (follow_page_mask->mark_page_accessed)
 *    ->lruvec->lru_lock        (check_pte_range->folio_isolate_lru)
 *    ->private_lock            (folio_remove_rmap_pte->set_page_dirty)
 *    ->i_pages lock            (folio_remove_rmap_pte->set_page_dirty)
 *    bdi.wb->list_lock         (folio_remove_rmap_pte->set_page_dirty)
 *    ->inode->i_lock           (folio_remove_rmap_pte->set_page_dirty)
 *    ->memcg->move_lock        (folio_remove_rmap_pte->folio_memcg_lock)
 *    bdi.wb->list_lock         (zap_pte_range->set_page_dirty)
 *    ->inode->i_lock           (zap_pte_range->set_page_dirty)
 *    ->private_lock            (zap_pte_range->block_dirty_folio)
 */
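
/*
 * Illustrative sketch (editor's addition, not part of the kernel
 * source): one concrete instance of this ordering appears later in
 * this file, in filemap_remove_folio(), where the inode spinlock
 * nests outside the i_pages lock:
 *
 *      spin_lock(&mapping->host->i_lock);
 *      xa_lock_irq(&mapping->i_pages);
 *      __filemap_remove_folio(folio, NULL);
 *      xa_unlock_irq(&mapping->i_pages);
 *      spin_unlock(&mapping->host->i_lock);
 */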

static void mapping_set_update(struct xa_state *xas,
                struct address_space *mapping)
{
        if (dax_mapping(mapping) || shmem_mapping(mapping))
                return;
        xas_set_update(xas, workingset_update_node);
        xas_set_lru(xas, &shadow_nodes);
}

static void page_cache_delete(struct address_space *mapping,
                                   struct folio *folio, void *shadow)
{
        XA_STATE(xas, &mapping->i_pages, folio->index);
        long nr = 1;

        mapping_set_update(&xas, mapping);

        xas_set_order(&xas, folio->index, folio_order(folio));
        nr = folio_nr_pages(folio);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

        xas_store(&xas, shadow);
        xas_init_marks(&xas);

        folio->mapping = NULL;
        /* Leave page->index set: truncation lookup relies upon it */
        mapping->nrpages -= nr;
}
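
/*
 * Illustrative sketch (editor's addition): @shadow above is either
 * NULL, which simply erases the slot, or a workingset shadow entry
 * (an xarray value entry) left behind for refault detection, roughly
 * as reclaim does:
 *
 *      shadow = workingset_eviction(folio, target_memcg);
 *      __filemap_remove_folio(folio, shadow);
 */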

static void filemap_unaccount_folio(struct address_space *mapping,
                struct folio *folio)
{
        long nr;

        VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
        if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
                pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
                         current->comm, folio_pfn(folio));
                dump_page(&folio->page, "still mapped when deleted");
                dump_stack();
                add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

                if (mapping_exiting(mapping) && !folio_test_large(folio)) {
                        int mapcount = folio_mapcount(folio);

                        if (folio_ref_count(folio) >= mapcount + 2) {
                                /*
                                 * All vmas have already been torn down, so it's
                                 * a good bet that actually the page is unmapped
                                 * and we'd rather not leak it: if we're wrong,
                                 * another bad page check should catch it later.
                                 */
                                atomic_set(&folio->_mapcount, -1);
                                folio_ref_sub(folio, mapcount);
                        }
                }
        }

        /* hugetlb folios do not participate in page cache accounting. */
        if (folio_test_hugetlb(folio))
                return;

        nr = folio_nr_pages(folio);

        __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
        if (folio_test_swapbacked(folio)) {
                __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
                if (folio_test_pmd_mappable(folio))
                        __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
        } else if (folio_test_pmd_mappable(folio)) {
                __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
                filemap_nr_thps_dec(mapping);
        }

        /*
         * At this point folio must be either written or cleaned by
         * truncate.  Dirty folio here signals a bug and loss of
         * unwritten data - on ordinary filesystems.
         *
         * But it's harmless on in-memory filesystems like tmpfs; and can
         * occur when a driver which did get_user_pages() sets page dirty
         * before putting it, while the inode is being finally evicted.
         *
         * Below fixes dirty accounting after removing the folio entirely
         * but leaves the dirty flag set: it has no effect for truncated
         * folio and anyway will be cleared before returning folio to
         * buddy allocator.
         */
        if (WARN_ON_ONCE(folio_test_dirty(folio) &&
                         mapping_can_writeback(mapping)))
                folio_account_cleaned(folio, inode_to_wb(mapping->host));
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the i_pages lock.
 */
void __filemap_remove_folio(struct folio *folio, void *shadow)
{
        struct address_space *mapping = folio->mapping;

        trace_mm_filemap_delete_from_page_cache(folio);
        filemap_unaccount_folio(mapping, folio);
        page_cache_delete(mapping, folio, shadow);
}

void filemap_free_folio(struct address_space *mapping, struct folio *folio)
{
        void (*free_folio)(struct folio *);
        int refs = 1;

        free_folio = mapping->a_ops->free_folio;
        if (free_folio)
                free_folio(folio);

        if (folio_test_large(folio))
                refs = folio_nr_pages(folio);
        folio_put_refs(folio, refs);
}

/**
 * filemap_remove_folio - Remove folio from page cache.
 * @folio: The folio.
 *
 * This must be called only on folios that are locked and have been
 * verified to be in the page cache.  It will never put the folio into
 * the free list because the caller has a reference on the page.
 */
void filemap_remove_folio(struct folio *folio)
{
        struct address_space *mapping = folio->mapping;

        BUG_ON(!folio_test_locked(folio));
        spin_lock(&mapping->host->i_lock);
        xa_lock_irq(&mapping->i_pages);
        __filemap_remove_folio(folio, NULL);
        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))
                inode_add_lru(mapping->host);
        spin_unlock(&mapping->host->i_lock);

        filemap_free_folio(mapping, folio);
}
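
/*
 * Illustrative sketch (editor's addition, hypothetical caller): the
 * folio must be locked and verified to still belong to the mapping,
 * and the caller's own reference keeps it alive across the removal:
 *
 *      folio_lock(folio);
 *      if (folio->mapping == mapping)
 *              filemap_remove_folio(folio);
 *      folio_unlock(folio);
 *      folio_put(folio);
 */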

/*
 * page_cache_delete_batch - delete several folios from page cache
 * @mapping: the mapping to which folios belong
 * @fbatch: batch of folios to delete
 *
 * The function walks over mapping->i_pages and removes folios passed in
 * @fbatch from the mapping. The function expects @fbatch to be sorted
 * by page index and is optimised for it to be dense.
 * It tolerates holes in @fbatch (mapping entries at those indices are not
 * modified).
 *
 * The function expects the i_pages lock to be held.
 */
static void page_cache_delete_batch(struct address_space *mapping,
                             struct folio_batch *fbatch)
{
        XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
        long total_pages = 0;
        int i = 0;
        struct folio *folio;

        mapping_set_update(&xas, mapping);
        xas_for_each(&xas, folio, ULONG_MAX) {
                if (i >= folio_batch_count(fbatch))
                        break;

                /* A swap/dax/shadow entry got inserted? Skip it. */
                if (xa_is_value(folio))
                        continue;
                /*
                 * A page got inserted in our range? Skip it. We have our
                 * pages locked so they are protected from being removed.
                 * If we see a page whose index is higher than ours, it
                 * means our page has been removed, which shouldn't be
                 * possible because we're holding the PageLock.
                 */
                if (folio != fbatch->folios[i]) {
                        VM_BUG_ON_FOLIO(folio->index >
                                        fbatch->folios[i]->index, folio);
                        continue;
                }

                WARN_ON_ONCE(!folio_test_locked(folio));

                folio->mapping = NULL;
                /* Leave folio->index set: truncation lookup relies on it */

                i++;
                xas_store(&xas, NULL);
                total_pages += folio_nr_pages(folio);
        }
        mapping->nrpages -= total_pages;
}

void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct folio_batch *fbatch)
{
        int i;

        if (!folio_batch_count(fbatch))
                return;

        spin_lock(&mapping->host->i_lock);
        xa_lock_irq(&mapping->i_pages);
        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                trace_mm_filemap_delete_from_page_cache(folio);
                filemap_unaccount_folio(mapping, folio);
        }
        page_cache_delete_batch(mapping, fbatch);
        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))
                inode_add_lru(mapping->host);
        spin_unlock(&mapping->host->i_lock);

        for (i = 0; i < folio_batch_count(fbatch); i++)
                filemap_free_folio(mapping, fbatch->folios[i]);
}
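
/*
 * Illustrative sketch (editor's addition): callers gather locked
 * folios of one mapping, sorted by index, into a folio_batch and drop
 * them in a single pass, much as truncation does.  next_locked_folio()
 * is a hypothetical helper standing in for the caller's lookup loop:
 *
 *      struct folio_batch fbatch;
 *
 *      folio_batch_init(&fbatch);
 *      while ((folio = next_locked_folio(mapping)))
 *              if (!folio_batch_add(&fbatch, folio))
 *                      break;          // batch full
 *      delete_from_page_cache_batch(mapping, &fbatch);
 */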

int filemap_check_errors(struct address_space *mapping)
{
        int ret = 0;
        /* Check for outstanding write errors */
        if (test_bit(AS_ENOSPC, &mapping->flags) &&
            test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
        if (test_bit(AS_EIO, &mapping->flags) &&
            test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;
        return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
        /* Check for outstanding write errors */
        if (test_bit(AS_EIO, &mapping->flags))
                return -EIO;
        if (test_bit(AS_ENOSPC, &mapping->flags))
                return -ENOSPC;
        return 0;
}

/**
 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
 * @mapping:    address space structure to write
 * @wbc:        the writeback_control controlling the writeout
 *
 * Call writepages on the mapping using the provided wbc to control the
 * writeout.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_fdatawrite_wbc(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        int ret;

        if (!mapping_can_writeback(mapping) ||
            !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                return 0;

        wbc_attach_fdatawrite_inode(wbc, mapping->host);
        ret = do_writepages(mapping, wbc);
        wbc_detach_inode(wbc);
        return ret;
}
EXPORT_SYMBOL(filemap_fdatawrite_wbc);

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:    address space structure to write
 * @start:      offset in bytes where the range starts
 * @end:        offset in bytes where the range ends (inclusive)
 * @sync_mode:  enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                                loff_t end, int sync_mode)
{
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
                .nr_to_write = LONG_MAX,
                .range_start = start,
                .range_end = end,
        };

        return filemap_fdatawrite_wbc(mapping, &wbc);
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
        int sync_mode)
{
        return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                                loff_t end)
{
        return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:    target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_flush(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping:           address space within which to check
 * @start_byte:        offset in bytes where the range starts
 * @end_byte:          offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 *
 * Return: %true if at least one page exists in the specified range,
 * %false otherwise.
 */
bool filemap_range_has_page(struct address_space *mapping,
                           loff_t start_byte, loff_t end_byte)
{
        struct folio *folio;
        XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
        pgoff_t max = end_byte >> PAGE_SHIFT;

        if (end_byte < start_byte)
                return false;

        rcu_read_lock();
        for (;;) {
                folio = xas_find(&xas, max);
                if (xas_retry(&xas, folio))
                        continue;
                /* Shadow entries don't count */
                if (xa_is_value(folio))
                        continue;
                /*
                 * We don't need to try to pin this page; we're about to
                 * release the RCU lock anyway.  It is enough to know that
                 * there was a page here recently.
                 */
                break;
        }
        rcu_read_unlock();

        return folio != NULL;
}
EXPORT_SYMBOL(filemap_range_has_page);
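
/*
 * Illustrative sketch (editor's addition, hypothetical caller): a
 * direct-write path can use this check to skip flushing entirely when
 * the range has never been cached:
 *
 *      if (filemap_range_has_page(mapping, pos, pos + count - 1))
 *              err = filemap_fdatawrite_range(mapping, pos,
 *                                             pos + count - 1);
 */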

static void __filemap_fdatawait_range(struct address_space *mapping,
                                     loff_t start_byte, loff_t end_byte)
{
        pgoff_t index = start_byte >> PAGE_SHIFT;
        pgoff_t end = end_byte >> PAGE_SHIFT;
        struct folio_batch fbatch;
        unsigned nr_folios;

        folio_batch_init(&fbatch);

        while (index <= end) {
                unsigned i;

                nr_folios = filemap_get_folios_tag(mapping, &index, end,
                                PAGECACHE_TAG_WRITEBACK, &fbatch);

                if (!nr_folios)
                        break;

                for (i = 0; i < nr_folios; i++) {
                        struct folio *folio = fbatch.folios[i];

                        folio_wait_writeback(folio);
                }
                folio_batch_release(&fbatch);
                cond_resched();
        }
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:            address space structure to wait for
 * @start_byte:         offset in bytes where the range starts
 * @end_byte:           offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
                            loff_t end_byte)
{
        __filemap_fdatawait_range(mapping, start_byte, end_byte);
        return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);
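
/*
 * Illustrative sketch (editor's addition): pairing the write and wait
 * halves gives the classic data-integrity sequence, the same shape as
 * filemap_write_and_wait_range():
 *
 *      err = __filemap_fdatawrite_range(mapping, start, end,
 *                                       WB_SYNC_ALL);
 *      if (err != -EIO)
 *              err = filemap_fdatawait_range(mapping, start, end);
 */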
563                                                   563 
564 /**                                               564 /**
565  * filemap_fdatawait_range_keep_errors - wait     565  * filemap_fdatawait_range_keep_errors - wait for writeback to complete
566  * @mapping:            address space structur    566  * @mapping:            address space structure to wait for
567  * @start_byte:         offset in bytes where     567  * @start_byte:         offset in bytes where the range starts
568  * @end_byte:           offset in bytes where     568  * @end_byte:           offset in bytes where the range ends (inclusive)
569  *                                                569  *
570  * Walk the list of under-writeback pages of t    570  * Walk the list of under-writeback pages of the given address space in the
571  * given range and wait for all of them.  Unli    571  * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
572  * this function does not clear error status o    572  * this function does not clear error status of the address space.
573  *                                                573  *
574  * Use this function if callers don't handle e    574  * Use this function if callers don't handle errors themselves.  Expected
575  * call sites are system-wide / filesystem-wid    575  * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
576  * fsfreeze(8)                                    576  * fsfreeze(8)
577  */                                               577  */
578 int filemap_fdatawait_range_keep_errors(struct    578 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
579                 loff_t start_byte, loff_t end_    579                 loff_t start_byte, loff_t end_byte)
580 {                                                 580 {
581         __filemap_fdatawait_range(mapping, sta    581         __filemap_fdatawait_range(mapping, start_byte, end_byte);
582         return filemap_check_and_keep_errors(m    582         return filemap_check_and_keep_errors(mapping);
583 }                                                 583 }
584 EXPORT_SYMBOL(filemap_fdatawait_range_keep_err    584 EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file:		file pointing to address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that @file
 * refers to, in the given range, and wait for all of them.  Check the error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space vs. the file->f_wb_err cursor.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);
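
/*
 * Usage sketch (illustrative, not part of filemap.c): an fsync()-style
 * caller that has already started writeback by other means and now only
 * needs to wait and report any error against this file's f_wb_err
 * cursor. The function name is hypothetical.
 */
static int example_wait_and_report(struct file *file, loff_t start,
				   loff_t end)
{
	/* Advances file->f_wb_err, so the caller must report the error. */
	return file_fdatawait_range(file, start, end);
}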

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear the error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8).
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
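
/*
 * Usage sketch (illustrative, not part of filemap.c): sync(2)-style code
 * waits for all writeback on a mapping while keeping the error state for
 * each fsync() caller to observe later. The function name is
 * hypothetical.
 */
static int example_sync_wait_whole_file(struct address_space *mapping)
{
	return filemap_fdatawait_keep_errors(mapping);
}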

/* Returns true if writeback might be needed or already in progress. */
static bool mapping_needs_writeback(struct address_space *mapping)
{
	return mapping->nrpages;
}

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte)
{
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;
	struct folio *folio;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	xas_for_each(&xas, folio, max) {
		if (xas_retry(&xas, folio))
			continue;
		if (xa_is_value(folio))
			continue;
		if (folio_test_dirty(folio) || folio_test_locked(folio) ||
				folio_test_writeback(folio))
			break;
	}
	rcu_read_unlock();
	return folio != NULL;
}
EXPORT_SYMBOL_GPL(filemap_range_has_writeback);
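
/*
 * Usage sketch (illustrative, not part of filemap.c): a cheap pre-check
 * before more expensive per-range work. Note the answer is a snapshot;
 * the range can become dirty again as soon as the check returns. The
 * function name is hypothetical.
 */
static bool example_range_is_quiescent(struct address_space *mapping,
				       loff_t pos, size_t count)
{
	return !filemap_range_has_writeback(mapping, pos, pos + count - 1);
}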

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * Return: error status of the address space.
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0, err2;

	if (lend < lstart)
		return 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/*
		 * Even if the above returned an error, some pages may have
		 * been written out (e.g. on -ENOSPC), so wait for them.
		 * -EIO is a special case: it may indicate that something
		 * truly bad (e.g. a bug) happened, so don't wait for it.
		 */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = filemap_check_errors(mapping);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
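
/*
 * Usage sketch (illustrative, not part of filemap.c): the classic caller
 * flushes and waits on a byte range before bypassing the page cache,
 * e.g. ahead of a direct read of @count bytes at @pos. The function name
 * is hypothetical.
 */
static int example_flush_before_dio(struct address_space *mapping,
				    loff_t pos, size_t count)
{
	return filemap_write_and_wait_range(mapping, pos, pos + count - 1);
}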

void __filemap_set_wb_err(struct address_space *mapping, int err)
{
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);
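
/*
 * Usage sketch (illustrative, not part of filemap.c): writeback
 * completion paths normally record failures through the
 * mapping_set_error() helper, which feeds __filemap_set_wb_err() (and
 * the legacy AS_* flags) behind the scenes. The function name is
 * hypothetical.
 */
static void example_record_writeback_error(struct address_space *mapping,
					   int err)
{
	mapping_set_error(mapping, err);
}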

/**
 * file_check_and_advance_wb_err - report any wb error not previously seen
 *				   and advance wb_err to the current one
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (a'la fsync, or NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_check_and_advance_wb_err(struct file *file)
{
	int err = 0;
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	/* Locklessly handle the common case where nothing has changed */
	if (errseq_check(&mapping->wb_err, old)) {
		/* Something changed, must use slow path */
		spin_lock(&file->f_lock);
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
						&file->f_wb_err);
		trace_file_check_and_advance_wb_err(file, old);
		spin_unlock(&file->f_lock);
	}

	/*
	 * We're mostly using this function as a drop-in replacement for
	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
	 * that the legacy code would have had on these flags.
	 */
	clear_bit(AS_EIO, &mapping->flags);
	clear_bit(AS_ENOSPC, &mapping->flags);
	return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);
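
/*
 * Usage sketch (illustrative, not part of filemap.c): the cursor is
 * per-open-file, so each struct file sees a given writeback error at
 * most once. A sync path that waited elsewhere can surface the error
 * for this fd as its return value; the function name is hypothetical.
 */
static int example_report_fd_error(struct file *file)
{
	return file_check_and_advance_wb_err(file);
}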

/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file:	file pointing to address_space with pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
	int err = 0, err2;
	struct address_space *mapping = file->f_mapping;

	if (lend < lstart)
		return 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See the -EIO comment in filemap_write_and_wait_range() */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = file_check_and_advance_wb_err(file);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);
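
/*
 * Usage sketch (illustrative, not part of filemap.c): for a simple
 * filesystem with no metadata of its own to flush, a minimal ->fsync()
 * reduces to this call; "examplefs" is hypothetical.
 */
static int examplefs_fsync(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	return file_write_and_wait_range(file, start, end);
}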

/**
 * replace_page_cache_folio - replace a pagecache folio with a new one
 * @old:	folio to be replaced
 * @new:	folio to replace with
 *
 * This function replaces a folio in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new folio and
 * drops it for the old folio.  Both the old and new folios must be
 * locked.  This function does not add the new folio to the LRU; the
 * caller must do that.
 *
 * The remove + add is atomic.  This function cannot fail.
 */
void replace_page_cache_folio(struct folio *old, struct folio *new)
{
	struct address_space *mapping = old->mapping;
	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
	pgoff_t offset = old->index;
	XA_STATE(xas, &mapping->i_pages, offset);

	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
	VM_BUG_ON_FOLIO(new->mapping, new);

	folio_get(new);
	new->mapping = mapping;
	new->index = offset;

	mem_cgroup_replace_folio(old, new);

	xas_lock_irq(&xas);
	xas_store(&xas, new);

	old->mapping = NULL;
	/* hugetlb pages do not participate in page cache accounting. */
	if (!folio_test_hugetlb(old))
		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
	if (!folio_test_hugetlb(new))
		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
	if (folio_test_swapbacked(old))
		__lruvec_stat_sub_folio(old, NR_SHMEM);
	if (folio_test_swapbacked(new))
		__lruvec_stat_add_folio(new, NR_SHMEM);
	xas_unlock_irq(&xas);
	if (free_folio)
		free_folio(old);
	folio_put(old);
}
EXPORT_SYMBOL_GPL(replace_page_cache_folio);
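
/*
 * Usage sketch (illustrative, not part of filemap.c): a caller holding
 * both folios locked substitutes @new for @old atomically and must then
 * add @new to the LRU itself. The function name is hypothetical.
 */
static void example_swap_in_new_folio(struct folio *old, struct folio *new)
{
	replace_page_cache_folio(old, new);
	folio_add_lru(new);
}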

noinline int __filemap_add_folio(struct address_space *mapping,
		struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *alloced_shadow = NULL;
	int alloced_order = 0;
	bool huge;
	long nr;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
	VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
			folio);
	mapping_set_update(&xas, mapping);

	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
	xas_set_order(&xas, index, folio_order(folio));
	huge = folio_test_hugetlb(folio);
	nr = folio_nr_pages(folio);

	gfp &= GFP_RECLAIM_MASK;
	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = xas.xa_index;

	for (;;) {
		int order = -1, split_order = 0;
		void *entry, *old = NULL;

		xas_lock_irq(&xas);
		xas_for_each_conflict(&xas, entry) {
			old = entry;
			if (!xa_is_value(entry)) {
				xas_set_err(&xas, -EEXIST);
				goto unlock;
			}
			/*
			 * If a larger entry exists,
			 * it will be the first and only entry iterated.
			 */
			if (order == -1)
				order = xas_get_order(&xas);
		}

		/* entry may have changed before we re-acquire the lock */
		if (alloced_order && (old != alloced_shadow || order != alloced_order)) {
			xas_destroy(&xas);
			alloced_order = 0;
		}

		if (old) {
			if (order > 0 && order > folio_order(folio)) {
				/* How to handle large swap entries? */
				BUG_ON(shmem_mapping(mapping));
				if (!alloced_order) {
					split_order = order;
					goto unlock;
				}
				xas_split(&xas, old, order);
				xas_reset(&xas);
			}
			if (shadowp)
				*shadowp = old;
		}

		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;

		mapping->nrpages += nr;

		/* hugetlb pages do not participate in page cache accounting */
		if (!huge) {
			__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
			if (folio_test_pmd_mappable(folio))
				__lruvec_stat_mod_folio(folio,
						NR_FILE_THPS, nr);
		}

unlock:
		xas_unlock_irq(&xas);

		/* split needed, alloc here and retry. */
		if (split_order) {
			xas_split_alloc(&xas, old, split_order, gfp);
			if (xas_error(&xas))
				goto error;
			alloced_shadow = old;
			alloced_order = split_order;
			xas_reset(&xas);
			continue;
		}

		if (!xas_nomem(&xas, gfp))
			break;
	}

	if (xas_error(&xas))
		goto error;

	trace_mm_filemap_add_to_page_cache(folio);
	return 0;
error:
	folio->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	folio_put_refs(folio, nr);
	return xas_error(&xas);
}
ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);

int filemap_add_folio(struct address_space *mapping, struct folio *folio,
				pgoff_t index, gfp_t gfp)
{
	void *shadow = NULL;
	int ret;

	ret = mem_cgroup_charge(folio, NULL, gfp);
	if (ret)
		return ret;

	__folio_set_locked(folio);
	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
	if (unlikely(ret)) {
		mem_cgroup_uncharge(folio);
		__folio_clear_locked(folio);
	} else {
		/*
		 * The folio might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed folio.
		 * The exception is folios getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		WARN_ON_ONCE(folio_test_active(folio));
		if (!(gfp & __GFP_WRITE) && shadow)
			workingset_refault(folio, shadow);
		folio_add_lru(folio);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(filemap_add_folio);
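
/*
 * Usage sketch (illustrative, not part of filemap.c): the common pattern
 * allocates a folio, inserts it at @index, fills it, and unlocks it; on
 * -EEXIST callers typically retry the pagecache lookup instead. The
 * function name is hypothetical and error handling is minimal.
 */
static struct folio *example_add_new_folio(struct address_space *mapping,
					   pgoff_t index, gfp_t gfp)
{
	struct folio *folio = filemap_alloc_folio(gfp, 0);
	int err;

	if (!folio)
		return ERR_PTR(-ENOMEM);
	err = filemap_add_folio(mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);
		return ERR_PTR(err);
	}
	/* Returned locked and on the LRU; the caller fills and unlocks. */
	return folio;
}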

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
{
	int n;
	struct folio *folio;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			folio = __folio_alloc_node_noprof(gfp, order, n);
		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));

		return folio;
	}
	return folio_alloc_noprof(gfp, order);
}
EXPORT_SYMBOL(filemap_alloc_folio_noprof);
#endif
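
/*
 * Usage sketch (illustrative, not part of filemap.c): callers go through
 * the filemap_alloc_folio() wrapper (which adds the allocation profiling
 * hooks) rather than the _noprof variant; on NUMA systems this honours
 * cpuset memory spreading. The function name is hypothetical.
 */
static struct folio *example_alloc_order0_folio(gfp_t gfp)
{
	return filemap_alloc_folio(gfp, 0);
}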

/*
 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
 *
 * Exclusively lock the invalidate_lock of any passed mapping that is not NULL.
 *
 * @mapping1: the first mapping to lock
 * @mapping2: the second mapping to lock
 */
void filemap_invalidate_lock_two(struct address_space *mapping1,
				 struct address_space *mapping2)
{
	if (mapping1 > mapping2)
		swap(mapping1, mapping2);
	if (mapping1)
		down_write(&mapping1->invalidate_lock);
	if (mapping2 && mapping1 != mapping2)
		down_write_nested(&mapping2->invalidate_lock, 1);
}
EXPORT_SYMBOL(filemap_invalidate_lock_two);

/*
 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
 *
 * Unlock the exclusive invalidate_lock of any passed mapping that is not NULL.
 *
 * @mapping1: the first mapping to unlock
 * @mapping2: the second mapping to unlock
 */
void filemap_invalidate_unlock_two(struct address_space *mapping1,
				   struct address_space *mapping2)
{
	if (mapping1)
		up_write(&mapping1->invalidate_lock);
	if (mapping2 && mapping1 != mapping2)
		up_write(&mapping2->invalidate_lock);
}
EXPORT_SYMBOL(filemap_invalidate_unlock_two);
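
/*
 * Usage sketch (illustrative, not part of filemap.c): a cross-file
 * operation (e.g. deduplication) takes both invalidate_locks, does its
 * work with page invalidation excluded on both mappings, and releases
 * them together. Lock ordering by address is handled internally. The
 * function name is hypothetical.
 */
static void example_cross_mapping_op(struct file *f1, struct file *f2)
{
	struct address_space *m1 = f1->f_mapping;
	struct address_space *m2 = f2->f_mapping;

	filemap_invalidate_lock_two(m1, m2);
	/* ... operate on both mappings here ... */
	filemap_invalidate_unlock_two(m1, m2);
}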

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. We use a hash table of waitqueues,
 * where the bucket discipline is to keep all waiters on the same queue
 * and wake all of them when any of the pages becomes available; each
 * woken context then checks whether the page it actually cares about
 * became available. This saves space at the cost of "thundering herd"
 * phenomena during rare hash collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *folio_waitqueue(struct folio *folio)
{
	return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
}

void __init pagecache_init(void)
{
	int i;

	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(&folio_wait_table[i]);

	page_writeback_init();
}
1081                                                  1081 
1082 /*                                               1082 /*
1083  * The page wait code treats the "wait->flags    1083  * The page wait code treats the "wait->flags" somewhat unusually, because
1084  * we have multiple different kinds of waits,    1084  * we have multiple different kinds of waits, not just the usual "exclusive"
1085  * one.                                          1085  * one.
1086  *                                               1086  *
1087  * We have:                                      1087  * We have:
1088  *                                               1088  *
1089  *  (a) no special bits set:                     1089  *  (a) no special bits set:
1090  *                                               1090  *
1091  *      We're just waiting for the bit to be     1091  *      We're just waiting for the bit to be released, and when a waker
1092  *      calls the wakeup function, we set WQ_    1092  *      calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
1093  *      and remove it from the wait queue.       1093  *      and remove it from the wait queue.
1094  *                                               1094  *
1095  *      Simple and straightforward.              1095  *      Simple and straightforward.
1096  *                                               1096  *
1097  *  (b) WQ_FLAG_EXCLUSIVE:                       1097  *  (b) WQ_FLAG_EXCLUSIVE:
1098  *                                               1098  *
1099  *      The waiter is waiting to get the lock    1099  *      The waiter is waiting to get the lock, and only one waiter should
1100  *      be woken up to avoid any thundering h    1100  *      be woken up to avoid any thundering herd behavior. We'll set the
1101  *      WQ_FLAG_WOKEN bit, wake it up, and re    1101  *      WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
1102  *                                               1102  *
1103  *      This is the traditional exclusive wai    1103  *      This is the traditional exclusive wait.
1104  *                                               1104  *
1105  *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:      1105  *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
1106  *                                               1106  *
1107  *      The waiter is waiting to get the bit,    1107  *      The waiter is waiting to get the bit, and additionally wants the
1108  *      lock to be transferred to it for fair    1108  *      lock to be transferred to it for fair lock behavior. If the lock
1109  *      cannot be taken, we stop walking the     1109  *      cannot be taken, we stop walking the wait queue without waking
1110  *      the waiter.                              1110  *      the waiter.
1111  *                                               1111  *
1112  *      This is the "fair lock handoff" case,    1112  *      This is the "fair lock handoff" case, and in addition to setting
1113  *      WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to    1113  *      WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
1114  *      that it now has the lock.                1114  *      that it now has the lock.
1115  */                                              1115  */
1116 static int wake_page_function(wait_queue_entr    1116 static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
1117 {                                                1117 {
1118         unsigned int flags;                      1118         unsigned int flags;
1119         struct wait_page_key *key = arg;         1119         struct wait_page_key *key = arg;
1120         struct wait_page_queue *wait_page        1120         struct wait_page_queue *wait_page
1121                 = container_of(wait, struct w    1121                 = container_of(wait, struct wait_page_queue, wait);
1122                                                  1122 
1123         if (!wake_page_match(wait_page, key))    1123         if (!wake_page_match(wait_page, key))
1124                 return 0;                        1124                 return 0;
1125                                                  1125 
1126         /*                                       1126         /*
1127          * If it's a lock handoff wait, we ge    1127          * If it's a lock handoff wait, we get the bit for it, and
1128          * stop walking (and do not wake it u    1128          * stop walking (and do not wake it up) if we can't.
1129          */                                      1129          */
1130         flags = wait->flags;                     1130         flags = wait->flags;
1131         if (flags & WQ_FLAG_EXCLUSIVE) {         1131         if (flags & WQ_FLAG_EXCLUSIVE) {
1132                 if (test_bit(key->bit_nr, &ke    1132                 if (test_bit(key->bit_nr, &key->folio->flags))
1133                         return -1;               1133                         return -1;
1134                 if (flags & WQ_FLAG_CUSTOM) {    1134                 if (flags & WQ_FLAG_CUSTOM) {
1135                         if (test_and_set_bit(    1135                         if (test_and_set_bit(key->bit_nr, &key->folio->flags))
1136                                 return -1;       1136                                 return -1;
1137                         flags |= WQ_FLAG_DONE    1137                         flags |= WQ_FLAG_DONE;
1138                 }                                1138                 }
1139         }                                        1139         }
1140                                                  1140 
1141         /*                                       1141         /*
1142          * We are holding the wait-queue lock    1142          * We are holding the wait-queue lock, but the waiter that
1143          * is waiting for this will be checki    1143          * is waiting for this will be checking the flags without
1144          * any locking.                          1144          * any locking.
1145          *                                       1145          *
1146          * So update the flags atomically, an    1146          * So update the flags atomically, and wake up the waiter
1147          * afterwards to avoid any races. Thi    1147          * afterwards to avoid any races. This store-release pairs
1148          * with the load-acquire in folio_wai    1148          * with the load-acquire in folio_wait_bit_common().
1149          */                                      1149          */
1150         smp_store_release(&wait->flags, flags    1150         smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
1151         wake_up_state(wait->private, mode);      1151         wake_up_state(wait->private, mode);
1152                                                  1152 
1153         /*                                       1153         /*
1154          * Ok, we have successfully done what    1154          * Ok, we have successfully done what we're waiting for,
1155          * and we can unconditionally remove     1155          * and we can unconditionally remove the wait entry.
1156          *                                       1156          *
1157          * Note that this pairs with the "fin    1157          * Note that this pairs with the "finish_wait()" in the
1158          * waiter, and has to be the absolute    1158          * waiter, and has to be the absolute last thing we do.
1159          * After this list_del_init(&wait->en    1159          * After this list_del_init(&wait->entry) the wait entry
1160          * might be de-allocated and the proc    1160          * might be de-allocated and the process might even have
1161          * exited.                               1161          * exited.
1162          */                                      1162          */
1163         list_del_init_careful(&wait->entry);     1163         list_del_init_careful(&wait->entry);
1164         return (flags & WQ_FLAG_EXCLUSIVE) !=    1164         return (flags & WQ_FLAG_EXCLUSIVE) != 0;
1165 }                                                1165 }
1166                                                  1166 
1167 static void folio_wake_bit(struct folio *foli    1167 static void folio_wake_bit(struct folio *folio, int bit_nr)
1168 {                                                1168 {
1169         wait_queue_head_t *q = folio_waitqueu    1169         wait_queue_head_t *q = folio_waitqueue(folio);
1170         struct wait_page_key key;                1170         struct wait_page_key key;
1171         unsigned long flags;                     1171         unsigned long flags;
1172                                                  1172 
1173         key.folio = folio;                       1173         key.folio = folio;
1174         key.bit_nr = bit_nr;                     1174         key.bit_nr = bit_nr;
1175         key.page_match = 0;                      1175         key.page_match = 0;
1176                                                  1176 
1177         spin_lock_irqsave(&q->lock, flags);      1177         spin_lock_irqsave(&q->lock, flags);
1178         __wake_up_locked_key(q, TASK_NORMAL,     1178         __wake_up_locked_key(q, TASK_NORMAL, &key);
1179                                                  1179 
1180         /*                                       1180         /*
1181          * It's possible to miss clearing wai    1181          * It's possible to miss clearing waiters here, when we woke our page
1182          * waiters, but the hashed waitqueue     1182          * waiters, but the hashed waitqueue has waiters for other pages on it.
1183          * That's okay, it's a rare case. The    1183          * That's okay, it's a rare case. The next waker will clear it.
1184          *                                       1184          *
1185          * Note that, depending on the page p    1185          * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
1186          * other), the flag may be cleared in    1186          * other), the flag may be cleared in the course of freeing the page;
1187          * but that is not required for corre    1187          * but that is not required for correctness.
1188          */                                      1188          */
1189         if (!waitqueue_active(q) || !key.page    1189         if (!waitqueue_active(q) || !key.page_match)
1190                 folio_clear_waiters(folio);      1190                 folio_clear_waiters(folio);
1191                                                  1191 
1192         spin_unlock_irqrestore(&q->lock, flag    1192         spin_unlock_irqrestore(&q->lock, flags);
1193 }                                                1193 }
1194                                                  1194 
/*
 * A choice of three behaviors for folio_wait_bit_common():
 */
enum behavior {
        EXCLUSIVE,      /* Hold ref to page and take the bit when woken, like
                         * __folio_lock() waiting on then setting PG_locked.
                         */
        SHARED,         /* Hold ref to page and check the bit when woken, like
                         * folio_wait_writeback() waiting on PG_writeback.
                         */
        DROP,           /* Drop ref to page before wait, no check when woken,
                         * like folio_put_wait_locked() on PG_locked.
                         */
};

/*
 * Attempt to check (or get) the folio flag, and mark us done
 * if successful.
 */
static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
                                        struct wait_queue_entry *wait)
{
        if (wait->flags & WQ_FLAG_EXCLUSIVE) {
                if (test_and_set_bit(bit_nr, &folio->flags))
                        return false;
        } else if (test_bit(bit_nr, &folio->flags))
                return false;

        wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
        return true;
}

/* How many times do we accept lock stealing from under a waiter? */
int sysctl_page_lock_unfairness = 5;

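/*
 * Illustrative sketch, not part of the kernel source: the "stealing"
 * bounded by the knob above (exposed as vm.page_lock_unfairness).
 * A trylock like this can take PG_locked ahead of an already-queued
 * waiter; after sysctl_page_lock_unfairness such steals, the waiter
 * asks for a fair handoff via WQ_FLAG_CUSTOM.
 */
static bool __maybe_unused example_steal_folio_lock(struct folio *folio)
{
        /* succeeds even while other tasks sleep in folio_wait_bit_common() */
        return folio_trylock(folio);
}
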
static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
                int state, enum behavior behavior)
{
        wait_queue_head_t *q = folio_waitqueue(folio);
        int unfairness = sysctl_page_lock_unfairness;
        struct wait_page_queue wait_page;
        wait_queue_entry_t *wait = &wait_page.wait;
        bool thrashing = false;
        unsigned long pflags;
        bool in_thrashing;

        if (bit_nr == PG_locked &&
            !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
                delayacct_thrashing_start(&in_thrashing);
                psi_memstall_enter(&pflags);
                thrashing = true;
        }

        init_wait(wait);
        wait->func = wake_page_function;
        wait_page.folio = folio;
        wait_page.bit_nr = bit_nr;

repeat:
        wait->flags = 0;
        if (behavior == EXCLUSIVE) {
                wait->flags = WQ_FLAG_EXCLUSIVE;
                if (--unfairness < 0)
                        wait->flags |= WQ_FLAG_CUSTOM;
        }

        /*
         * Do one last check whether we can get the
         * page bit synchronously.
         *
         * Do the folio_set_waiters() marking before that
         * to let any waker we _just_ missed know they
         * need to wake us up (otherwise they'll never
         * even go to the slow case that looks at the
         * page queue), and add ourselves to the wait
         * queue if we need to sleep.
         *
         * This part needs to be done under the queue
         * lock to avoid races.
         */
        spin_lock_irq(&q->lock);
        folio_set_waiters(folio);
        if (!folio_trylock_flag(folio, bit_nr, wait))
                __add_wait_queue_entry_tail(q, wait);
        spin_unlock_irq(&q->lock);

        /*
         * From now on, all the logic will be based on
         * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
         * see whether the page bit testing has already
         * been done by the wake function.
         *
         * We can drop our reference to the folio.
         */
        if (behavior == DROP)
                folio_put(folio);

        /*
         * Note that until the "finish_wait()", or until
         * we see the WQ_FLAG_WOKEN flag, we need to
         * be very careful with the 'wait->flags', because
         * we may race with a waker that sets them.
         */
        for (;;) {
                unsigned int flags;

                set_current_state(state);

                /* Loop until we've been woken or interrupted */
                flags = smp_load_acquire(&wait->flags);
                if (!(flags & WQ_FLAG_WOKEN)) {
                        if (signal_pending_state(state, current))
                                break;

                        io_schedule();
                        continue;
                }

                /* If we were non-exclusive, we're done */
                if (behavior != EXCLUSIVE)
                        break;

                /* If the waker got the lock for us, we're done */
                if (flags & WQ_FLAG_DONE)
                        break;

                /*
                 * Otherwise, if we're getting the lock, we need to
                 * try to get it ourselves.
                 *
                 * And if that fails, we'll have to retry this all.
                 */
                if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
                        goto repeat;

                wait->flags |= WQ_FLAG_DONE;
                break;
        }

        /*
         * If a signal happened, this 'finish_wait()' may remove the last
         * waiter from the wait-queues, but the folio waiters bit will remain
         * set. That's ok. The next wakeup will take care of it, and trying
         * to do it here would be difficult and prone to races.
         */
        finish_wait(q, wait);

        if (thrashing) {
                delayacct_thrashing_end(&in_thrashing);
                psi_memstall_leave(&pflags);
        }

        /*
         * NOTE! The wait->flags weren't stable until we've done the
         * 'finish_wait()', and we could have exited the loop above due
         * to a signal, and had a wakeup event happen after the signal
         * test but before the 'finish_wait()'.
         *
         * So only after the finish_wait() can we reliably determine
         * if we got woken up or not, so we can now figure out the final
         * return value based on that state without races.
         *
         * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
         * waiter, but an exclusive one requires WQ_FLAG_DONE.
         */
        if (behavior == EXCLUSIVE)
                return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;

        return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
}

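/*
 * Illustrative sketch, not part of the kernel source: a SHARED-style
 * wrapper in the spirit of folio_wait_locked_killable(), shown only to
 * make the @state and @behavior parameters above concrete.
 */
static int __maybe_unused example_wait_unlocked_killable(struct folio *folio)
{
        if (!folio_test_locked(folio))
                return 0;
        return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE, SHARED);
}
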
#ifdef CONFIG_MIGRATION
/**
 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
 * @entry: migration swap entry.
 * @ptl: already locked ptl. This function will drop the lock.
 *
 * Wait for a migration entry referencing the given page to be removed. This is
 * equivalent to folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE) except
 * this can be called without taking a reference on the page. Instead this
 * should be called while holding the ptl for the migration entry referencing
 * the page.
 *
 * Returns after unlocking the ptl.
 *
 * This follows the same logic as folio_wait_bit_common() so see the comments
 * there.
 */
void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
        __releases(ptl)
{
        struct wait_page_queue wait_page;
        wait_queue_entry_t *wait = &wait_page.wait;
        bool thrashing = false;
        unsigned long pflags;
        bool in_thrashing;
        wait_queue_head_t *q;
        struct folio *folio = pfn_swap_entry_folio(entry);

        q = folio_waitqueue(folio);
        if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
                delayacct_thrashing_start(&in_thrashing);
                psi_memstall_enter(&pflags);
                thrashing = true;
        }

        init_wait(wait);
        wait->func = wake_page_function;
        wait_page.folio = folio;
        wait_page.bit_nr = PG_locked;
        wait->flags = 0;

        spin_lock_irq(&q->lock);
        folio_set_waiters(folio);
        if (!folio_trylock_flag(folio, PG_locked, wait))
                __add_wait_queue_entry_tail(q, wait);
        spin_unlock_irq(&q->lock);

        /*
         * If a migration entry exists for the page, the migration path must
         * hold a valid reference to the page, and it must take the ptl to
         * remove the migration entry. So the page is valid until the ptl
         * is dropped.
         */
        spin_unlock(ptl);

        for (;;) {
                unsigned int flags;

                set_current_state(TASK_UNINTERRUPTIBLE);

                /* Loop until we've been woken or interrupted */
                flags = smp_load_acquire(&wait->flags);
                if (!(flags & WQ_FLAG_WOKEN)) {
                        if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
                                break;

                        io_schedule();
                        continue;
                }
                break;
        }

        finish_wait(q, wait);

        if (thrashing) {
                delayacct_thrashing_end(&in_thrashing);
                psi_memstall_leave(&pflags);
        }
}
#endif

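#ifdef CONFIG_MIGRATION
/*
 * Illustrative caller pattern, not part of the kernel source and
 * simplified from the migration-entry wait paths in mm/migrate.c:
 * with the page table lock held and a migration entry decoded from a
 * PTE, hand off to migration_entry_wait_on_locked(), which drops @ptl.
 */
static void __maybe_unused example_wait_for_migration(swp_entry_t entry,
                                                      spinlock_t *ptl)
{
        if (is_migration_entry(entry))
                migration_entry_wait_on_locked(entry, ptl);     /* drops ptl */
        else
                spin_unlock(ptl);
}
#endif
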
void folio_wait_bit(struct folio *folio, int bit_nr)
{
        folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
}
EXPORT_SYMBOL(folio_wait_bit);

int folio_wait_bit_killable(struct folio *folio, int bit_nr)
{
        return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
}
EXPORT_SYMBOL(folio_wait_bit_killable);

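/*
 * Illustrative sketch, not part of the kernel source: how a
 * folio_wait_writeback()-style helper is built on folio_wait_bit()
 * (the real one lives in mm/page-writeback.c).
 */
static void __maybe_unused example_wait_writeback(struct folio *folio)
{
        while (folio_test_writeback(folio))
                folio_wait_bit(folio, PG_writeback);
}
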
/**
 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
 * @folio: The folio to wait for.
 * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
 *
 * The caller should hold a reference on @folio.  They expect the page to
 * become unlocked relatively soon, but do not wish to hold up migration
 * (for example) by holding the reference while waiting for the folio to
 * come unlocked.  After this function returns, the caller should not
 * dereference @folio.
 *
 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
 */
static int folio_put_wait_locked(struct folio *folio, int state)
{
        return folio_wait_bit_common(folio, PG_locked, state, DROP);
}

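/*
 * Illustrative sketch, not part of the kernel source: a caller that
 * owns one reference and wants to wait for unlock without pinning the
 * folio hands that reference to folio_put_wait_locked(). After the
 * call the folio must not be dereferenced.
 */
static int __maybe_unused example_put_and_wait(struct folio *folio)
{
        /* our reference is consumed whether or not the wait succeeds */
        return folio_put_wait_locked(folio, TASK_KILLABLE);
}
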
/**
 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
 * @folio: Folio defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @folio.
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
{
        wait_queue_head_t *q = folio_waitqueue(folio);
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_entry_tail(q, waiter);
        folio_set_waiters(folio);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(folio_add_wait_queue);

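/*
 * Illustrative sketch, not part of the kernel source: queueing a
 * custom waiter as an async consumer might. The wake function must
 * understand wait_page_key, so the entry is set up exactly like the
 * ones this file builds itself.
 */
static void __maybe_unused example_queue_waiter(struct folio *folio,
                                                struct wait_page_queue *wqe)
{
        init_wait(&wqe->wait);
        wqe->wait.func = wake_page_function;
        wqe->folio = folio;
        wqe->bit_nr = PG_locked;
        folio_add_wait_queue(folio, &wqe->wait);
}
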
/**
 * folio_unlock - Unlock a locked folio.
 * @folio: The folio.
 *
 * Unlocks the folio and wakes up any thread sleeping on the page lock.
 *
 * Context: May be called from interrupt or process context.  May not be
 * called from NMI context.
 */
void folio_unlock(struct folio *folio)
{
        /* Bit 7 allows x86 to check the byte's sign bit */
        BUILD_BUG_ON(PG_waiters != 7);
        BUILD_BUG_ON(PG_locked > 7);
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
                folio_wake_bit(folio, PG_locked);
}
EXPORT_SYMBOL(folio_unlock);

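/*
 * Illustrative sketch, not part of the kernel source: the canonical
 * lock/modify/unlock cycle around folio_unlock(). folio_lock() is the
 * pagemap.h wrapper that falls back to __folio_lock() below when the
 * trylock fails.
 */
static void __maybe_unused example_lock_cycle(struct folio *folio)
{
        folio_lock(folio);              /* may sleep */
        /* ... operate on the locked folio ... */
        folio_unlock(folio);            /* wakes any PG_locked waiters */
}
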
/**
 * folio_end_read - End read on a folio.
 * @folio: The folio.
 * @success: True if all reads completed successfully.
 *
 * When all reads against a folio have completed, filesystems should
 * call this function to let the pagecache know that no more reads
 * are outstanding.  This will unlock the folio and wake up any thread
 * sleeping on the lock.  The folio will also be marked uptodate if all
 * reads succeeded.
 *
 * Context: May be called from interrupt or process context.  May not be
 * called from NMI context.
 */
void folio_end_read(struct folio *folio, bool success)
{
        unsigned long mask = 1 << PG_locked;

        /* Must be in bottom byte for x86 to work */
        BUILD_BUG_ON(PG_uptodate > 7);
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

        if (likely(success))
                mask |= 1 << PG_uptodate;
        if (folio_xor_flags_has_waiters(folio, mask))
                folio_wake_bit(folio, PG_locked);
}
EXPORT_SYMBOL(folio_end_read);

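/*
 * Illustrative sketch, not part of the kernel source: a filesystem
 * read-completion path, assuming a hypothetical @err status from the
 * I/O layer.
 */
static void __maybe_unused example_read_complete(struct folio *folio, int err)
{
        /* sets uptodate on success and unlocks in one atomic flag update */
        folio_end_read(folio, err == 0);
}
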
/**
 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
 * @folio: The folio.
 *
 * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
 * it.  The folio reference held for PG_private_2 being set is released.
 *
 * This is, for example, used when a netfs folio is being written to a local
 * disk cache, thereby allowing writes to the cache for the same folio to be
 * serialised.
 */
void folio_end_private_2(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
        clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
        folio_wake_bit(folio, PG_private_2);
        folio_put(folio);
}
EXPORT_SYMBOL(folio_end_private_2);

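/*
 * Illustrative sketch, not part of the kernel source, of the
 * PG_private_2 lifecycle; real users should go through the netfs
 * helpers rather than raw bit operations. The reference taken here is
 * the one folio_end_private_2() releases.
 */
static void __maybe_unused example_private_2_cycle(struct folio *folio)
{
        folio_get(folio);
        set_bit(PG_private_2, folio_flags(folio, 0));
        /* ... copy the folio to the local disk cache ... */
        folio_end_private_2(folio);     /* clears the bit, wakes, puts */
}
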
/**
 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
 * @folio: The folio to wait on.
 *
 * Wait for PG_private_2 to be cleared on a folio.
 */
void folio_wait_private_2(struct folio *folio)
{
        while (folio_test_private_2(folio))
                folio_wait_bit(folio, PG_private_2);
}
EXPORT_SYMBOL(folio_wait_private_2);

/**
 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
 * @folio: The folio to wait on.
 *
 * Wait for PG_private_2 to be cleared on a folio or until a fatal signal is
 * received by the calling task.
 *
 * Return:
 * - 0 if successful.
 * - -EINTR if a fatal signal was encountered.
 */
int folio_wait_private_2_killable(struct folio *folio)
{
        int ret = 0;

        while (folio_test_private_2(folio)) {
                ret = folio_wait_bit_killable(folio, PG_private_2);
                if (ret < 0)
                        break;
        }

        return ret;
}
EXPORT_SYMBOL(folio_wait_private_2_killable);

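/*
 * Illustrative sketch, not part of the kernel source: serialise a new
 * cache write behind an in-flight one, giving up on a fatal signal.
 */
static int __maybe_unused example_serialise_cache_write(struct folio *folio)
{
        int err = folio_wait_private_2_killable(folio);

        if (err)
                return err;     /* -EINTR: fatal signal received */
        /* ... PG_private_2 is clear; safe to start a new cache write ... */
        return 0;
}
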
/**
 * folio_end_writeback - End writeback against a folio.
 * @folio: The folio.
 *
 * The folio must actually be under writeback.
 *
 * Context: May be called from process or interrupt context.
 */
void folio_end_writeback(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);

        /*
         * folio_test_clear_reclaim() could be used here but it is an
         * atomic operation and overkill in this particular case. Failing
         * to shuffle a folio marked for immediate reclaim is too mild
         * a gain to justify taking an atomic operation penalty at the
         * end of every folio writeback.
         */
        if (folio_test_reclaim(folio)) {
                folio_clear_reclaim(folio);
                folio_rotate_reclaimable(folio);
        }

        /*
         * Writeback does not hold a folio reference of its own, relying
         * on truncation to wait for the clearing of PG_writeback.
         * But here we must make sure that the folio is not freed and
         * reused before the folio_wake_bit().
         */
        folio_get(folio);
        if (__folio_end_writeback(folio))
                folio_wake_bit(folio, PG_writeback);
        acct_reclaim_writeback(folio);
        folio_put(folio);
}
EXPORT_SYMBOL(folio_end_writeback);

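/*
 * Illustrative sketch, not part of the kernel source: an I/O
 * completion handler ending writeback once the last outstanding block
 * finishes, assuming a hypothetical @last_block flag tracked by the
 * filesystem.
 */
static void __maybe_unused example_writeback_done(struct folio *folio,
                                                  bool last_block)
{
        if (last_block)
                folio_end_writeback(folio);     /* wakes PG_writeback waiters */
}
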
/**
 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
 * @folio: The folio to lock
 */
void __folio_lock(struct folio *folio)
{
        folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
                                EXCLUSIVE);
}
EXPORT_SYMBOL(__folio_lock);

int __folio_lock_killable(struct folio *folio)
{
        return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
                                        EXCLUSIVE);
}
EXPORT_SYMBOL_GPL(__folio_lock_killable);

static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
{
        struct wait_queue_head *q = folio_waitqueue(folio);
        int ret;

        wait->folio = folio;
        wait->bit_nr = PG_locked;

        spin_lock_irq(&q->lock);
        __add_wait_queue_entry_tail(q, &wait->wait);
        folio_set_waiters(folio);
        ret = !folio_trylock(folio);
        /*
         * If we were successful now, we know we're still on the
         * waitqueue as we're still under the lock. This means it's
         * safe to remove and return success, we know the callback
         * isn't going to trigger.
         */
        if (!ret)
                __remove_wait_queue(q, &wait->wait);
        else
                ret = -EIOCBQUEUED;
        spin_unlock_irq(&q->lock);
        return ret;
}

/*
 * Return values:
 * 0 - folio is locked.
 * non-zero - folio is not locked.
 *     mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
 *     vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and
 *     FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0
 * with the folio locked and the mmap_lock/per-VMA lock left unperturbed.
 */
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
{
        unsigned int flags = vmf->flags;

        if (fault_flag_allow_retry_first(flags)) {
                /*
                 * CAUTION! In this case, mmap_lock/per-VMA lock is not
                 * released even though returning VM_FAULT_RETRY.
                 */
                if (flags & FAULT_FLAG_RETRY_NOWAIT)
                        return VM_FAULT_RETRY;

                release_fault_lock(vmf);
                if (flags & FAULT_FLAG_KILLABLE)
                        folio_wait_locked_killable(folio);
                else
                        folio_wait_locked(folio);
                return VM_FAULT_RETRY;
        }
        if (flags & FAULT_FLAG_KILLABLE) {
                bool ret;

                ret = __folio_lock_killable(folio);
                if (ret) {
                        release_fault_lock(vmf);
                        return VM_FAULT_RETRY;
                }
        } else {
                __folio_lock(folio);
        }

        return 0;
}

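/*
 * Illustrative sketch, not part of the kernel source, of the
 * fault-handler pattern around __folio_lock_or_retry(), simplified
 * from callers like do_swap_page(); error paths trimmed.
 */
static vm_fault_t __maybe_unused example_fault_lock(struct folio *folio,
                                                    struct vm_fault *vmf)
{
        vm_fault_t ret = __folio_lock_or_retry(folio, vmf);

        if (ret) {                      /* VM_FAULT_RETRY */
                folio_put(folio);       /* fault will be retried from scratch */
                return ret;
        }
        /* ... handle the fault with the folio locked ... */
        folio_unlock(folio);
        return 0;
}
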
/**
 * page_cache_next_miss() - Find the next gap in the page cache.
 * @mapping: Mapping.
 * @index: Index.
 * @max_scan: Maximum range to search.
 *
 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
 * gap with the lowest index.
 *
 * This function may be called under the rcu_read_lock.  However, this will
 * not atomically search a snapshot of the cache at a single point in time.
 * For example, if a gap is created at index 5, then subsequently a gap is
 * created at index 10, page_cache_next_miss() covering both indices may
 * return 10 if called under the rcu_read_lock.
 *
 * Return: The index of the gap if found, otherwise an index outside the
 * range specified (in which case 'return - index >= max_scan' will be true).
 * In the rare case of index wrap-around, 0 will be returned.
 */
pgoff_t page_cache_next_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan)
{
        XA_STATE(xas, &mapping->i_pages, index);

        while (max_scan--) {
                void *entry = xas_next(&xas);
                if (!entry || xa_is_value(entry))
                        return xas.xa_index;
                if (xas.xa_index == 0)
                        return 0;
        }

        return index + max_scan;
}
EXPORT_SYMBOL(page_cache_next_miss);

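/*
 * Illustrative sketch, not part of the kernel source: probe a bounded
 * window for the first hole at or after @index. A result with
 * 'gap - index >= 128' means the window [index, index + 127] is fully
 * populated.
 */
static pgoff_t __maybe_unused example_next_hole(struct address_space *mapping,
                                                pgoff_t index)
{
        return page_cache_next_miss(mapping, index, 128);
}
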
/**
 * page_cache_prev_miss() - Find the previous gap in the page cache.
 * @mapping: Mapping.
 * @index: Index.
 * @max_scan: Maximum range to search.
 *
 * Search the range [max(index - max_scan + 1, 0), index] for the
 * gap with the highest index.
 *
 * This function may be called under the rcu_read_lock.  However, this will
 * not atomically search a snapshot of the cache at a single point in time.
 * For example, if a gap is created at index 10, then subsequently a gap is
 * created at index 5, page_cache_prev_miss() covering both indices may
 * return 5 if called under the rcu_read_lock.
 *
 * Return: The index of the gap if found, otherwise an index outside the
 * range specified (in which case 'index - return >= max_scan' will be true).
 * In the rare case of wrap-around, ULONG_MAX will be returned.
 */
pgoff_t page_cache_prev_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan)
{
        XA_STATE(xas, &mapping->i_pages, index);

        while (max_scan--) {
                void *entry = xas_prev(&xas);
                if (!entry || xa_is_value(entry))
                        break;
                if (xas.xa_index == ULONG_MAX)
                        break;
        }

        return xas.xa_index;
}
EXPORT_SYMBOL(page_cache_prev_miss);

/*
 * Lockless page cache protocol:
 * On the lookup side:
 * 1. Load the folio from i_pages
 * 2. Increment the refcount if it's not zero
 * 3. If the folio is not found by xas_reload(), put the refcount and retry
 *
 * On the removal side:
 * A. Freeze the page (by zeroing the refcount if nobody else has a reference)
 * B. Remove the page from i_pages
 * C. Return the page to the page allocator
 *
 * This means that any page may have its reference count temporarily
 * increased by a speculative page cache (or GUP-fast) lookup as it can
 * be allocated by another user before the RCU grace period expires.
 * Because the refcount temporarily acquired here may end up being the
 * last refcount on the page, any page allocation must be freeable by
 * folio_put().
 */

/*
 * filemap_get_entry - Get a page cache entry.
 * @mapping: the address_space to search
 * @index: The page cache index.
 *
 * Looks up the page cache entry at @mapping & @index.  If it is a folio,
 * it is returned with an increased refcount.  If it is a shadow entry
 * of a previously evicted folio, or a swap entry from shmem/tmpfs,
 * it is returned without further action.
 *
 * Return: The folio, swap or shadow entry, %NULL if nothing is found.
 */
void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);
	struct folio *folio;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	folio = xas_load(&xas);
	if (xas_retry(&xas, folio))
		goto repeat;
	/*
	 * A shadow entry of a recently evicted page, or a swap entry from
	 * shmem/tmpfs.  Return it without attempting to raise page count.
	 */
	if (!folio || xa_is_value(folio))
		goto out;

	if (!folio_try_get(folio))
		goto repeat;

	if (unlikely(folio != xas_reload(&xas))) {
		folio_put(folio);
		goto repeat;
	}
out:
	rcu_read_unlock();

	return folio;
}
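
/*
 * Illustrative sketch (not part of the original file): a caller of
 * filemap_get_entry() must be prepared for all three return shapes
 * described above.  The function name is made up for the example.
 */
static struct folio *__maybe_unused example_get_folio_only(
		struct address_space *mapping, pgoff_t index)
{
	void *entry = filemap_get_entry(mapping, index);

	if (!entry)
		return NULL;		/* nothing at this index */
	if (xa_is_value(entry))
		return NULL;		/* shadow or swap entry: no refcount held */
	return entry;			/* a folio, returned with a reference */
}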

/**
 * __filemap_get_folio - Find and get a reference to a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 * @fgp_flags: %FGP flags modify how the folio is returned.
 * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
 *
 * Looks up the page cache entry at @mapping & @index.
 *
 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
 * if the %GFP flags specified for %FGP_CREAT are atomic.
 *
 * If this function returns a folio, it is returned with an increased refcount.
 *
 * Return: The found folio or an ERR_PTR() otherwise.
 */
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp)
{
	struct folio *folio;

repeat:
	folio = filemap_get_entry(mapping, index);
	if (xa_is_value(folio))
		folio = NULL;
	if (!folio)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!folio_trylock(folio)) {
				folio_put(folio);
				return ERR_PTR(-EAGAIN);
			}
		} else {
			folio_lock(folio);
		}

		/* Has the page been truncated? */
		if (unlikely(folio->mapping != mapping)) {
			folio_unlock(folio);
			folio_put(folio);
			goto repeat;
		}
		VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
	}

	if (fgp_flags & FGP_ACCESSED)
		folio_mark_accessed(folio);
	else if (fgp_flags & FGP_WRITE) {
		/* Clear idle flag for buffer write */
		if (folio_test_idle(folio))
			folio_clear_idle(folio);
	}

	if (fgp_flags & FGP_STABLE)
		folio_wait_stable(folio);
no_page:
	if (!folio && (fgp_flags & FGP_CREAT)) {
		unsigned int min_order = mapping_min_folio_order(mapping);
		unsigned int order = max(min_order, FGF_GET_ORDER(fgp_flags));
		int err;
		index = mapping_align_index(mapping, index);

		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
			gfp |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS)
			gfp &= ~__GFP_FS;
		if (fgp_flags & FGP_NOWAIT) {
			gfp &= ~GFP_KERNEL;
			gfp |= GFP_NOWAIT | __GFP_NOWARN;
		}
		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
			fgp_flags |= FGP_LOCK;

		if (order > mapping_max_folio_order(mapping))
			order = mapping_max_folio_order(mapping);
		/* If we're not aligned, allocate a smaller folio */
		if (index & ((1UL << order) - 1))
			order = __ffs(index);

		do {
			gfp_t alloc_gfp = gfp;

			err = -ENOMEM;
			if (order > min_order)
				alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
			folio = filemap_alloc_folio(alloc_gfp, order);
			if (!folio)
				continue;

			/* Init accessed so we avoid an atomic mark_page_accessed() later */
			if (fgp_flags & FGP_ACCESSED)
				__folio_set_referenced(folio);

			err = filemap_add_folio(mapping, folio, index, gfp);
			if (!err)
				break;
			folio_put(folio);
			folio = NULL;
		} while (order-- > min_order);

		if (err == -EEXIST)
			goto repeat;
		if (err)
			return ERR_PTR(err);
		/*
		 * filemap_add_folio locks the page, and for mmap
		 * we expect an unlocked page.
		 */
		if (folio && (fgp_flags & FGP_FOR_MMAP))
			folio_unlock(folio);
	}

	if (!folio)
		return ERR_PTR(-ENOENT);
	return folio;
}
EXPORT_SYMBOL(__filemap_get_folio);
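
/*
 * Illustrative sketch (not part of the original file): the common
 * find-or-create pattern built on __filemap_get_folio().  The flag
 * choice and the function name are for the example only; real callers
 * pick FGP flags to match their locking and allocation context.
 */
static struct folio *__maybe_unused example_grab_folio(
		struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return folio;	/* e.g. ERR_PTR(-ENOMEM) */

	/* On success the folio is locked and holds a reference. */
	return folio;
}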

static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
		xa_mark_t mark)
{
	struct folio *folio;

retry:
	if (mark == XA_PRESENT)
		folio = xas_find(xas, max);
	else
		folio = xas_find_marked(xas, max, mark);

	if (xas_retry(xas, folio))
		goto retry;
	/*
	 * A shadow entry of a recently evicted page, a swap
	 * entry from shmem/tmpfs or a DAX entry.  Return it
	 * without attempting to raise page count.
	 */
	if (!folio || xa_is_value(folio))
		return folio;

	if (!folio_try_get(folio))
		goto reset;

	if (unlikely(folio != xas_reload(xas))) {
		folio_put(folio);
		goto reset;
	}

	return folio;
reset:
	xas_reset(xas);
	goto retry;
}

/**
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page cache index
 * @end:	The final page index (inclusive).
 * @fbatch:	Where the resulting entries are placed.
 * @indices:	The cache indices corresponding to the entries in @fbatch
 *
 * find_get_entries() will search for and return a batch of entries in
 * the mapping.  The entries are placed in @fbatch.  find_get_entries()
 * takes a reference on any actual folios it returns.
 *
 * The entries have ascending indexes.  The indices may not be consecutive
 * due to not-present entries or large folios.
 *
 * Any shadow entries of evicted folios, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * Return: The number of entries which were found.
 */
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
{
	XA_STATE(xas, &mapping->i_pages, *start);
	struct folio *folio;

	rcu_read_lock();
	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
		indices[fbatch->nr] = xas.xa_index;
		if (!folio_batch_add(fbatch, folio))
			break;
	}

	if (folio_batch_count(fbatch)) {
		unsigned long nr;
		int idx = folio_batch_count(fbatch) - 1;

		folio = fbatch->folios[idx];
		if (!xa_is_value(folio))
			nr = folio_nr_pages(folio);
		else
			nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]);
		*start = round_down(indices[idx] + nr, nr);
	}
	rcu_read_unlock();

	return folio_batch_count(fbatch);
}
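
/*
 * Illustrative sketch (not part of the original file): walking every entry
 * in a range the way the truncate/invalidate helpers do.  Value entries
 * carry no reference, so only real folios are put.  The function name is
 * hypothetical.
 */
static void __maybe_unused example_walk_entries(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	unsigned int i;

	folio_batch_init(&fbatch);
	while (find_get_entries(mapping, &start, end, &fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			if (xa_is_value(folio))
				continue;	/* shadow/swap: nothing to put */
			folio_put(folio);	/* drop the reference we took */
		}
		folio_batch_reinit(&fbatch);
	}
}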

/**
 * find_lock_entries - Find a batch of pagecache entries.
 * @mapping:	The address_space to search.
 * @start:	The starting page cache index.
 * @end:	The final page index (inclusive).
 * @fbatch:	Where the resulting entries are placed.
 * @indices:	The cache indices of the entries in @fbatch.
 *
 * find_lock_entries() will return a batch of entries from @mapping.
 * Swap, shadow and DAX entries are included.  Folios are returned
 * locked and with an incremented refcount.  Folios which are locked
 * by somebody else or under writeback are skipped.  Folios which are
 * partially outside the range are not returned.
 *
 * The entries have ascending indexes.  The indices may not be consecutive
 * due to not-present entries, large folios, folios which could not be
 * locked or folios under writeback.
 *
 * Return: The number of entries which were found.
 */
unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
{
	XA_STATE(xas, &mapping->i_pages, *start);
	struct folio *folio;

	rcu_read_lock();
	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
		unsigned long base;
		unsigned long nr;

		if (!xa_is_value(folio)) {
			nr = folio_nr_pages(folio);
			base = folio->index;
			/* Omit large folio which begins before the start */
			if (base < *start)
				goto put;
			/* Omit large folio which extends beyond the end */
			if (base + nr - 1 > end)
				goto put;
			if (!folio_trylock(folio))
				goto put;
			if (folio->mapping != mapping ||
			    folio_test_writeback(folio))
				goto unlock;
			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
					folio);
		} else {
			nr = 1 << xas_get_order(&xas);
			base = xas.xa_index & ~(nr - 1);
			/* Omit order>0 value which begins before the start */
			if (base < *start)
				continue;
			/* Omit order>0 value which extends beyond the end */
			if (base + nr - 1 > end)
				break;
		}

		/* Update start now so that last update is correct on return */
		*start = base + nr;
		indices[fbatch->nr] = xas.xa_index;
		if (!folio_batch_add(fbatch, folio))
			break;
		continue;
unlock:
		folio_unlock(folio);
put:
		folio_put(folio);
	}
	rcu_read_unlock();

	return folio_batch_count(fbatch);
}
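
/*
 * Illustrative sketch (not part of the original file): consuming a batch
 * from find_lock_entries().  Each real folio comes back locked with a
 * reference, so it must be unlocked before being released; value entries
 * need neither.  The function name is hypothetical.
 */
static void __maybe_unused example_consume_locked_batch(
		struct address_space *mapping, pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	pgoff_t indices[PAGEVEC_SIZE];
	unsigned int i;

	folio_batch_init(&fbatch);
	while (find_lock_entries(mapping, &start, end, &fbatch, indices)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			if (xa_is_value(folio))
				continue;
			/* ... operate on the locked folio here ... */
			folio_unlock(folio);
			folio_put(folio);
		}
		folio_batch_reinit(&fbatch);
	}
}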

/**
 * filemap_get_folios - Get a batch of folios
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index (inclusive)
 * @fbatch:	The batch to fill.
 *
 * Search for and return a batch of folios in the mapping starting at
 * index @start and up to index @end (inclusive).  The folios are returned
 * in @fbatch with an elevated reference count.
 *
 * Return: The number of folios which were found.
 * We also update @start to index the next folio for the traversal.
 */
unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch)
{
	return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch);
}
EXPORT_SYMBOL(filemap_get_folios);
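
/*
 * Illustrative sketch (not part of the original file): the canonical
 * batched iteration over every folio in a range.  @start is advanced by
 * the call itself, so the loop repeats until the batch comes back empty.
 * The function name is hypothetical.
 */
static unsigned long __maybe_unused example_count_cached_pages(
		struct address_space *mapping, pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned long pages = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			pages += folio_nr_pages(fbatch.folios[i]);
		folio_batch_release(&fbatch);	/* drops the references */
	}
	return pages;
}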

/**
 * filemap_get_folios_contig - Get a batch of contiguous folios
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index (inclusive)
 * @fbatch:	The batch to fill
 *
 * filemap_get_folios_contig() works exactly like filemap_get_folios(),
 * except the returned folios are guaranteed to be contiguous. This may
 * not return all contiguous folios if the batch gets filled up.
 *
 * Return: The number of folios found.
 * Also update @start to be positioned for traversal of the next folio.
 */
unsigned filemap_get_folios_contig(struct address_space *mapping,
		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, *start);
	unsigned long nr;
	struct folio *folio;

	rcu_read_lock();

	for (folio = xas_load(&xas); folio && xas.xa_index <= end;
			folio = xas_next(&xas)) {
		if (xas_retry(&xas, folio))
			continue;
		/*
		 * If the entry has been swapped out, we can stop looking.
		 * No current caller is looking for DAX entries.
		 */
		if (xa_is_value(folio))
			goto update_start;

		/* If we landed in the middle of a THP, continue at its end. */
		if (xa_is_sibling(folio))
			goto update_start;

		if (!folio_try_get(folio))
			goto retry;

		if (unlikely(folio != xas_reload(&xas)))
			goto put_folio;

		if (!folio_batch_add(fbatch, folio)) {
			nr = folio_nr_pages(folio);
			*start = folio->index + nr;
			goto out;
		}
		continue;
put_folio:
		folio_put(folio);

retry:
		xas_reset(&xas);
	}

update_start:
	nr = folio_batch_count(fbatch);

	if (nr) {
		folio = fbatch->folios[nr - 1];
		*start = folio_next_index(folio);
	}
out:
	rcu_read_unlock();
	return folio_batch_count(fbatch);
}
EXPORT_SYMBOL(filemap_get_folios_contig);
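
/*
 * Illustrative sketch (not part of the original file): measuring how many
 * bytes are contiguously present in the cache from @start, e.g. before
 * sizing a single I/O.  The function name and the short-batch early exit
 * are assumptions of the example; a short batch means the contiguous run
 * (or the range) ended, while a full batch may continue on the next call.
 */
static loff_t __maybe_unused example_contig_bytes(
		struct address_space *mapping, pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	loff_t bytes = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios_contig(mapping, &start, end, &fbatch)) {
		unsigned int nr = folio_batch_count(&fbatch);

		for (i = 0; i < nr; i++)
			bytes += folio_size(fbatch.folios[i]);
		folio_batch_release(&fbatch);
		if (nr < PAGEVEC_SIZE)
			break;
	}
	return bytes;
}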

/**
 * filemap_get_folios_tag - Get a batch of folios matching @tag
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index (inclusive)
 * @tag:	The tag index
 * @fbatch:	The batch to fill
 *
 * The first folio may start before @start; if it does, it will contain
 * @start.  The final folio may extend beyond @end; if it does, it will
 * contain @end.  The folios have ascending indices.  There may be gaps
 * between the folios if there are indices which have no folio in the
 * page cache.  If folios are added to or removed from the page cache
 * while this is running, they may or may not be found by this call.
 * Only returns folios that are tagged with @tag.
 *
 * Return: The number of folios found.
 * Also update @start to index the next folio for traversal.
 */
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, *start);
	struct folio *folio;

	rcu_read_lock();
	while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
		/*
		 * Shadow entries should never be tagged, but this iteration
		 * is lockless so there is a window for page reclaim to evict
		 * a page we saw tagged. Skip over it.
		 */
		if (xa_is_value(folio))
			continue;
		if (!folio_batch_add(fbatch, folio)) {
			unsigned long nr = folio_nr_pages(folio);
			*start = folio->index + nr;
			goto out;
		}
	}
	/*
	 * We come here when there is no page beyond @end. We take care to not
	 * overflow the index @start as it confuses some of the callers. This
	 * breaks the iteration when there is a page at index -1 but that is
	 * already broken anyway.
	 */
	if (end == (pgoff_t)-1)
		*start = (pgoff_t)-1;
	else
		*start = end + 1;
out:
	rcu_read_unlock();

	return folio_batch_count(fbatch);
}
EXPORT_SYMBOL(filemap_get_folios_tag);
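
/*
 * Illustrative sketch (not part of the original file): the writeback-style
 * loop over folios carrying a particular xarray mark, here assumed to be
 * PAGECACHE_TAG_DIRTY.  The function name is hypothetical.
 */
static unsigned long __maybe_unused example_count_dirty_folios(
		struct address_space *mapping, pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned long nr_dirty = 0;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios_tag(mapping, &start, end,
				      PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			/* The tag may have been cleared since the lookup. */
			if (folio_test_dirty(fbatch.folios[i]))
				nr_dirty++;
		}
		folio_batch_release(&fbatch);
	}
	return nr_dirty;
}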
2291                                                  2291 
2292 /*                                               2292 /*
2293  * CD/DVDs are error prone. When a medium err    2293  * CD/DVDs are error prone. When a medium error occurs, the driver may fail
2294  * a _large_ part of the i/o request. Imagine    2294  * a _large_ part of the i/o request. Imagine the worst scenario:
2295  *                                               2295  *
2296  *      ---R_________________________________    2296  *      ---R__________________________________________B__________
2297  *         ^ reading here                        2297  *         ^ reading here                             ^ bad block(assume 4k)
2298  *                                               2298  *
2299  * read(R) => miss => readahead(R...B) => med    2299  * read(R) => miss => readahead(R...B) => media error => frustrating retries
2300  * => failing the whole request => read(R) =>    2300  * => failing the whole request => read(R) => read(R+1) =>
2301  * readahead(R+1...B+1) => bang => read(R+2)     2301  * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2302  * readahead(R+3...B+2) => bang => read(R+3)     2302  * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2303  * readahead(R+4...B+3) => bang => read(R+4)     2303  * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2304  *                                               2304  *
2305  * It is going insane. Fix it by quickly scal    2305  * It is going insane. Fix it by quickly scaling down the readahead size.
2306  */                                              2306  */
2307 static void shrink_readahead_size_eio(struct     2307 static void shrink_readahead_size_eio(struct file_ra_state *ra)
2308 {                                                2308 {
2309         ra->ra_pages /= 4;                       2309         ra->ra_pages /= 4;
2310 }                                                2310 }
2311                                                  2311 
2312 /*                                               2312 /*
2313  * filemap_get_read_batch - Get a batch of fo    2313  * filemap_get_read_batch - Get a batch of folios for read
2314  *                                               2314  *
2315  * Get a batch of folios which represent a co    2315  * Get a batch of folios which represent a contiguous range of bytes in
2316  * the file.  No exceptional entries will be     2316  * the file.  No exceptional entries will be returned.  If @index is in
2317  * the middle of a folio, the entire folio wi    2317  * the middle of a folio, the entire folio will be returned.  The last
2318  * folio in the batch may have the readahead     2318  * folio in the batch may have the readahead flag set or the uptodate flag
2319  * clear so that the caller can take the appr    2319  * clear so that the caller can take the appropriate action.
2320  */                                              2320  */
2321 static void filemap_get_read_batch(struct add    2321 static void filemap_get_read_batch(struct address_space *mapping,
2322                 pgoff_t index, pgoff_t max, s    2322                 pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
2323 {                                                2323 {
2324         XA_STATE(xas, &mapping->i_pages, inde    2324         XA_STATE(xas, &mapping->i_pages, index);
2325         struct folio *folio;                     2325         struct folio *folio;
2326                                                  2326 
2327         rcu_read_lock();                         2327         rcu_read_lock();
2328         for (folio = xas_load(&xas); folio; f    2328         for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2329                 if (xas_retry(&xas, folio))      2329                 if (xas_retry(&xas, folio))
2330                         continue;                2330                         continue;
2331                 if (xas.xa_index > max || xa_    2331                 if (xas.xa_index > max || xa_is_value(folio))
2332                         break;                   2332                         break;
2333                 if (xa_is_sibling(folio))        2333                 if (xa_is_sibling(folio))
2334                         break;                   2334                         break;
2335                 if (!folio_try_get(folio))       2335                 if (!folio_try_get(folio))
2336                         goto retry;              2336                         goto retry;
2337                                                  2337 
2338                 if (unlikely(folio != xas_rel    2338                 if (unlikely(folio != xas_reload(&xas)))
2339                         goto put_folio;          2339                         goto put_folio;
2340                                                  2340 
2341                 if (!folio_batch_add(fbatch,     2341                 if (!folio_batch_add(fbatch, folio))
2342                         break;                   2342                         break;
2343                 if (!folio_test_uptodate(foli    2343                 if (!folio_test_uptodate(folio))
2344                         break;                   2344                         break;
2345                 if (folio_test_readahead(foli    2345                 if (folio_test_readahead(folio))
2346                         break;                   2346                         break;
2347                 xas_advance(&xas, folio_next_    2347                 xas_advance(&xas, folio_next_index(folio) - 1);
2348                 continue;                        2348                 continue;
2349 put_folio:                                       2349 put_folio:
2350                 folio_put(folio);                2350                 folio_put(folio);
2351 retry:                                           2351 retry:
2352                 xas_reset(&xas);                 2352                 xas_reset(&xas);
2353         }                                        2353         }
2354         rcu_read_unlock();                       2354         rcu_read_unlock();
2355 }                                                2355 }
2356                                                  2356 
2357 static int filemap_read_folio(struct file *fi    2357 static int filemap_read_folio(struct file *file, filler_t filler,
2358                 struct folio *folio)             2358                 struct folio *folio)
2359 {                                                2359 {
2360         bool workingset = folio_test_workings    2360         bool workingset = folio_test_workingset(folio);
2361         unsigned long pflags;                    2361         unsigned long pflags;
2362         int error;                               2362         int error;
2363                                                  2363 
2364         /* Start the actual read. The read wi    2364         /* Start the actual read. The read will unlock the page. */
2365         if (unlikely(workingset))                2365         if (unlikely(workingset))
2366                 psi_memstall_enter(&pflags);     2366                 psi_memstall_enter(&pflags);
2367         error = filler(file, folio);             2367         error = filler(file, folio);
2368         if (unlikely(workingset))                2368         if (unlikely(workingset))
2369                 psi_memstall_leave(&pflags);     2369                 psi_memstall_leave(&pflags);
2370         if (error)                               2370         if (error)
2371                 return error;                    2371                 return error;
2372                                                  2372 
2373         error = folio_wait_locked_killable(fo    2373         error = folio_wait_locked_killable(folio);
2374         if (error)                               2374         if (error)
2375                 return error;                    2375                 return error;
2376         if (folio_test_uptodate(folio))          2376         if (folio_test_uptodate(folio))
2377                 return 0;                        2377                 return 0;
2378         if (file)                                2378         if (file)
2379                 shrink_readahead_size_eio(&fi    2379                 shrink_readahead_size_eio(&file->f_ra);
2380         return -EIO;                             2380         return -EIO;
2381 }                                                2381 }
2382                                                  2382 
2383 static bool filemap_range_uptodate(struct add    2383 static bool filemap_range_uptodate(struct address_space *mapping,
2384                 loff_t pos, size_t count, str    2384                 loff_t pos, size_t count, struct folio *folio,
2385                 bool need_uptodate)              2385                 bool need_uptodate)
2386 {                                                2386 {
2387         if (folio_test_uptodate(folio))          2387         if (folio_test_uptodate(folio))
2388                 return true;                     2388                 return true;
2389         /* pipes can't handle partially uptod    2389         /* pipes can't handle partially uptodate pages */
2390         if (need_uptodate)                       2390         if (need_uptodate)
2391                 return false;                    2391                 return false;
2392         if (!mapping->a_ops->is_partially_upt    2392         if (!mapping->a_ops->is_partially_uptodate)
2393                 return false;                    2393                 return false;
2394         if (mapping->host->i_blkbits >= folio    2394         if (mapping->host->i_blkbits >= folio_shift(folio))
2395                 return false;                    2395                 return false;
2396                                                  2396 
2397         if (folio_pos(folio) > pos) {            2397         if (folio_pos(folio) > pos) {
2398                 count -= folio_pos(folio) - p    2398                 count -= folio_pos(folio) - pos;
2399                 pos = 0;                         2399                 pos = 0;
2400         } else {                                 2400         } else {
2401                 pos -= folio_pos(folio);         2401                 pos -= folio_pos(folio);
2402         }                                        2402         }
2403                                                  2403 
2404         return mapping->a_ops->is_partially_u    2404         return mapping->a_ops->is_partially_uptodate(folio, pos, count);
2405 }                                                2405 }
2406                                                  2406 
static int filemap_update_page(struct kiocb *iocb,
                struct address_space *mapping, size_t count,
                struct folio *folio, bool need_uptodate)
{
        int error;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!filemap_invalidate_trylock_shared(mapping))
                        return -EAGAIN;
        } else {
                filemap_invalidate_lock_shared(mapping);
        }

        if (!folio_trylock(folio)) {
                error = -EAGAIN;
                if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
                        goto unlock_mapping;
                if (!(iocb->ki_flags & IOCB_WAITQ)) {
                        filemap_invalidate_unlock_shared(mapping);
                        /*
                         * This is where we usually end up waiting for a
                         * previously submitted readahead to finish.
                         */
                        folio_put_wait_locked(folio, TASK_KILLABLE);
                        return AOP_TRUNCATED_PAGE;
                }
                error = __folio_lock_async(folio, iocb->ki_waitq);
                if (error)
                        goto unlock_mapping;
        }

        error = AOP_TRUNCATED_PAGE;
        if (!folio->mapping)
                goto unlock;

        error = 0;
        if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,
                                   need_uptodate))
                goto unlock;

        error = -EAGAIN;
        if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
                goto unlock;

        error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
                        folio);
        goto unlock_mapping;
unlock:
        folio_unlock(folio);
unlock_mapping:
        filemap_invalidate_unlock_shared(mapping);
        if (error == AOP_TRUNCATED_PAGE)
                folio_put(folio);
        return error;
}

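/*
 * Note on filemap_update_page()'s return contract: 0 means the folio is
 * now usable for this read; AOP_TRUNCATED_PAGE means the folio fell out
 * of the mapping (or we waited for its lock without IOCB_WAITQ) and the
 * reference was already dropped here, so the caller must look it up
 * again; negative errors leave the reference with the caller, which
 * drops it on the err: path in filemap_get_pages() below.
 */
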
static int filemap_create_folio(struct file *file,
                struct address_space *mapping, loff_t pos,
                struct folio_batch *fbatch)
{
        struct folio *folio;
        int error;
        unsigned int min_order = mapping_min_folio_order(mapping);
        pgoff_t index;

        folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);
        if (!folio)
                return -ENOMEM;

        /*
         * Protect against truncate / hole punch. Grabbing invalidate_lock
         * here assures we cannot instantiate and bring uptodate new
         * pagecache folios after evicting page cache during truncate
         * and before actually freeing blocks.  Note that we could
         * release invalidate_lock after inserting the folio into
         * the page cache as the locked folio would then be enough to
         * synchronize with hole punching. But there are code paths
         * such as filemap_update_page() filling in partially uptodate
         * pages or ->readahead() that need to hold invalidate_lock
         * while mapping blocks for IO so let's hold the lock here as
         * well to keep locking rules simple.
         */
        filemap_invalidate_lock_shared(mapping);
        index = (pos >> (PAGE_SHIFT + min_order)) << min_order;
        error = filemap_add_folio(mapping, folio, index,
                        mapping_gfp_constraint(mapping, GFP_KERNEL));
        if (error == -EEXIST)
                error = AOP_TRUNCATED_PAGE;
        if (error)
                goto error;

        error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
        if (error)
                goto error;

        filemap_invalidate_unlock_shared(mapping);
        folio_batch_add(fbatch, folio);
        return 0;
error:
        filemap_invalidate_unlock_shared(mapping);
        folio_put(folio);
        return error;
}

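/*
 * Worked example for the index calculation above (illustrative): with
 * 4KiB pages (PAGE_SHIFT == 12) and min_order == 2, folios are at least
 * 16KiB and must start on a 4-page boundary.  For pos == 36864 (page
 * index 9), (pos >> 14) << 2 == 2 << 2 == 8, i.e. the new folio is
 * inserted at index 8 and covers pages 8..11, which contain pos.
 */
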
static int filemap_readahead(struct kiocb *iocb, struct file *file,
                struct address_space *mapping, struct folio *folio,
                pgoff_t last_index)
{
        DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);

        if (iocb->ki_flags & IOCB_NOIO)
                return -EAGAIN;
        page_cache_async_ra(&ractl, folio, last_index - folio->index);
        return 0;
}

static int filemap_get_pages(struct kiocb *iocb, size_t count,
                struct folio_batch *fbatch, bool need_uptodate)
{
        struct file *filp = iocb->ki_filp;
        struct address_space *mapping = filp->f_mapping;
        struct file_ra_state *ra = &filp->f_ra;
        pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
        pgoff_t last_index;
        struct folio *folio;
        unsigned int flags;
        int err = 0;

        /* "last_index" is the index of the page beyond the end of the read */
        last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE);
retry:
        if (fatal_signal_pending(current))
                return -EINTR;

        filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
        if (!folio_batch_count(fbatch)) {
                if (iocb->ki_flags & IOCB_NOIO)
                        return -EAGAIN;
                if (iocb->ki_flags & IOCB_NOWAIT)
                        flags = memalloc_noio_save();
                page_cache_sync_readahead(mapping, ra, filp, index,
                                last_index - index);
                if (iocb->ki_flags & IOCB_NOWAIT)
                        memalloc_noio_restore(flags);
                filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
        }
        if (!folio_batch_count(fbatch)) {
                if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
                        return -EAGAIN;
                err = filemap_create_folio(filp, mapping, iocb->ki_pos, fbatch);
                if (err == AOP_TRUNCATED_PAGE)
                        goto retry;
                return err;
        }

        folio = fbatch->folios[folio_batch_count(fbatch) - 1];
        if (folio_test_readahead(folio)) {
                err = filemap_readahead(iocb, filp, mapping, folio, last_index);
                if (err)
                        goto err;
        }
        if (!folio_test_uptodate(folio)) {
                if ((iocb->ki_flags & IOCB_WAITQ) &&
                    folio_batch_count(fbatch) > 1)
                        iocb->ki_flags |= IOCB_NOWAIT;
                err = filemap_update_page(iocb, mapping, count, folio,
                                          need_uptodate);
                if (err)
                        goto err;
        }

        trace_mm_filemap_get_pages(mapping, index, last_index - 1);
        return 0;
err:
        if (err < 0)
                folio_put(folio);
        if (likely(--fbatch->nr))
                return 0;
        if (err == AOP_TRUNCATED_PAGE)
                goto retry;
        return err;
}

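/*
 * Note on the retry protocol above: AOP_TRUNCATED_PAGE from
 * filemap_create_folio() or filemap_update_page() means the folio went
 * away under us (e.g. a racing truncation) and its reference has already
 * been dropped.  The lookup restarts from retry: unless earlier folios
 * were gathered, in which case the partial batch is returned first and
 * the restart happens on a later call.
 */
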
static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
{
        unsigned int shift = folio_shift(folio);

        return (pos1 >> shift == pos2 >> shift);
}

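/*
 * Example (illustrative): for an order-2 folio on 4KiB pages,
 * folio_shift() == 14, so pos_same_folio(16383, 16384, folio) is false
 * (0 != 1 after the shift), while pos_same_folio(16384, 32767, folio) is
 * true: both offsets fall within the same 16KiB folio.
 */
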
/**
 * filemap_read - Read data from the page cache.
 * @iocb: The iocb to read.
 * @iter: Destination for the data.
 * @already_read: Number of bytes already read by the caller.
 *
 * Copies data from the page cache.  If the data is not currently present,
 * uses the readahead and read_folio address_space operations to fetch it.
 *
 * Return: Total number of bytes copied, including those already read by
 * the caller.  If an error happens before any bytes are copied, returns
 * a negative error number.
 */
ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
                ssize_t already_read)
{
        struct file *filp = iocb->ki_filp;
        struct file_ra_state *ra = &filp->f_ra;
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        struct folio_batch fbatch;
        int i, error = 0;
        bool writably_mapped;
        loff_t isize, end_offset;
        loff_t last_pos = ra->prev_pos;

        if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
                return 0;
        if (unlikely(!iov_iter_count(iter)))
                return 0;

        iov_iter_truncate(iter, inode->i_sb->s_maxbytes - iocb->ki_pos);
        folio_batch_init(&fbatch);

        do {
                cond_resched();

                /*
                 * If we've already successfully copied some data, then we
                 * can no longer safely return -EIOCBQUEUED. Hence mark
                 * an async read NOWAIT at that point.
                 */
                if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
                        iocb->ki_flags |= IOCB_NOWAIT;

                if (unlikely(iocb->ki_pos >= i_size_read(inode)))
                        break;

                error = filemap_get_pages(iocb, iter->count, &fbatch, false);
                if (error < 0)
                        break;

                /*
                 * i_size must be checked after we know the pages are Uptodate.
                 *
                 * Checking i_size after the check allows us to calculate
                 * the correct value for "nr", which means the zero-filled
                 * part of the page is not copied back to userspace (unless
                 * another truncate extends the file - this is desired though).
                 */
                isize = i_size_read(inode);
                if (unlikely(iocb->ki_pos >= isize))
                        goto put_folios;
                end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);

                /*
                 * Once we start copying data, we don't want to be touching any
                 * cachelines that might be contended:
                 */
                writably_mapped = mapping_writably_mapped(mapping);

                /*
                 * When a read accesses the same folio several times, only
                 * mark it as accessed the first time.
                 */
                if (!pos_same_folio(iocb->ki_pos, last_pos - 1,
                                    fbatch.folios[0]))
                        folio_mark_accessed(fbatch.folios[0]);

                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];
                        size_t fsize = folio_size(folio);
                        size_t offset = iocb->ki_pos & (fsize - 1);
                        size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
                                             fsize - offset);
                        size_t copied;

                        if (end_offset < folio_pos(folio))
                                break;
                        if (i > 0)
                                folio_mark_accessed(folio);
                        /*
                         * If users can be writing to this folio using arbitrary
                         * virtual addresses, take care of potential aliasing
                         * before reading the folio on the kernel side.
                         */
                        if (writably_mapped)
                                flush_dcache_folio(folio);

                        copied = copy_folio_to_iter(folio, offset, bytes, iter);

                        already_read += copied;
                        iocb->ki_pos += copied;
                        last_pos = iocb->ki_pos;

                        if (copied < bytes) {
                                error = -EFAULT;
                                break;
                        }
                }
put_folios:
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        folio_put(fbatch.folios[i]);
                folio_batch_init(&fbatch);
        } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);

        file_accessed(filp);
        ra->prev_pos = last_pos;
        return already_read ? already_read : error;
}
EXPORT_SYMBOL_GPL(filemap_read);

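/*
 * Sketch (illustrative, not part of filemap.c): a filesystem with its own
 * direct I/O path can still reuse filemap_read() for the buffered tail of
 * a short direct read, much as generic_file_read_iter() does below.
 * "myfs_dio_read" and the "myfs" names are hypothetical.
 */
static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        ssize_t ret = 0;

        if (iocb->ki_flags & IOCB_DIRECT) {
                ret = myfs_dio_read(iocb, to);  /* hypothetical helper */
                /* error, or everything read: nothing left for the cache */
                if (ret < 0 || !iov_iter_count(to))
                        return ret;
        }
        return filemap_read(iocb, to, ret);
}
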
int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        loff_t pos = iocb->ki_pos;
        loff_t end = pos + count - 1;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (filemap_range_needs_writeback(mapping, pos, end))
                        return -EAGAIN;
                return 0;
        }

        return filemap_write_and_wait_range(mapping, pos, end);
}
EXPORT_SYMBOL_GPL(kiocb_write_and_wait);

int filemap_invalidate_pages(struct address_space *mapping,
                             loff_t pos, loff_t end, bool nowait)
{
        int ret;

        if (nowait) {
                /* we could block if there are any pages in the range */
                if (filemap_range_has_page(mapping, pos, end))
                        return -EAGAIN;
        } else {
                ret = filemap_write_and_wait_range(mapping, pos, end);
                if (ret)
                        return ret;
        }

        /*
         * After a write we want buffered reads to be sure to go to disk to get
         * the new data.  We invalidate clean cached page from the region we're
         * about to write.  We do this *before* the write so that we can return
         * without clobbering -EIOCBQUEUED from ->direct_IO().
         */
        return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
                                             end >> PAGE_SHIFT);
}

int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;

        return filemap_invalidate_pages(mapping, iocb->ki_pos,
                                        iocb->ki_pos + count - 1,
                                        iocb->ki_flags & IOCB_NOWAIT);
}
EXPORT_SYMBOL_GPL(kiocb_invalidate_pages);

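/*
 * Sketch (illustrative, not part of filemap.c): a direct write path
 * typically flushes and invalidates the affected pagecache range before
 * issuing device I/O; kiocb_invalidate_pages() above bundles both steps.
 * "myfs_issue_dio_write" is a hypothetical helper; real implementations
 * usually also invalidate again on completion.
 */
static ssize_t myfs_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret;

        ret = kiocb_invalidate_pages(iocb, iov_iter_count(from));
        if (ret)        /* -EAGAIN under IOCB_NOWAIT, or a writeback error */
                return ret;
        return myfs_issue_dio_write(iocb, from);        /* hypothetical */
}
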
/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb:       kernel I/O control block
 * @iter:       destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 *
 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
 * be returned when no data can be read without waiting for I/O requests
 * to complete; it doesn't prevent readahead.
 *
 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
 * requests shall be made for the read or for readahead.  When no data
 * can be read, -EAGAIN shall be returned.  When readahead would be
 * triggered, a partial, possibly empty read shall be returned.
 *
 * Return:
 * * number of bytes copied, even for partial reads
 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t count = iov_iter_count(iter);
        ssize_t retval = 0;

        if (!count)
                return 0; /* skip atime */

        if (iocb->ki_flags & IOCB_DIRECT) {
                struct file *file = iocb->ki_filp;
                struct address_space *mapping = file->f_mapping;
                struct inode *inode = mapping->host;

                retval = kiocb_write_and_wait(iocb, count);
                if (retval < 0)
                        return retval;
                file_accessed(file);

                retval = mapping->a_ops->direct_IO(iocb, iter);
                if (retval >= 0) {
                        iocb->ki_pos += retval;
                        count -= retval;
                }
                if (retval != -EIOCBQUEUED)
                        iov_iter_revert(iter, count - iov_iter_count(iter));

                /*
                 * Btrfs can have a short DIO read if we encounter
                 * compressed extents, so if there was an error, or if
                 * we've already read everything we wanted to, or if
                 * there was a short read because we hit EOF, go ahead
                 * and return.  Otherwise fallthrough to buffered io for
                 * the rest of the read.  Buffered reads will not work for
                 * DAX files, so don't bother trying.
                 */
                if (retval < 0 || !count || IS_DAX(inode))
                        return retval;
                if (iocb->ki_pos >= i_size_read(inode))
                        return retval;
        }

        return filemap_read(iocb, iter, retval);
}
EXPORT_SYMBOL(generic_file_read_iter);

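/*
 * Sketch (illustrative, not part of filemap.c): simple pagecache-backed
 * filesystems commonly wire the exported helpers straight into their
 * file_operations; "myfs" is a hypothetical name.
 */
const struct file_operations myfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .splice_read    = filemap_splice_read,
};
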
/*
 * Splice subpages from a folio into a pipe.
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
                              struct folio *folio, loff_t fpos, size_t size)
{
        struct page *page;
        size_t spliced = 0, offset = offset_in_folio(folio, fpos);

        page = folio_page(folio, offset / PAGE_SIZE);
        size = min(size, folio_size(folio) - offset);
        offset %= PAGE_SIZE;

        while (spliced < size &&
               !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
                struct pipe_buffer *buf = pipe_head_buf(pipe);
                size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);

                *buf = (struct pipe_buffer) {
                        .ops    = &page_cache_pipe_buf_ops,
                        .page   = page,
                        .offset = offset,
                        .len    = part,
                };
                folio_get(folio);
                pipe->head++;
                page++;
                spliced += part;
                offset = 0;
        }

        return spliced;
}

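/*
 * Worked example for splice_folio_into_pipe() (illustrative): splicing
 * size == 9000 bytes starting 5000 bytes into a 16KiB folio begins at
 * folio_page(folio, 1) with offset 904, and emits three pipe buffers of
 * 3192, 4096 and 1712 bytes, each holding its own folio reference.
 */
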
/**
 * filemap_splice_read -  Splice data from a file's pagecache into a pipe
 * @in: The file to read from
 * @ppos: Pointer to the file position to read from
 * @pipe: The pipe to splice into
 * @len: The amount to splice
 * @flags: The SPLICE_F_* flags
 *
 * This function gets folios from a file's pagecache and splices them into the
 * pipe.  Readahead will be called as necessary to fill more folios.  This may
 * be used for blockdevs also.
 *
 * Return: On success, the number of bytes read will be returned and *@ppos
 * will be updated if appropriate; 0 will be returned if there is no more data
 * to be read; -EAGAIN will be returned if the pipe had no space, and some
 * other negative error code will be returned on error.  A short read may occur
 * if the pipe has insufficient space, we reach the end of the data or we hit a
 * hole.
 */
ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
                            struct pipe_inode_info *pipe,
                            size_t len, unsigned int flags)
{
        struct folio_batch fbatch;
        struct kiocb iocb;
        size_t total_spliced = 0, used, npages;
        loff_t isize, end_offset;
        bool writably_mapped;
        int i, error = 0;

        if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes))
                return 0;

        init_sync_kiocb(&iocb, in);
        iocb.ki_pos = *ppos;

        /* Work out how much data we can actually add into the pipe */
        used = pipe_occupancy(pipe->head, pipe->tail);
        npages = max_t(ssize_t, pipe->max_usage - used, 0);
        len = min_t(size_t, len, npages * PAGE_SIZE);

        folio_batch_init(&fbatch);

        do {
                cond_resched();

                if (*ppos >= i_size_read(in->f_mapping->host))
                        break;

                iocb.ki_pos = *ppos;
                error = filemap_get_pages(&iocb, len, &fbatch, true);
                if (error < 0)
                        break;

                /*
                 * i_size must be checked after we know the pages are Uptodate.
                 *
                 * Checking i_size after the check allows us to calculate
                 * the correct value for "nr", which means the zero-filled
                 * part of the page is not copied back to userspace (unless
                 * another truncate extends the file - this is desired though).
                 */
                isize = i_size_read(in->f_mapping->host);
                if (unlikely(*ppos >= isize))
                        break;
                end_offset = min_t(loff_t, isize, *ppos + len);

                /*
                 * Once we start copying data, we don't want to be touching any
                 * cachelines that might be contended:
                 */
                writably_mapped = mapping_writably_mapped(in->f_mapping);

                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];
                        size_t n;

                        if (folio_pos(folio) >= end_offset)
                                goto out;
                        folio_mark_accessed(folio);

                        /*
                         * If users can be writing to this folio using arbitrary
                         * virtual addresses, take care of potential aliasing
                         * before reading the folio on the kernel side.
                         */
                        if (writably_mapped)
                                flush_dcache_folio(folio);

                        n = min_t(loff_t, len, isize - *ppos);
                        n = splice_folio_into_pipe(pipe, folio, *ppos, n);
                        if (!n)
                                goto out;
                        len -= n;
                        total_spliced += n;
                        *ppos += n;
                        in->f_ra.prev_pos = *ppos;
                        if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
                                goto out;
                }

                folio_batch_release(&fbatch);
        } while (len);

out:
        folio_batch_release(&fbatch);
        file_accessed(in);

        return total_spliced ? total_spliced : error;
}
EXPORT_SYMBOL(filemap_splice_read);

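/*
 * Note: unlike filemap_read(), filemap_splice_read() passes
 * need_uptodate == true to filemap_get_pages(), because spliced pages
 * are handed to the pipe as-is and must therefore be fully uptodate;
 * see the pipe comment in filemap_range_uptodate() above.
 */
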
static inline loff_t folio_seek_hole_data(struct xa_state *xas,
                struct address_space *mapping, struct folio *folio,
                loff_t start, loff_t end, bool seek_data)
{
        const struct address_space_operations *ops = mapping->a_ops;
        size_t offset, bsz = i_blocksize(mapping->host);

        if (xa_is_value(folio) || folio_test_uptodate(folio))
                return seek_data ? start : end;
        if (!ops->is_partially_uptodate)
                return seek_data ? end : start;

        xas_pause(xas);
        rcu_read_unlock();
        folio_lock(folio);
        if (unlikely(folio->mapping != mapping))
                goto unlock;

        offset = offset_in_folio(folio, start) & ~(bsz - 1);

        do {
                if (ops->is_partially_uptodate(folio, offset, bsz) ==
                                                        seek_data)
                        break;
                start = (start + bsz) & ~(bsz - 1);
                offset += bsz;
        } while (offset < folio_size(folio));
unlock:
        folio_unlock(folio);
        rcu_read_lock();
        return start;
}

3016 static inline size_t seek_folio_size(struct x    3016 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
3017 {                                                3017 {
3018         if (xa_is_value(folio))                  3018         if (xa_is_value(folio))
3019                 return PAGE_SIZE << xas_get_o    3019                 return PAGE_SIZE << xas_get_order(xas);
3020         return folio_size(folio);                3020         return folio_size(folio);
3021 }                                                3021 }
3022                                                  3022 
3023 /**                                              3023 /**
3024  * mapping_seek_hole_data - Seek for SEEK_DAT    3024  * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
3025  * @mapping: Address space to search.            3025  * @mapping: Address space to search.
3026  * @start: First byte to consider.               3026  * @start: First byte to consider.
3027  * @end: Limit of search (exclusive).            3027  * @end: Limit of search (exclusive).
3028  * @whence: Either SEEK_HOLE or SEEK_DATA.       3028  * @whence: Either SEEK_HOLE or SEEK_DATA.
3029  *                                               3029  *
3030  * If the page cache knows which blocks conta    3030  * If the page cache knows which blocks contain holes and which blocks
3031  * contain data, your filesystem can use this    3031  * contain data, your filesystem can use this function to implement
3032  * SEEK_HOLE and SEEK_DATA.  This is useful f    3032  * SEEK_HOLE and SEEK_DATA.  This is useful for filesystems which are
3033  * entirely memory-based such as tmpfs, and f    3033  * entirely memory-based such as tmpfs, and filesystems which support
3034  * unwritten extents.                            3034  * unwritten extents.
3035  *                                               3035  *
3036  * Return: The requested offset on success, o    3036  * Return: The requested offset on success, or -ENXIO if @whence specifies
3037  * SEEK_DATA and there is no data after @star    3037  * SEEK_DATA and there is no data after @start.  There is an implicit hole
3038  * after @end - 1, so SEEK_HOLE returns @end     3038  * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
3039  * and @end contain data.                        3039  * and @end contain data.
3040  */                                              3040  */
3041 loff_t mapping_seek_hole_data(struct address_    3041 loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
3042                 loff_t end, int whence)          3042                 loff_t end, int whence)
3043 {                                                3043 {
3044         XA_STATE(xas, &mapping->i_pages, star    3044         XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
3045         pgoff_t max = (end - 1) >> PAGE_SHIFT    3045         pgoff_t max = (end - 1) >> PAGE_SHIFT;
3046         bool seek_data = (whence == SEEK_DATA    3046         bool seek_data = (whence == SEEK_DATA);
3047         struct folio *folio;                     3047         struct folio *folio;
3048                                                  3048 
3049         if (end <= start)                        3049         if (end <= start)
3050                 return -ENXIO;                   3050                 return -ENXIO;
3051                                                  3051 
3052         rcu_read_lock();                         3052         rcu_read_lock();
3053         while ((folio = find_get_entry(&xas,     3053         while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
3054                 loff_t pos = (u64)xas.xa_inde    3054                 loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
3055                 size_t seek_size;                3055                 size_t seek_size;
3056                                                  3056 
3057                 if (start < pos) {               3057                 if (start < pos) {
3058                         if (!seek_data)          3058                         if (!seek_data)
3059                                 goto unlock;     3059                                 goto unlock;
3060                         start = pos;             3060                         start = pos;
3061                 }                                3061                 }
3062                                                  3062 
3063                 seek_size = seek_folio_size(&    3063                 seek_size = seek_folio_size(&xas, folio);
3064                 pos = round_up((u64)pos + 1,     3064                 pos = round_up((u64)pos + 1, seek_size);
3065                 start = folio_seek_hole_data(    3065                 start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
3066                                 seek_data);      3066                                 seek_data);
3067                 if (start < pos)                 3067                 if (start < pos)
3068                         goto unlock;             3068                         goto unlock;
3069                 if (start >= end)                3069                 if (start >= end)
3070                         break;                   3070                         break;
3071                 if (seek_size > PAGE_SIZE)       3071                 if (seek_size > PAGE_SIZE)
3072                         xas_set(&xas, pos >>     3072                         xas_set(&xas, pos >> PAGE_SHIFT);
3073                 if (!xa_is_value(folio))         3073                 if (!xa_is_value(folio))
3074                         folio_put(folio);        3074                         folio_put(folio);
3075         }                                        3075         }
3076         if (seek_data)                           3076         if (seek_data)
3077                 start = -ENXIO;                  3077                 start = -ENXIO;
3078 unlock:                                          3078 unlock:
3079         rcu_read_unlock();                       3079         rcu_read_unlock();
3080         if (folio && !xa_is_value(folio))        3080         if (folio && !xa_is_value(folio))
3081                 folio_put(folio);                3081                 folio_put(folio);
3082         if (start > end)                         3082         if (start > end)
3083                 return end;                      3083                 return end;
3084         return start;                            3084         return start;
3085 }                                                3085 }
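
/*
 * Hedged sketch of how a memory-backed filesystem's llseek handler
 * might build on mapping_seek_hole_data(), loosely modelled on the
 * tmpfs approach mentioned in the kernel-doc above.  The myfs_llseek
 * name is hypothetical; the helpers it calls are real kernel APIs.
 */
static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
{
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;

        if (whence != SEEK_DATA && whence != SEEK_HOLE)
                return generic_file_llseek(file, offset, whence);
        if (offset < 0)
                return -ENXIO;

        inode_lock(inode);
        /* Holding i_rwsem keeps i_size stable across the search. */
        offset = mapping_seek_hole_data(mapping, offset,
                                        i_size_read(inode), whence);
        if (offset >= 0)
                offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
        inode_unlock(inode);
        return offset;
}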
3086 
3087 #ifdef CONFIG_MMU
3088 #define MMAP_LOTSAMISS  (100)
3089 /*
3090  * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
3091  * @vmf - the vm_fault for this fault.
3092  * @folio - the folio to lock.
3093  * @fpin - the pointer to the file we may pin (or is already pinned).
3094  *
3095  * This works similarly to lock_folio_or_retry in that it can drop the
3096  * mmap_lock.  It differs in that it actually returns the folio locked
3097  * if it returns 1 and 0 if it couldn't lock the folio.  If we did have
3098  * to drop the mmap_lock then fpin will point to the pinned file and
3099  * needs to be fput()'ed at a later point.
3100  */
3101 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
3102                                      struct file **fpin)
3103 {
3104         if (folio_trylock(folio))
3105                 return 1;
3106 
3107         /*
3108          * NOTE! This will make us return with VM_FAULT_RETRY, but with
3109          * the fault lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
3110          * is supposed to work. We have way too many special cases...
3111          */
3112         if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
3113                 return 0;
3114 
3115         *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
3116         if (vmf->flags & FAULT_FLAG_KILLABLE) {
3117                 if (__folio_lock_killable(folio)) {
3118                         /*
3119                          * We didn't have the right flags to drop the
3120                          * fault lock, but all fault_handlers only check
3121                          * for fatal signals if we return VM_FAULT_RETRY,
3122                          * so we need to drop the fault lock here and
3123                          * return 0 if we don't have a fpin.
3124                          */
3125                         if (*fpin == NULL)
3126                                 release_fault_lock(vmf);
3127                         return 0;
3128                 }
3129         } else
3130                 __folio_lock(folio);
3131 
3132         return 1;
3133 }
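
/*
 * Caller-side contract in brief (sketch; filemap_fault() below follows
 * exactly this pattern):
 *
 *      if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
 *              goto out_retry;
 *
 * A return of 0 means the folio is not locked and the fault must be
 * retried; if fpin was set, the caller must still fput() it on that
 * path.
 */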
3134 
3135 /*
3136  * Synchronous readahead happens when we don't even find a page in the page
3137  * cache at all.  We don't want to perform IO under the mmap sem, so if we have
3138  * to drop the mmap sem we return the file that was pinned in order for us to do
3139  * that.  If we didn't pin a file then we return NULL.  The file that is
3140  * returned needs to be fput()'ed when we're done with it.
3141  */
3142 static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
3143 {
3144         struct file *file = vmf->vma->vm_file;
3145         struct file_ra_state *ra = &file->f_ra;
3146         struct address_space *mapping = file->f_mapping;
3147         DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
3148         struct file *fpin = NULL;
3149         unsigned long vm_flags = vmf->vma->vm_flags;
3150         unsigned int mmap_miss;
3151 
3152 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3153         /* Use the readahead code, even if readahead is disabled */
3154         if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) {
3155                 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3156                 ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
3157                 ra->size = HPAGE_PMD_NR;
3158                 /*
3159                  * Fetch two PMD folios, so we get the chance to actually
3160                  * readahead, unless we've been told not to.
3161                  */
3162                 if (!(vm_flags & VM_RAND_READ))
3163                         ra->size *= 2;
3164                 ra->async_size = HPAGE_PMD_NR;
3165                 page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
3166                 return fpin;
3167         }
3168 #endif
3169 
3170         /* If we don't want any read-ahead, don't bother */
3171         if (vm_flags & VM_RAND_READ)
3172                 return fpin;
3173         if (!ra->ra_pages)
3174                 return fpin;
3175 
3176         if (vm_flags & VM_SEQ_READ) {
3177                 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3178                 page_cache_sync_ra(&ractl, ra->ra_pages);
3179                 return fpin;
3180         }
3181 
3182         /* Avoid banging the cache line if not needed */
3183         mmap_miss = READ_ONCE(ra->mmap_miss);
3184         if (mmap_miss < MMAP_LOTSAMISS * 10)
3185                 WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
3186 
3187         /*
3188          * Do we miss much more than hit in this file? If so,
3189          * stop bothering with read-ahead. It will only hurt.
3190          */
3191         if (mmap_miss > MMAP_LOTSAMISS)
3192                 return fpin;
3193 
3194         /*
3195          * mmap read-around
3196          */
3197         fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3198         ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
3199         ra->size = ra->ra_pages;
3200         ra->async_size = ra->ra_pages / 4;
3201         ractl._index = ra->start;
3202         page_cache_ra_order(&ractl, ra, 0);
3203         return fpin;
3204 }
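
/*
 * Userspace view of the vm_flags tested above, as an illustrative
 * sketch: applications set VM_SEQ_READ / VM_RAND_READ on a mapping
 * via madvise(2), which in turn steers this fault-time readahead.
 * The helper name is hypothetical.
 */
#include <sys/mman.h>

static void hint_access_pattern(void *addr, size_t len, int sequential)
{
        /* MADV_SEQUENTIAL sets VM_SEQ_READ: aggressive sync readahead */
        /* MADV_RANDOM sets VM_RAND_READ: readahead is skipped above   */
        madvise(addr, len, sequential ? MADV_SEQUENTIAL : MADV_RANDOM);
}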
3205 
3206 /*
3207  * Asynchronous readahead happens when we find the page with PG_readahead
3208  * set, so we want to possibly extend the readahead further.  We return the
3209  * file that was pinned if we have to drop the mmap_lock in order to do IO.
3210  */
3211 static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
3212                                             struct folio *folio)
3213 {
3214         struct file *file = vmf->vma->vm_file;
3215         struct file_ra_state *ra = &file->f_ra;
3216         DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
3217         struct file *fpin = NULL;
3218         unsigned int mmap_miss;
3219 
3220         /* If we don't want any read-ahead, don't bother */
3221         if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
3222                 return fpin;
3223 
3224         mmap_miss = READ_ONCE(ra->mmap_miss);
3225         if (mmap_miss)
3226                 WRITE_ONCE(ra->mmap_miss, --mmap_miss);
3227 
3228         if (folio_test_readahead(folio)) {
3229                 fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3230                 page_cache_async_ra(&ractl, folio, ra->ra_pages);
3231         }
3232         return fpin;
3233 }
3234 
3235 static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf)
3236 {
3237         struct vm_area_struct *vma = vmf->vma;
3238         vm_fault_t ret = 0;
3239         pte_t *ptep;
3240 
3241         /*
3242          * We might have COW'ed a pagecache folio and might now have an mlocked
3243          * anon folio mapped. The original pagecache folio is not mlocked and
3244          * might have been evicted. During a read+clear/modify/write update of
3245          * the PTE, such as done in do_numa_page()/change_pte_range(), we
3246          * temporarily clear the PTE under PT lock and might detect it here as
3247          * "none" when not holding the PT lock.
3248          *
3249          * Not rechecking the PTE under PT lock could result in an unexpected
3250          * major fault in an mlock'ed region. Recheck only for this special
3251          * scenario while holding the PT lock, to not degrade non-mlocked
3252          * scenarios. Recheck the PTE without the PT lock first, thereby
3253          * reducing the number of times we hold the PT lock.
3254          */
3255         if (!(vma->vm_flags & VM_LOCKED))
3256                 return 0;
3257 
3258         if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
3259                 return 0;
3260 
3261         ptep = pte_offset_map_nolock(vma->vm_mm, vmf->pmd, vmf->address,
3262                                      &vmf->ptl);
3263         if (unlikely(!ptep))
3264                 return VM_FAULT_NOPAGE;
3265 
3266         if (unlikely(!pte_none(ptep_get_lockless(ptep)))) {
3267                 ret = VM_FAULT_NOPAGE;
3268         } else {
3269                 spin_lock(vmf->ptl);
3270                 if (unlikely(!pte_none(ptep_get(ptep))))
3271                         ret = VM_FAULT_NOPAGE;
3272                 spin_unlock(vmf->ptl);
3273         }
3274         pte_unmap(ptep);
3275         return ret;
3276 }
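
/*
 * The userspace sequence the comment above guards against, as an
 * illustrative sketch: a private file mapping that is mlock()'ed and
 * then written leaves an mlocked anon COW folio mapped, while the
 * original pagecache folio remains evictable.  The helper name is
 * hypothetical and error handling is elided.
 */
#include <sys/mman.h>

static void make_mlocked_cow_mapping(int fd, size_t len)
{
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE, fd, 0);

        if (p == MAP_FAILED)
                return;
        mlock(p, len);          /* VMA gains VM_LOCKED */
        p[0] = 1;               /* write COWs the pagecache folio */
}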
3277 
3278 /**
3279  * filemap_fault - read in file data for page fault handling
3280  * @vmf:        struct vm_fault containing details of the fault
3281  *
3282  * filemap_fault() is invoked via the vma operations vector for a
3283  * mapped memory region to read in file data during a page fault.
3284  *
3285  * The goto's are kind of ugly, but this streamlines the normal case of having
3286  * it in the page cache, and handles the special cases reasonably without
3287  * having a lot of duplicated code.
3288  *
3289  * vma->vm_mm->mmap_lock must be held on entry.
3290  *
3291  * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
3292  * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().
3293  *
3294  * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
3295  * has not been released.
3296  *
3297  * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
3298  *
3299  * Return: bitwise-OR of %VM_FAULT_ codes.
3300  */
3301 vm_fault_t filemap_fault(struct vm_fault *vmf)
3302 {
3303         int error;
3304         struct file *file = vmf->vma->vm_file;
3305         struct file *fpin = NULL;
3306         struct address_space *mapping = file->f_mapping;
3307         struct inode *inode = mapping->host;
3308         pgoff_t max_idx, index = vmf->pgoff;
3309         struct folio *folio;
3310         vm_fault_t ret = 0;
3311         bool mapping_locked = false;
3312 
3313         max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3314         if (unlikely(index >= max_idx))
3315                 return VM_FAULT_SIGBUS;
3316 
3317         trace_mm_filemap_fault(mapping, index);
3318 
3319         /*
3320          * Do we have something in the page cache already?
3321          */
3322         folio = filemap_get_folio(mapping, index);
3323         if (likely(!IS_ERR(folio))) {
3324                 /*
3325                  * We found the page, so try async readahead before waiting for
3326                  * the lock.
3327                  */
3328                 if (!(vmf->flags & FAULT_FLAG_TRIED))
3329                         fpin = do_async_mmap_readahead(vmf, folio);
3330                 if (unlikely(!folio_test_uptodate(folio))) {
3331                         filemap_invalidate_lock_shared(mapping);
3332                         mapping_locked = true;
3333                 }
3334         } else {
3335                 ret = filemap_fault_recheck_pte_none(vmf);
3336                 if (unlikely(ret))
3337                         return ret;
3338 
3339                 /* No page in the page cache at all */
3340                 count_vm_event(PGMAJFAULT);
3341                 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
3342                 ret = VM_FAULT_MAJOR;
3343                 fpin = do_sync_mmap_readahead(vmf);
3344 retry_find:
3345                 /*
3346                  * See the comment in filemap_create_folio() for why we
3347                  * need the invalidate_lock
3348                  */
3349                 if (!mapping_locked) {
3350                         filemap_invalidate_lock_shared(mapping);
3351                         mapping_locked = true;
3352                 }
3353                 folio = __filemap_get_folio(mapping, index,
3354                                           FGP_CREAT|FGP_FOR_MMAP,
3355                                           vmf->gfp_mask);
3356                 if (IS_ERR(folio)) {
3357                         if (fpin)
3358                                 goto out_retry;
3359                         filemap_invalidate_unlock_shared(mapping);
3360                         return VM_FAULT_OOM;
3361                 }
3362         }
3363 
3364         if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3365                 goto out_retry;
3366 
3367         /* Did it get truncated? */
3368         if (unlikely(folio->mapping != mapping)) {
3369                 folio_unlock(folio);
3370                 folio_put(folio);
3371                 goto retry_find;
3372         }
3373         VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
3374 
3375         /*
3376          * We have a locked folio in the page cache, now we need to check
3377          * that it's up-to-date. If not, it is going to be due to an error,
3378          * or because readahead was otherwise unable to retrieve it.
3379          */
3380         if (unlikely(!folio_test_uptodate(folio))) {
3381                 /*
3382                  * If the invalidate lock is not held, the folio was in cache
3383                  * and uptodate and now it is not. Strange but possible since we
3384                  * didn't hold the page lock all the time. Let's drop
3385                  * everything, get the invalidate lock and try again.
3386                  */
3387                 if (!mapping_locked) {
3388                         folio_unlock(folio);
3389                         folio_put(folio);
3390                         goto retry_find;
3391                 }
3392 
3393                 /*
3394                  * OK, the folio is really not uptodate. This can be because the
3395                  * VMA has the VM_RAND_READ flag set, or because an error
3396                  * arose. Let's read it in directly.
3397                  */
3398                 goto page_not_uptodate;
3399         }
3400 
3401         /*
3402          * We've made it this far and we had to drop our mmap_lock, now is the
3403          * time to return to the upper layer and have it re-find the vma and
3404          * redo the fault.
3405          */
3406         if (fpin) {
3407                 folio_unlock(folio);
3408                 goto out_retry;
3409         }
3410         if (mapping_locked)
3411                 filemap_invalidate_unlock_shared(mapping);
3412 
3413         /*
3414          * Found the page and have a reference on it.
3415          * We must recheck i_size under page lock.
3416          */
3417         max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3418         if (unlikely(index >= max_idx)) {
3419                 folio_unlock(folio);
3420                 folio_put(folio);
3421                 return VM_FAULT_SIGBUS;
3422         }
3423 
3424         vmf->page = folio_file_page(folio, index);
3425         return ret | VM_FAULT_LOCKED;
3426 
3427 page_not_uptodate:
3428         /*
3429          * Umm, take care of errors if the page isn't up-to-date.
3430          * Try to re-read it _once_. We do this synchronously,
3431          * because there really aren't any performance issues here
3432          * and we need to check for errors.
3433          */
3434         fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3435         error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
3436         if (fpin)
3437                 goto out_retry;
3438         folio_put(folio);
3439 
3440         if (!error || error == AOP_TRUNCATED_PAGE)
3441                 goto retry_find;
3442         filemap_invalidate_unlock_shared(mapping);
3443 
3444         return VM_FAULT_SIGBUS;
3445 
3446 out_retry:
3447         /*
3448          * We dropped the mmap_lock, we need to return to the fault handler to
3449          * re-find the vma and come back and find our hopefully still populated
3450          * page.
3451          */
3452         if (!IS_ERR(folio))
3453                 folio_put(folio);
3454         if (mapping_locked)
3455                 filemap_invalidate_unlock_shared(mapping);
3456         if (fpin)
3457                 fput(fpin);
3458         return ret | VM_FAULT_RETRY;
3459 }
3460 EXPORT_SYMBOL(filemap_fault);
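
/*
 * Illustration of how filemap_fault() is normally reached: a filesystem
 * installs a vm_operations_struct from its ->mmap() method.  This
 * mirrors filemap.c's own generic_file_vm_ops; the struct name here is
 * only illustrative.
 */
static const struct vm_operations_struct myfs_file_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = filemap_page_mkwrite,
};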
3461 
3462 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
3463                 pgoff_t start)
3464 {
3465         struct mm_struct *mm = vmf->vma->vm_mm;
3466 
3467         /* Huge page is mapped? No need to proceed. */
3468         if (pmd_trans_huge(*vmf->pmd)) {
3469                 folio_unlock(folio);
3470                 folio_put(folio);
3471                 return true;
3472         }
3473 
3474         if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
3475                 struct page *page = folio_file_page(folio, start);
3476                 vm_fault_t ret = do_set_pmd(vmf, page);
3477                 if (!ret) {
3478                         /* The page is mapped successfully, reference consumed. */
3479                         folio_unlock(folio);
3480                         return true;
3481                 }
3482         }
3483 
3484         if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
3485                 pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
3486 
3487         return false;
3488 }
3489 
3490 static struct folio *next_uptodate_folio(struct xa_state *xas,
3491                 struct address_space *mapping, pgoff_t end_pgoff)
3492 {
3493         struct folio *folio = xas_next_entry(xas, end_pgoff);
3494         unsigned long max_idx;
3495 
3496         do {
3497                 if (!folio)
3498                         return NULL;
3499                 if (xas_retry(xas, folio))
3500                         continue;
3501                 if (xa_is_value(folio))
3502                         continue;
3503                 if (folio_test_locked(folio))
3504                         continue;
3505                 if (!folio_try_get(folio))
3506                         continue;
3507                 /* Has the page moved or been split? */
3508                 if (unlikely(folio != xas_reload(xas)))
3509                         goto skip;
3510                 if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
3511                         goto skip;
3512                 if (!folio_trylock(folio))
3513                         goto skip;
3514                 if (folio->mapping != mapping)
3515                         goto unlock;
3516                 if (!folio_test_uptodate(folio))
3517                         goto unlock;
3518                 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3519                 if (xas->xa_index >= max_idx)
3520                         goto unlock;
3521                 return folio;
3522 unlock:
3523                 folio_unlock(folio);
3524 skip:
3525                 folio_put(folio);
3526         } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
3527 
3528         return NULL;
3529 }
3530                                                  3530 
3531 /*                                               3531 /*
3532  * Map page range [start_page, start_page + n    3532  * Map page range [start_page, start_page + nr_pages) of folio.
3533  * start_page is gotten from start by folio_p    3533  * start_page is gotten from start by folio_page(folio, start)
3534  */                                              3534  */
3535 static vm_fault_t filemap_map_folio_range(str    3535 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
3536                         struct folio *folio,     3536                         struct folio *folio, unsigned long start,
3537                         unsigned long addr, u    3537                         unsigned long addr, unsigned int nr_pages,
3538                         unsigned long *rss, u    3538                         unsigned long *rss, unsigned int *mmap_miss)
3539 {                                                3539 {
3540         vm_fault_t ret = 0;                      3540         vm_fault_t ret = 0;
3541         struct page *page = folio_page(folio,    3541         struct page *page = folio_page(folio, start);
3542         unsigned int count = 0;                  3542         unsigned int count = 0;
3543         pte_t *old_ptep = vmf->pte;              3543         pte_t *old_ptep = vmf->pte;
3544                                                  3544 
3545         do {                                     3545         do {
3546                 if (PageHWPoison(page + count    3546                 if (PageHWPoison(page + count))
3547                         goto skip;               3547                         goto skip;
3548                                                  3548 
3549                 /*                               3549                 /*
3550                  * If there are too many foli    3550                  * If there are too many folios that are recently evicted
3551                  * in a file, they will proba    3551                  * in a file, they will probably continue to be evicted.
3552                  * In such situation, read-ah    3552                  * In such situation, read-ahead is only a waste of IO.
		 * Don't decrease mmap_miss in this scenario to make sure
		 * we can stop read-ahead.
		 */
		if (!folio_test_workingset(folio))
			(*mmap_miss)++;

		/*
		 * NOTE: If there are PTE markers, we'll leave them to be
		 * handled in the specific fault path, and it'll prohibit the
		 * fault-around logic.
		 */
		if (!pte_none(ptep_get(&vmf->pte[count])))
			goto skip;

		count++;
		continue;
skip:
		if (count) {
			set_pte_range(vmf, folio, page, count, addr);
			*rss += count;
			folio_ref_add(folio, count);
			if (in_range(vmf->address, addr, count * PAGE_SIZE))
				ret = VM_FAULT_NOPAGE;
		}

		count++;
		page += count;
		vmf->pte += count;
		addr += count * PAGE_SIZE;
		count = 0;
	} while (--nr_pages > 0);

	if (count) {
		set_pte_range(vmf, folio, page, count, addr);
		*rss += count;
		folio_ref_add(folio, count);
		if (in_range(vmf->address, addr, count * PAGE_SIZE))
			ret = VM_FAULT_NOPAGE;
	}

	vmf->pte = old_ptep;

	return ret;
}
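/*
 * Editor's note -- illustrative sketch, not part of filemap.c: the loop
 * above accumulates a run of consecutive mappable pages in 'count' and
 * flushes the whole run with one set_pte_range() call whenever it hits a
 * page it must skip (and once more after the loop for the trailing run).
 * The same run-length batching pattern in plain, self-contained C:
 */
#if 0	/* example only */
#include <stdio.h>

static void flush_run(int start, int count)
{
	printf("map [%d, %d)\n", start, start + count);
}

static void map_batched(const int *ok, int nr)
{
	int i, count = 0;

	for (i = 0; i < nr; i++) {
		if (ok[i]) {		/* extend the current run */
			count++;
			continue;
		}
		if (count)		/* flush the run ended by a skip */
			flush_run(i - count, count);
		count = 0;
	}
	if (count)			/* flush the trailing run */
		flush_run(nr - count, count);
}
#endif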

static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
		struct folio *folio, unsigned long addr,
		unsigned long *rss, unsigned int *mmap_miss)
{
	vm_fault_t ret = 0;
	struct page *page = &folio->page;

	if (PageHWPoison(page))
		return ret;

	/* See the comment in filemap_map_folio_range() */
	if (!folio_test_workingset(folio))
		(*mmap_miss)++;

	/*
	 * NOTE: If there are PTE markers, we'll leave them to be
	 * handled in the specific fault path, and it'll prohibit
	 * the fault-around logic.
	 */
	if (!pte_none(ptep_get(vmf->pte)))
		return ret;

	if (vmf->address == addr)
		ret = VM_FAULT_NOPAGE;

	set_pte_range(vmf, folio, page, 1, addr);
	(*rss)++;
	folio_ref_inc(folio);

	return ret;
}

vm_fault_t filemap_map_pages(struct vm_fault *vmf,
			     pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	struct vm_area_struct *vma = vmf->vma;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	pgoff_t file_end, last_pgoff = start_pgoff;
	unsigned long addr;
	XA_STATE(xas, &mapping->i_pages, start_pgoff);
	struct folio *folio;
	vm_fault_t ret = 0;
	unsigned long rss = 0;
	unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved, folio_type;

	rcu_read_lock();
	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
	if (!folio)
		goto out;

	if (filemap_map_pmd(vmf, folio, start_pgoff)) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
	if (!vmf->pte) {
		folio_unlock(folio);
		folio_put(folio);
		goto out;
	}

	file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
	if (end_pgoff > file_end)
		end_pgoff = file_end;

	folio_type = mm_counter_file(folio);
	do {
		unsigned long end;

		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
		vmf->pte += xas.xa_index - last_pgoff;
		last_pgoff = xas.xa_index;
		end = folio_next_index(folio) - 1;
		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;

		if (!folio_test_large(folio))
			ret |= filemap_map_order0_folio(vmf,
					folio, addr, &rss, &mmap_miss);
		else
			ret |= filemap_map_folio_range(vmf, folio,
					xas.xa_index - folio->index, addr,
					nr_pages, &rss, &mmap_miss);

		folio_unlock(folio);
		folio_put(folio);
	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
	add_mm_counter(vma->vm_mm, folio_type, rss);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff);
out:
	rcu_read_unlock();

	mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
	if (mmap_miss >= mmap_miss_saved)
		WRITE_ONCE(file->f_ra.mmap_miss, 0);
	else
		WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);

	return ret;
}
EXPORT_SYMBOL(filemap_map_pages);
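/*
 * Editor's sketch (assumption, not part of filemap.c): a filesystem that
 * needs its own ->fault for locking can still reuse filemap_map_pages()
 * for fault-around and filemap_page_mkwrite() for write faults.  The
 * myfs_* names are hypothetical.
 */
#if 0	/* example only */
static vm_fault_t myfs_fault(struct vm_fault *vmf)
{
	/* fs-specific setup (e.g. taking a shared lock) would go here */
	return filemap_fault(vmf);
}

static const struct vm_operations_struct myfs_file_vm_ops = {
	.fault		= myfs_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};
#endif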

vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct folio *folio = page_folio(vmf->page);
	vm_fault_t ret = VM_FAULT_LOCKED;

	sb_start_pagefault(mapping->host->i_sb);
	file_update_time(vmf->vma->vm_file);
	folio_lock(folio);
	if (folio->mapping != mapping) {
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the folio dirty here already so that, when a freeze is in
	 * progress, writeback during freezing is guaranteed to see the dirty
	 * folio and write-protect it again.
	 */
	folio_mark_dirty(folio);
	folio_wait_stable(folio);
out:
	sb_end_pagefault(mapping->host->i_sb);
	return ret;
}

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->read_folio)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}
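/*
 * Editor's sketch (assumption, not part of filemap.c): wiring the generic
 * helpers into a filesystem's file_operations.  The myfs_* name is
 * hypothetical; the helpers are the real generic ones.
 */
#if 0	/* example only */
static const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= generic_file_fsync,
};
#endif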

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma_is_shared_maywrite(vma))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(filemap_page_mkwrite);
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

static struct folio *do_read_cache_folio(struct address_space *mapping,
		pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
{
	struct folio *folio;
	int err;

	if (!filler)
		filler = mapping->a_ops->read_folio;
repeat:
	folio = filemap_get_folio(mapping, index);
	if (IS_ERR(folio)) {
		folio = filemap_alloc_folio(gfp,
					    mapping_min_folio_order(mapping));
		if (!folio)
			return ERR_PTR(-ENOMEM);
		index = mapping_align_index(mapping, index);
		err = filemap_add_folio(mapping, folio, index, gfp);
		if (unlikely(err)) {
			folio_put(folio);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for xarray node */
			return ERR_PTR(err);
		}

		goto filler;
	}
	if (folio_test_uptodate(folio))
		goto out;

	if (!folio_trylock(folio)) {
		folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
		goto repeat;
	}

	/* Folio was truncated from mapping */
	if (!folio->mapping) {
		folio_unlock(folio);
		folio_put(folio);
		goto repeat;
	}

	/* Someone else locked and filled the folio in a very small window */
	if (folio_test_uptodate(folio)) {
		folio_unlock(folio);
		goto out;
	}

filler:
	err = filemap_read_folio(file, filler, folio);
	if (err) {
		folio_put(folio);
		if (err == AOP_TRUNCATED_PAGE)
			goto repeat;
		return ERR_PTR(err);
	}

out:
	folio_mark_accessed(folio);
	return folio;
}

/**
 * read_cache_folio - Read into page cache, fill it if needed.
 * @mapping: The address_space to read from.
 * @index: The index to read.
 * @filler: Function to perform the read, or NULL to use aops->read_folio().
 * @file: Passed to filler function, may be NULL if not required.
 *
 * Read one page into the page cache.  If it succeeds, the folio returned
 * will contain @index, but it may not be the first page of the folio.
 *
 * If the filler function returns an error, it will be returned to the
 * caller.
 *
 * Context: May sleep.  Expects mapping->invalidate_lock to be held.
 * Return: An uptodate folio on success, ERR_PTR() on failure.
 */
struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
		filler_t filler, struct file *file)
{
	return do_read_cache_folio(mapping, index, filler, file,
			mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_folio);
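/*
 * Editor's sketch (assumption, not part of filemap.c): a typical caller of
 * read_cache_folio() using the default ->read_folio filler.  Per the
 * kerneldoc above, mapping->invalidate_lock is assumed to be held by the
 * caller.  The myfs_read_byte() name is hypothetical.
 */
#if 0	/* example only */
static int myfs_read_byte(struct address_space *mapping, loff_t pos, u8 *out)
{
	struct folio *folio;
	u8 *kaddr;

	folio = read_cache_folio(mapping, pos >> PAGE_SHIFT, NULL, NULL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	kaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
	*out = *kaddr;
	kunmap_local(kaddr);
	folio_put(folio);
	return 0;
}
#endif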

/**
 * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
 * @mapping:	The address_space for the folio.
 * @index:	The index that the allocated folio will contain.
 * @gfp:	The page allocator flags to use if allocating.
 *
 * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with
 * any new memory allocations done using the specified allocation flags.
 *
 * The most likely error from this function is EIO, but ENOMEM is
 * possible and so is EINTR.  If ->read_folio returns another error,
 * that will be returned to the caller.
 *
 * The function expects mapping->invalidate_lock to be already held.
 *
 * Return: Uptodate folio on success, ERR_PTR() on failure.
 */
struct folio *mapping_read_folio_gfp(struct address_space *mapping,
		pgoff_t index, gfp_t gfp)
{
	return do_read_cache_folio(mapping, index, NULL, NULL, gfp);
}
EXPORT_SYMBOL(mapping_read_folio_gfp);
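/*
 * Editor's sketch (assumption, not part of filemap.c): the _gfp variant is
 * for contexts where the mapping's default GFP mask is too permissive,
 * e.g. reading metadata where reclaim must not re-enter the filesystem.
 * The myfs_* name is hypothetical.
 */
#if 0	/* example only */
static struct folio *myfs_read_meta_folio(struct address_space *mapping,
					  pgoff_t index)
{
	/* GFP_NOFS avoids recursing into the filesystem during reclaim. */
	return mapping_read_folio_gfp(mapping, index, GFP_NOFS);
}
#endif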

static struct page *do_read_cache_page(struct address_space *mapping,
		pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
{
	struct folio *folio;

	folio = do_read_cache_folio(mapping, index, filler, file, gfp);
	if (IS_ERR(folio))
		return &folio->page;
	return folio_file_page(folio, index);
}

struct page *read_cache_page(struct address_space *mapping,
			pgoff_t index, filler_t *filler, struct file *file)
{
	return do_read_cache_page(mapping, index, filler, file,
			mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);

/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 *
 * The function expects mapping->invalidate_lock to be already held.
 *
 * Return: up to date page on success, ERR_PTR() on failure.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);

/*
 * Warn about a page cache invalidation failure during a direct I/O write.
 */
static void dio_warn_stale_pagecache(struct file *filp)
{
	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
	char pathname[128];
	char *path;

	errseq_set(&filp->f_mapping->wb_err, -EIO);
	if (__ratelimit(&_rs)) {
		path = file_path(filp, pathname, sizeof(pathname));
		if (IS_ERR(path))
			path = "(unknown)";
		pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
			current->comm);
	}
}

void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;

	if (mapping->nrpages &&
	    invalidate_inode_pages2_range(mapping,
			iocb->ki_pos >> PAGE_SHIFT,
			(iocb->ki_pos + count - 1) >> PAGE_SHIFT))
		dio_warn_stale_pagecache(iocb->ki_filp);
}
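/*
 * Editor's note: the shifts above turn an inclusive byte range into an
 * inclusive range of page indices.  A worked example with 4 KiB pages: a
 * 10-byte write at ki_pos 4090 spans bytes 4090..4099, so it invalidates
 * indices (4090 >> 12) = 0 through ((4090 + 10 - 1) >> 12) = 1, i.e. both
 * pages the write touches.
 */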

ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	size_t write_len = iov_iter_count(from);
	ssize_t written;

	/*
	 * If a page cannot be invalidated, return 0 to fall back
	 * to buffered write.
	 */
	written = kiocb_invalidate_pages(iocb, write_len);
	if (written) {
		if (written == -EBUSY)
			return 0;
		return written;
	}

	written = mapping->a_ops->direct_IO(iocb, from);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing.  Either one is a pretty crazy thing to do,
	 * so we don't support it 100%.  If this invalidation
	 * fails, tough, the write still worked...
	 *
	 * Most of the time we do not need this since dio_complete() will do
	 * the invalidation for us. However there are some file systems that
	 * do not end up with dio_complete() being called, so let's not break
	 * them by removing it completely.
	 *
	 * A noticeable example is blkdev_direct_IO().
	 *
	 * Skip invalidation for async writes or if mapping has no pages.
	 */
	if (written > 0) {
		struct inode *inode = mapping->host;
		loff_t pos = iocb->ki_pos;

		kiocb_invalidate_post_direct_write(iocb, written);
		pos += written;
		write_len -= written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, write_len - iov_iter_count(from));
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);

ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	size_t chunk = mapping_max_folio_size(mapping);
	long status = 0;
	ssize_t written = 0;

	do {
		struct folio *folio;
		size_t offset;		/* Offset into folio */
		size_t bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */
		void *fsdata = NULL;

		bytes = iov_iter_count(i);
retry:
		offset = pos & (chunk - 1);
		bytes = min(chunk - offset, bytes);
		balance_dirty_pages_ratelimited(mapping);

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes,
						&folio, &fsdata);
		if (unlikely(status < 0))
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		flush_dcache_folio(folio);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						folio, fsdata);
		if (unlikely(status != copied)) {
			iov_iter_revert(i, copied - max(status, 0L));
			if (unlikely(status < 0))
				break;
		}
		cond_resched();

		if (unlikely(status == 0)) {
			/*
			 * A short copy made ->write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			if (chunk > PAGE_SIZE)
				chunk /= 2;
			if (copied) {
				bytes = copied;
				goto retry;
			}
		} else {
			pos += status;
			written += status;
		}
	} while (iov_iter_count(i));

	if (!written)
		return status;
	iocb->ki_pos += written;
	return written;
}
EXPORT_SYMBOL(generic_perform_write);
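/*
 * Editor's sketch (assumption, not part of filemap.c): the minimal
 * ->write_begin/->write_end contract that generic_perform_write() relies
 * on -- write_begin returns a locked, referenced folio covering 'pos';
 * write_end publishes 'copied' bytes, unlocks, and drops the reference.
 * Real filesystems also handle block allocation, zeroing, and uptodate
 * state.  The myfs_* names are hypothetical.
 */
#if 0	/* example only */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len,
			    struct folio **foliop, void **fsdata)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
				    FGP_WRITEBEGIN, mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*foliop = folio;		/* locked, with a reference held */
	return 0;
}

static int myfs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct folio *folio, void *fsdata)
{
	/*
	 * A real implementation must also bring the folio uptodate
	 * (zeroing whatever the copy did not cover) and reject a short
	 * copy into a !uptodate folio by returning 0, which triggers
	 * the retry path in generic_perform_write().
	 */
	folio_mark_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);
	return copied;
}
#endif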
4103                                                  4103 
4104 /**                                              4104 /**
4105  * __generic_file_write_iter - write data to     4105  * __generic_file_write_iter - write data to a file
4106  * @iocb:       IO state structure (file, off    4106  * @iocb:       IO state structure (file, offset, etc.)
4107  * @from:       iov_iter with data to write      4107  * @from:       iov_iter with data to write
4108  *                                               4108  *
4109  * This function does all the work needed for    4109  * This function does all the work needed for actually writing data to a
4110  * file. It does all basic checks, removes SU    4110  * file. It does all basic checks, removes SUID from the file, updates
4111  * modification times and calls proper subrou    4111  * modification times and calls proper subroutines depending on whether we
4112  * do direct IO or a standard buffered write.    4112  * do direct IO or a standard buffered write.
4113  *                                               4113  *
4114  * It expects i_rwsem to be grabbed unless we    4114  * It expects i_rwsem to be grabbed unless we work on a block device or similar
4115  * object which does not need locking at all.    4115  * object which does not need locking at all.
4116  *                                               4116  *
4117  * This function does *not* take care of sync    4117  * This function does *not* take care of syncing data in case of O_SYNC write.
4118  * A caller has to handle it. This is mainly     4118  * A caller has to handle it. This is mainly due to the fact that we want to
4119  * avoid syncing under i_rwsem.                  4119  * avoid syncing under i_rwsem.
4120  *                                               4120  *
4121  * Return:                                       4121  * Return:
4122  * * number of bytes written, even for trunca    4122  * * number of bytes written, even for truncated writes
4123  * * negative error code if no data has been     4123  * * negative error code if no data has been written at all
4124  */                                              4124  */
4125 ssize_t __generic_file_write_iter(struct kioc    4125 ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4126 {                                                4126 {
4127         struct file *file = iocb->ki_filp;       4127         struct file *file = iocb->ki_filp;
4128         struct address_space *mapping = file-    4128         struct address_space *mapping = file->f_mapping;
4129         struct inode *inode = mapping->host;     4129         struct inode *inode = mapping->host;
4130         ssize_t ret;                             4130         ssize_t ret;
4131                                                  4131 
4132         ret = file_remove_privs(file);           4132         ret = file_remove_privs(file);
4133         if (ret)                                 4133         if (ret)
4134                 return ret;                      4134                 return ret;
4135                                                  4135 
4136         ret = file_update_time(file);            4136         ret = file_update_time(file);
4137         if (ret)                                 4137         if (ret)
4138                 return ret;                      4138                 return ret;
4139                                                  4139 
4140         if (iocb->ki_flags & IOCB_DIRECT) {      4140         if (iocb->ki_flags & IOCB_DIRECT) {
4141                 ret = generic_file_direct_wri    4141                 ret = generic_file_direct_write(iocb, from);
4142                 /*                               4142                 /*
4143                  * If the write stopped short    4143                  * If the write stopped short of completing, fall back to
4144                  * buffered writes.  Some fil    4144                  * buffered writes.  Some filesystems do this for writes to
4145                  * holes, for example.  For D    4145                  * holes, for example.  For DAX files, a buffered write will
4146                  * not succeed (even if it di    4146                  * not succeed (even if it did, DAX does not handle dirty
4147                  * page-cache pages correctly    4147                  * page-cache pages correctly).
4148                  */                              4148                  */
4149                 if (ret < 0 || !iov_iter_coun    4149                 if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode))
4150                         return ret;              4150                         return ret;
4151                 return direct_write_fallback(    4151                 return direct_write_fallback(iocb, from, ret,
4152                                 generic_perfo    4152                                 generic_perform_write(iocb, from));
4153         }                                        4153         }
4154                                                  4154 
4155         return generic_perform_write(iocb, fr    4155         return generic_perform_write(iocb, from);
4156 }                                                4156 }
4157 EXPORT_SYMBOL(__generic_file_write_iter);        4157 EXPORT_SYMBOL(__generic_file_write_iter);
4158                                                  4158 
4159 /**                                              4159 /**
4160  * generic_file_write_iter - write data to a     4160  * generic_file_write_iter - write data to a file
4161  * @iocb:       IO state structure               4161  * @iocb:       IO state structure
4162  * @from:       iov_iter with data to write      4162  * @from:       iov_iter with data to write
4163  *                                               4163  *
4164  * This is a wrapper around __generic_file_wr    4164  * This is a wrapper around __generic_file_write_iter() to be used by most
4165  * filesystems. It takes care of syncing the     4165  * filesystems. It takes care of syncing the file in case of O_SYNC file
4166  * and acquires i_rwsem as needed.               4166  * and acquires i_rwsem as needed.
4167  * Return:                                       4167  * Return:
4168  * * negative error code if no data has been     4168  * * negative error code if no data has been written at all of
4169  *   vfs_fsync_range() failed for a synchrono    4169  *   vfs_fsync_range() failed for a synchronous write
4170  * * number of bytes written, even for trunca    4170  * * number of bytes written, even for truncated writes
4171  */                                              4171  */
4172 ssize_t generic_file_write_iter(struct kiocb     4172 ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4173 {                                                4173 {
4174         struct file *file = iocb->ki_filp;       4174         struct file *file = iocb->ki_filp;
4175         struct inode *inode = file->f_mapping    4175         struct inode *inode = file->f_mapping->host;
4176         ssize_t ret;                             4176         ssize_t ret;
4177                                                  4177 
4178         inode_lock(inode);                       4178         inode_lock(inode);
4179         ret = generic_write_checks(iocb, from    4179         ret = generic_write_checks(iocb, from);
4180         if (ret > 0)                             4180         if (ret > 0)
4181                 ret = __generic_file_write_it    4181                 ret = __generic_file_write_iter(iocb, from);
4182         inode_unlock(inode);                     4182         inode_unlock(inode);
4183                                                  4183 
4184         if (ret > 0)                             4184         if (ret > 0)
4185                 ret = generic_write_sync(iocb    4185                 ret = generic_write_sync(iocb, ret);
4186         return ret;                              4186         return ret;
4187 }                                                4187 }
4188 EXPORT_SYMBOL(generic_file_write_iter);          4188 EXPORT_SYMBOL(generic_file_write_iter);

/**
 * filemap_release_folio() - Release fs-specific metadata on a folio.
 * @folio: The folio which the kernel is trying to free.
 * @gfp: Memory allocation flags (and I/O mode).
 *
 * The address_space is trying to release any data attached to a folio
 * (presumably at folio->private).
 *
 * This will also be called if the private_2 flag is set on a folio,
 * indicating that the folio has other metadata associated with it.
 *
 * The @gfp argument specifies whether I/O may be performed to release
 * this folio (__GFP_IO), and whether the call may block
 * (__GFP_RECLAIM & __GFP_FS).
 *
 * Return: %true if the release was successful, otherwise %false.
 */
bool filemap_release_folio(struct folio *folio, gfp_t gfp)
{
        struct address_space * const mapping = folio->mapping;

        BUG_ON(!folio_test_locked(folio));
        if (!folio_needs_release(folio))
                return true;
        if (folio_test_writeback(folio))
                return false;

        if (mapping && mapping->a_ops->release_folio)
                return mapping->a_ops->release_folio(folio, gfp);
        return try_to_free_buffers(folio);
}
EXPORT_SYMBOL(filemap_release_folio);
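
/*
 * Example (illustrative, not part of filemap.c): a filesystem that
 * attaches buffer heads at folio->private reaches the a_ops dispatch
 * above through a method like this. "myfs_release_folio" is a
 * hypothetical sketch, not a real implementation.
 */
static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
        /* Releasing may touch fs state; decline if the context forbids it. */
        if (!(gfp & __GFP_FS))
                return false;
        /* Otherwise try to detach the buffers at folio->private. */
        return try_to_free_buffers(folio);
}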

/**
 * filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache
 * @inode: The inode to flush
 * @flush: Set to write back rather than simply invalidate.
 * @start: First byte in range.
 * @end: Last byte in range (inclusive), or LLONG_MAX for everything from start
 *       onwards.
 *
 * Invalidate all the folios on an inode that contribute to the specified
 * range, possibly writing them back first.  Whilst the operation is
 * undertaken, the invalidate lock is held to prevent new folios from being
 * installed.
 */
int filemap_invalidate_inode(struct inode *inode, bool flush,
                             loff_t start, loff_t end)
{
        struct address_space *mapping = inode->i_mapping;
        pgoff_t first = start >> PAGE_SHIFT;
        pgoff_t last = end >> PAGE_SHIFT;
        pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1;

        if (!mapping || !mapping->nrpages || end < start)
                goto out;

        /* Prevent new folios from being added to the inode. */
        filemap_invalidate_lock(mapping);

        if (!mapping->nrpages)
                goto unlock;

        unmap_mapping_pages(mapping, first, nr, false);

        /* Write back the data if we're asked to. */
        if (flush) {
                struct writeback_control wbc = {
                        .sync_mode      = WB_SYNC_ALL,
                        .nr_to_write    = LONG_MAX,
                        .range_start    = start,
                        .range_end      = end,
                };

                filemap_fdatawrite_wbc(mapping, &wbc);
        }

        /* Wait for writeback to complete on all folios and discard. */
        invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE);

unlock:
        filemap_invalidate_unlock(mapping);
out:
        return filemap_check_errors(mapping);
}
EXPORT_SYMBOL_GPL(filemap_invalidate_inode);
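
/*
 * Example (illustrative, not part of filemap.c): a network filesystem
 * told that the first 4KiB of a file changed on the server could write
 * back and then discard its cached copy of that range.
 * "myfs_remote_data_changed" is a hypothetical caller.
 */
static int myfs_remote_data_changed(struct inode *inode)
{
        /* flush == true: write dirty folios back before invalidating. */
        return filemap_invalidate_inode(inode, true, 0, 4095);
}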

#ifdef CONFIG_CACHESTAT_SYSCALL
/**
 * filemap_cachestat() - compute the page cache statistics of a mapping
 * @mapping:    The mapping to compute the statistics for.
 * @first_index:        The starting page cache index.
 * @last_index: The final page index (inclusive).
 * @cs: the cachestat struct to write the result to.
 *
 * This will query the page cache statistics of a mapping in the
 * page range of [first_index, last_index] (inclusive). The statistics
 * queried include: number of dirty pages, number of pages marked for
 * writeback, and the number of (recently) evicted pages.
 */
static void filemap_cachestat(struct address_space *mapping,
                pgoff_t first_index, pgoff_t last_index, struct cachestat *cs)
{
        XA_STATE(xas, &mapping->i_pages, first_index);
        struct folio *folio;

        /* Flush stats (and potentially sleep) outside the RCU read section. */
        mem_cgroup_flush_stats_ratelimited(NULL);

        rcu_read_lock();
        xas_for_each(&xas, folio, last_index) {
                int order;
                unsigned long nr_pages;
                pgoff_t folio_first_index, folio_last_index;

                /*
                 * Don't deref the folio. It is not pinned, and might
                 * get freed (and reused) underneath us.
                 *
                 * We *could* pin it, but that would be expensive for
                 * what should be a fast and lightweight syscall.
                 *
                 * Instead, derive all information of interest from
                 * the rcu-protected xarray.
                 */

                if (xas_retry(&xas, folio))
                        continue;

                order = xas_get_order(&xas);
                nr_pages = 1 << order;
                folio_first_index = round_down(xas.xa_index, 1 << order);
                folio_last_index = folio_first_index + nr_pages - 1;

                /* Folios might straddle the range boundaries; only count covered pages. */
                if (folio_first_index < first_index)
                        nr_pages -= first_index - folio_first_index;

                if (folio_last_index > last_index)
                        nr_pages -= folio_last_index - last_index;

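                /*
                 * Worked example (illustrative): an order-4 folio covering
                 * indices 0-15, queried with first_index == 4 and
                 * last_index == 11, contributes 16 - 4 - 4 = 8 pages above.
                 */
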
                if (xa_is_value(folio)) {
                        /* page is evicted */
                        void *shadow = (void *)folio;
                        bool workingset; /* not used */

                        cs->nr_evicted += nr_pages;

#ifdef CONFIG_SWAP /* implies CONFIG_MMU */
                        if (shmem_mapping(mapping)) {
                                /* shmem file - in swap cache */
                                swp_entry_t swp = radix_to_swp_entry(folio);

                                /* swapin error results in poisoned entry */
                                if (non_swap_entry(swp))
                                        goto resched;

                                /*
                                 * Getting a swap entry from the shmem
                                 * inode means we beat
                                 * shmem_unuse(). rcu_read_lock()
                                 * ensures swapoff waits for us before
                                 * freeing the swapper space. However,
                                 * we can race with swapping and
                                 * invalidation, so there might not be
                                 * a shadow in the swapcache (yet).
                                 */
                                shadow = get_shadow_from_swap_cache(swp);
                                if (!shadow)
                                        goto resched;
                        }
#endif
                        if (workingset_test_recent(shadow, true, &workingset, false))
                                cs->nr_recently_evicted += nr_pages;

                        goto resched;
                }

                /* page is in cache */
                cs->nr_cache += nr_pages;

                if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
                        cs->nr_dirty += nr_pages;

                if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
                        cs->nr_writeback += nr_pages;

resched:
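                /*
                 * xas_pause() records the current position so that, after
                 * the RCU-friendly reschedule below, the walk revalidates
                 * from the next index rather than trusting stale state.
                 */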
                if (need_resched()) {
                        xas_pause(&xas);
                        cond_resched_rcu();
                }
        }
        rcu_read_unlock();
}

/*
 * The cachestat(2) system call.
 *
 * cachestat() returns the page cache statistics of a file in the
 * byte range specified by `off` and `len`: number of cached pages,
 * number of dirty pages, number of pages marked for writeback,
 * number of evicted pages, and number of recently evicted pages.
 *
 * An evicted page is a page that was previously in the page cache
 * but has been evicted since. A page is recently evicted if its last
 * eviction was recent enough that its reentry to the cache would
 * indicate that it is actively being used by the system, and that
 * there is memory pressure on the system.
 *
 * `off` and `len` must be non-negative integers. If `len` > 0,
 * the queried range is [`off`, `off` + `len`]. If `len` == 0,
 * we will query in the range from `off` to the end of the file.
 *
 * The `flags` argument is unused for now, but is included for future
 * extensibility. Users should pass 0 (i.e. no flags specified).
 *
 * Currently, hugetlbfs is not supported.
 *
 * Because the status of a page can change after cachestat() checks it
 * but before it returns to the application, the returned values may
 * contain stale information.
 *
 * Return values:
 *  zero        - success
 *  -EFAULT     - cstat or cstat_range points to an illegal address
 *  -EINVAL     - invalid flags
 *  -EBADF      - invalid file descriptor
 *  -EOPNOTSUPP - file descriptor is of a hugetlbfs file
 */
SYSCALL_DEFINE4(cachestat, unsigned int, fd,
                struct cachestat_range __user *, cstat_range,
                struct cachestat __user *, cstat, unsigned int, flags)
{
        struct fd f = fdget(fd);
        struct address_space *mapping;
        struct cachestat_range csr;
        struct cachestat cs;
        pgoff_t first_index, last_index;

        if (!fd_file(f))
                return -EBADF;

        if (copy_from_user(&csr, cstat_range,
                        sizeof(struct cachestat_range))) {
                fdput(f);
                return -EFAULT;
        }

        /* hugetlbfs is not supported */
        if (is_file_hugepages(fd_file(f))) {
                fdput(f);
                return -EOPNOTSUPP;
        }

        if (flags != 0) {
                fdput(f);
                return -EINVAL;
        }

        first_index = csr.off >> PAGE_SHIFT;
        last_index =
                csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
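        /*
         * Worked example (illustrative, 4KiB pages): off == 4096 and
         * len == 8192 yield first_index == 1 and last_index == 2.
         */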
        memset(&cs, 0, sizeof(struct cachestat));
        mapping = fd_file(f)->f_mapping;
        filemap_cachestat(mapping, first_index, last_index, &cs);
        fdput(f);

        if (copy_to_user(cstat, &cs, sizeof(struct cachestat)))
                return -EFAULT;

        return 0;
}
#endif /* CONFIG_CACHESTAT_SYSCALL */
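
/*
 * Example (userspace, illustrative, not part of filemap.c): invoking
 * cachestat(2) directly via syscall(2). The syscall number (451 in the
 * unified tables) and the struct layouts are restated here so the
 * sketch is self-contained; <uapi/linux/mman.h> has the authoritative
 * definitions. Guarded with #if 0 since this is not kernel code.
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_cachestat
#define __NR_cachestat 451
#endif

struct cachestat_range { uint64_t off, len; };
struct cachestat {
        uint64_t nr_cache, nr_dirty, nr_writeback;
        uint64_t nr_evicted, nr_recently_evicted;
};

int main(int argc, char **argv)
{
        struct cachestat_range range = { .off = 0, .len = 0 }; /* whole file */
        struct cachestat cs;
        int fd;

        if (argc < 2)
                return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || syscall(__NR_cachestat, fd, &range, &cs, 0))
                return 1;
        printf("cached %llu dirty %llu writeback %llu\n",
               (unsigned long long)cs.nr_cache,
               (unsigned long long)cs.nr_dirty,
               (unsigned long long)cs.nr_writeback);
        return 0;
}
#endif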