
TOMOYO Linux Cross Reference
Linux/mm/filemap.c

Version: linux-6.12-rc7

Source listing

/mm/filemap.c (Version linux-6.12-rc7). The requested diff target, ccs-tools-1.8.12, contains no /mm/filemap.c, so only the linux-6.12-rc7 version is shown below.


// SPDX-License-Identifier: GPL-2.0-only
/*
 *      linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/error-injection.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
#include <linux/ramfs.h>
#include <linux/page_idle.h>
#include <linux/migrate.h>
#include <linux/pipe_fs_i.h>
#include <linux/splice.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

#include "swap.h"

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem              (truncate_pagecache)
 *    ->private_lock            (__free_pte->block_dirty_folio)
 *      ->swap_lock             (exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_rwsem
 *    ->invalidate_lock         (acquired by fs in truncate path)
 *      ->i_mmap_rwsem          (truncate->unmap_mapping_range)
 *
 *  ->mmap_lock
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock   (various, mainly in memory.c)
 *        ->i_pages lock        (arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_lock
 *    ->invalidate_lock         (filemap_fault)
 *      ->lock_page             (filemap_fault, access_process_vm)
 *
 *  ->i_rwsem                   (generic_perform_write)
 *    ->mmap_lock               (fault_in_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock                   (fs/fs-writeback.c)
 *    ->i_pages lock            (__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock           (vma_merge)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock     (anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock               (try_to_unmap_one)
 *    ->private_lock            (try_to_unmap_one)
 *    ->i_pages lock            (try_to_unmap_one)
 *    ->lruvec->lru_lock        (follow_page_mask->mark_page_accessed)
 *    ->lruvec->lru_lock        (check_pte_range->folio_isolate_lru)
 *    ->private_lock            (folio_remove_rmap_pte->set_page_dirty)
 *    ->i_pages lock            (folio_remove_rmap_pte->set_page_dirty)
 *    bdi.wb->list_lock         (folio_remove_rmap_pte->set_page_dirty)
 *    ->inode->i_lock           (folio_remove_rmap_pte->set_page_dirty)
 *    ->memcg->move_lock        (folio_remove_rmap_pte->folio_memcg_lock)
 *    bdi.wb->list_lock         (zap_pte_range->set_page_dirty)
 *    ->inode->i_lock           (zap_pte_range->set_page_dirty)
 *    ->private_lock            (zap_pte_range->block_dirty_folio)
 */
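/*
 * Illustration (not part of this file): the ordering above means a page
 * fault path may nest the locks like this; a sketch assuming a filesystem
 * that takes the invalidate_lock in its ->fault handler:
 *
 *      mmap_read_lock(mm);                      // ->mmap_lock
 *      filemap_invalidate_lock_shared(mapping); // ->invalidate_lock
 *      folio_lock(folio);                       // ->lock_page
 *      // ... handle the fault ...
 *      folio_unlock(folio);
 *      filemap_invalidate_unlock_shared(mapping);
 *      mmap_read_unlock(mm);
 *
 * Acquiring these in the opposite order elsewhere is a lockdep-reportable
 * deadlock candidate.
 */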

static void mapping_set_update(struct xa_state *xas,
                struct address_space *mapping)
{
        if (dax_mapping(mapping) || shmem_mapping(mapping))
                return;
        xas_set_update(xas, workingset_update_node);
        xas_set_lru(xas, &shadow_nodes);
}

static void page_cache_delete(struct address_space *mapping,
                                   struct folio *folio, void *shadow)
{
        XA_STATE(xas, &mapping->i_pages, folio->index);
        long nr = 1;

        mapping_set_update(&xas, mapping);

        xas_set_order(&xas, folio->index, folio_order(folio));
        nr = folio_nr_pages(folio);

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

        xas_store(&xas, shadow);
        xas_init_marks(&xas);

        folio->mapping = NULL;
        /* Leave page->index set: truncation lookup relies upon it */
        mapping->nrpages -= nr;
}

static void filemap_unaccount_folio(struct address_space *mapping,
                struct folio *folio)
{
        long nr;

        VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
        if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
                pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
                         current->comm, folio_pfn(folio));
                dump_page(&folio->page, "still mapped when deleted");
                dump_stack();
                add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

                if (mapping_exiting(mapping) && !folio_test_large(folio)) {
                        int mapcount = folio_mapcount(folio);

                        if (folio_ref_count(folio) >= mapcount + 2) {
                                /*
                                 * All vmas have already been torn down, so it's
                                 * a good bet that actually the page is unmapped
                                 * and we'd rather not leak it: if we're wrong,
                                 * another bad page check should catch it later.
                                 */
                                atomic_set(&folio->_mapcount, -1);
                                folio_ref_sub(folio, mapcount);
                        }
                }
        }

        /* hugetlb folios do not participate in page cache accounting. */
        if (folio_test_hugetlb(folio))
                return;

        nr = folio_nr_pages(folio);

        __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
        if (folio_test_swapbacked(folio)) {
                __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
                if (folio_test_pmd_mappable(folio))
                        __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
        } else if (folio_test_pmd_mappable(folio)) {
                __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
                filemap_nr_thps_dec(mapping);
        }

        /*
         * At this point folio must be either written or cleaned by
         * truncate.  Dirty folio here signals a bug and loss of
         * unwritten data - on ordinary filesystems.
         *
         * But it's harmless on in-memory filesystems like tmpfs; and can
         * occur when a driver which did get_user_pages() sets page dirty
         * before putting it, while the inode is being finally evicted.
         *
         * Below fixes dirty accounting after removing the folio entirely
         * but leaves the dirty flag set: it has no effect for truncated
         * folio and anyway will be cleared before returning folio to
         * buddy allocator.
         */
        if (WARN_ON_ONCE(folio_test_dirty(folio) &&
                         mapping_can_writeback(mapping)))
                folio_account_cleaned(folio, inode_to_wb(mapping->host));
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the i_pages lock.
 */
void __filemap_remove_folio(struct folio *folio, void *shadow)
{
        struct address_space *mapping = folio->mapping;

        trace_mm_filemap_delete_from_page_cache(folio);
        filemap_unaccount_folio(mapping, folio);
        page_cache_delete(mapping, folio, shadow);
}

void filemap_free_folio(struct address_space *mapping, struct folio *folio)
{
        void (*free_folio)(struct folio *);
        int refs = 1;

        free_folio = mapping->a_ops->free_folio;
        if (free_folio)
                free_folio(folio);

        if (folio_test_large(folio))
                refs = folio_nr_pages(folio);
        folio_put_refs(folio, refs);
}

/**
 * filemap_remove_folio - Remove folio from page cache.
 * @folio: The folio.
 *
 * This must be called only on folios that are locked and have been
 * verified to be in the page cache.  It will never put the folio into
 * the free list because the caller has a reference on the page.
 */
void filemap_remove_folio(struct folio *folio)
{
        struct address_space *mapping = folio->mapping;

        BUG_ON(!folio_test_locked(folio));
        spin_lock(&mapping->host->i_lock);
        xa_lock_irq(&mapping->i_pages);
        __filemap_remove_folio(folio, NULL);
        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))
                inode_add_lru(mapping->host);
        spin_unlock(&mapping->host->i_lock);

        filemap_free_folio(mapping, folio);
}
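/*
 * Example (not part of this file): a caller holding its own reference and
 * the folio lock can drop a folio from the cache like this; "my_drop_folio"
 * is a hypothetical helper, a minimal sketch of the expected usage:
 *
 *      static void my_drop_folio(struct folio *folio)
 *      {
 *              folio_lock(folio);
 *              if (folio->mapping)             // still in the page cache?
 *                      filemap_remove_folio(folio);
 *              folio_unlock(folio);
 *              folio_put(folio);               // drop our own reference
 *      }
 */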

/*
 * page_cache_delete_batch - delete several folios from page cache
 * @mapping: the mapping to which folios belong
 * @fbatch: batch of folios to delete
 *
 * The function walks over mapping->i_pages and removes folios passed in
 * @fbatch from the mapping. The function expects @fbatch to be sorted
 * by page index and is optimised for it to be dense.
 * It tolerates holes in @fbatch (mapping entries at those indices are not
 * modified).
 *
 * The function expects the i_pages lock to be held.
 */
static void page_cache_delete_batch(struct address_space *mapping,
                             struct folio_batch *fbatch)
{
        XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
        long total_pages = 0;
        int i = 0;
        struct folio *folio;

        mapping_set_update(&xas, mapping);
        xas_for_each(&xas, folio, ULONG_MAX) {
                if (i >= folio_batch_count(fbatch))
                        break;

                /* A swap/dax/shadow entry got inserted? Skip it. */
                if (xa_is_value(folio))
                        continue;
                /*
                 * A page got inserted in our range? Skip it. We have our
                 * pages locked so they are protected from being removed.
                 * If we see a page whose index is higher than ours, it
                 * means our page has been removed, which shouldn't be
                 * possible because we're holding the PageLock.
                 */
                if (folio != fbatch->folios[i]) {
                        VM_BUG_ON_FOLIO(folio->index >
                                        fbatch->folios[i]->index, folio);
                        continue;
                }

                WARN_ON_ONCE(!folio_test_locked(folio));

                folio->mapping = NULL;
                /* Leave folio->index set: truncation lookup relies on it */

                i++;
                xas_store(&xas, NULL);
                total_pages += folio_nr_pages(folio);
        }
        mapping->nrpages -= total_pages;
}

void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct folio_batch *fbatch)
{
        int i;

        if (!folio_batch_count(fbatch))
                return;

        spin_lock(&mapping->host->i_lock);
        xa_lock_irq(&mapping->i_pages);
        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                trace_mm_filemap_delete_from_page_cache(folio);
                filemap_unaccount_folio(mapping, folio);
        }
        page_cache_delete_batch(mapping, fbatch);
        xa_unlock_irq(&mapping->i_pages);
        if (mapping_shrinkable(mapping))
                inode_add_lru(mapping->host);
        spin_unlock(&mapping->host->i_lock);

        for (i = 0; i < folio_batch_count(fbatch); i++)
                filemap_free_folio(mapping, fbatch->folios[i]);
}
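/*
 * Example (not part of this file): truncation-style callers collect locked
 * folios into a folio_batch and drop them from the cache in one pass; a
 * minimal sketch, assuming every folio in the batch is locked and still
 * belongs to @mapping:
 *
 *      struct folio_batch fbatch;
 *
 *      folio_batch_init(&fbatch);
 *      // ... folio_batch_add(&fbatch, folio) for each locked folio ...
 *      delete_from_page_cache_batch(mapping, &fbatch);
 *      // ... folio_unlock() each folio, then drop the lookup references:
 *      folio_batch_release(&fbatch);
 */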

int filemap_check_errors(struct address_space *mapping)
{
        int ret = 0;
        /* Check for outstanding write errors */
        if (test_bit(AS_ENOSPC, &mapping->flags) &&
            test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
        if (test_bit(AS_EIO, &mapping->flags) &&
            test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;
        return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
        /* Check for outstanding write errors */
        if (test_bit(AS_EIO, &mapping->flags))
                return -EIO;
        if (test_bit(AS_ENOSPC, &mapping->flags))
                return -ENOSPC;
        return 0;
}
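/*
 * Illustration (not part of this file): these helpers consume the legacy
 * AS_EIO/AS_ENOSPC bits that writeback sets via mapping_set_error().  A
 * failed writeback followed by two checks behaves like this:
 *
 *      mapping_set_error(mapping, -EIO);       // done by writeback code
 *      err = filemap_check_errors(mapping);    // returns -EIO, clears bit
 *      err = filemap_check_errors(mapping);    // now returns 0
 *
 * filemap_check_and_keep_errors() reports the same condition but leaves
 * the bits set for a later caller.
 */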

/**
 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
 * @mapping:    address space structure to write
 * @wbc:        the writeback_control controlling the writeout
 *
 * Call writepages on the mapping using the provided wbc to control the
 * writeout.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_fdatawrite_wbc(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        int ret;

        if (!mapping_can_writeback(mapping) ||
            !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                return 0;

        wbc_attach_fdatawrite_inode(wbc, mapping->host);
        ret = do_writepages(mapping, wbc);
        wbc_detach_inode(wbc);
        return ret;
}
EXPORT_SYMBOL(filemap_fdatawrite_wbc);

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:    address space structure to write
 * @start:      offset in bytes where the range starts
 * @end:        offset in bytes where the range ends (inclusive)
 * @sync_mode:  enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                                loff_t end, int sync_mode)
{
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
                .nr_to_write = LONG_MAX,
                .range_start = start,
                .range_end = end,
        };

        return filemap_fdatawrite_wbc(mapping, &wbc);
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
        int sync_mode)
{
        return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                                loff_t end)
{
        return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:    target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_flush(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
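/*
 * Illustration (not part of this file): the two sync modes in practice.
 * Background cleaning uses the non-blocking flavour, fsync-like code the
 * data-integrity one:
 *
 *      filemap_flush(mapping);         // WB_SYNC_NONE, may skip busy pages
 *      filemap_fdatawrite_range(mapping, 0, LLONG_MAX); // WB_SYNC_ALL
 */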

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping:           address space within which to check
 * @start_byte:        offset in bytes where the range starts
 * @end_byte:          offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 *
 * Return: %true if at least one page exists in the specified range,
 * %false otherwise.
 */
bool filemap_range_has_page(struct address_space *mapping,
                           loff_t start_byte, loff_t end_byte)
{
        struct folio *folio;
        XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
        pgoff_t max = end_byte >> PAGE_SHIFT;

        if (end_byte < start_byte)
                return false;

        rcu_read_lock();
        for (;;) {
                folio = xas_find(&xas, max);
                if (xas_retry(&xas, folio))
                        continue;
                /* Shadow entries don't count */
                if (xa_is_value(folio))
                        continue;
                /*
                 * We don't need to try to pin this page; we're about to
                 * release the RCU lock anyway.  It is enough to know that
                 * there was a page here recently.
                 */
                break;
        }
        rcu_read_unlock();

        return folio != NULL;
}
EXPORT_SYMBOL(filemap_range_has_page);
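/*
 * Example (not part of this file): non-blocking direct I/O paths commonly
 * use this check to bail out rather than block on flushing the page cache;
 * a sketch of the pattern:
 *
 *      if ((iocb->ki_flags & IOCB_NOWAIT) &&
 *          filemap_range_has_page(mapping, pos, pos + count - 1))
 *              return -EAGAIN;
 */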

static void __filemap_fdatawait_range(struct address_space *mapping,
                                     loff_t start_byte, loff_t end_byte)
{
        pgoff_t index = start_byte >> PAGE_SHIFT;
        pgoff_t end = end_byte >> PAGE_SHIFT;
        struct folio_batch fbatch;
        unsigned nr_folios;

        folio_batch_init(&fbatch);

        while (index <= end) {
                unsigned i;

                nr_folios = filemap_get_folios_tag(mapping, &index, end,
                                PAGECACHE_TAG_WRITEBACK, &fbatch);

                if (!nr_folios)
                        break;

                for (i = 0; i < nr_folios; i++) {
                        struct folio *folio = fbatch.folios[i];

                        folio_wait_writeback(folio);
                }
                folio_batch_release(&fbatch);
                cond_resched();
        }
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:            address space structure to wait for
 * @start_byte:         offset in bytes where the range starts
 * @end_byte:           offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
                            loff_t end_byte)
{
        __filemap_fdatawait_range(mapping, start_byte, end_byte);
        return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping:            address space structure to wait for
 * @start_byte:         offset in bytes where the range starts
 * @end_byte:           offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space in the
 * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
 * this function does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
                loff_t start_byte, loff_t end_byte)
{
        __filemap_fdatawait_range(mapping, start_byte, end_byte);
        return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file:               file pointing to address space structure to wait for
 * @start_byte:         offset in bytes where the range starts
 * @end_byte:           offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them.  Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space vs. the file->f_wb_err cursor.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
        struct address_space *mapping = file->f_mapping;

        __filemap_fdatawait_range(mapping, start_byte, end_byte);
        return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
        __filemap_fdatawait_range(mapping, 0, LLONG_MAX);
        return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
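/*
 * Illustration (not part of this file): the _keep_errors variants exist for
 * callers that have no file descriptor to report through.  sync(2)-style
 * code does, in effect:
 *
 *      filemap_fdatawrite(mapping);
 *      filemap_fdatawait_keep_errors(mapping); // error bits stay set
 *
 * so that a later fsync(fd) on the same inode can still observe and report
 * the failure.
 */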

/* Returns true if writeback might be needed or already in progress. */
static bool mapping_needs_writeback(struct address_space *mapping)
{
        return mapping->nrpages;
}

bool filemap_range_has_writeback(struct address_space *mapping,
                                 loff_t start_byte, loff_t end_byte)
{
        XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
        pgoff_t max = end_byte >> PAGE_SHIFT;
        struct folio *folio;

        if (end_byte < start_byte)
                return false;

        rcu_read_lock();
        xas_for_each(&xas, folio, max) {
                if (xas_retry(&xas, folio))
                        continue;
                if (xa_is_value(folio))
                        continue;
                if (folio_test_dirty(folio) || folio_test_locked(folio) ||
                                folio_test_writeback(folio))
                        break;
        }
        rcu_read_unlock();
        return folio != NULL;
}
EXPORT_SYMBOL_GPL(filemap_range_has_writeback);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:    the address_space for the pages
 * @lstart:     offset in bytes where the range starts
 * @lend:       offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * Return: error status of the address space.
 */
int filemap_write_and_wait_range(struct address_space *mapping,
                                 loff_t lstart, loff_t lend)
{
        int err = 0, err2;

        if (lend < lstart)
                return 0;

        if (mapping_needs_writeback(mapping)) {
                err = __filemap_fdatawrite_range(mapping, lstart, lend,
                                                 WB_SYNC_ALL);
                /*
                 * Even if the above returned error, the pages may be
                 * written partially (e.g. -ENOSPC), so we wait for it.
                 * But the -EIO is special case, it may indicate the worst
                 * thing (e.g. bug) happened, so we avoid waiting for it.
                 */
                if (err != -EIO)
                        __filemap_fdatawait_range(mapping, lstart, lend);
        }
        err2 = filemap_check_errors(mapping);
        if (!err)
                err = err2;
        return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
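/*
 * Example (not part of this file): this is typically the first step of an
 * ->fsync() implementation; a minimal sketch for a simple filesystem
 * ("my_fsync" is hypothetical; see __generic_file_fsync() for a real one):
 *
 *      static int my_fsync(struct file *file, loff_t start, loff_t end,
 *                          int datasync)
 *      {
 *              int err;
 *
 *              err = filemap_write_and_wait_range(file->f_mapping,
 *                                                 start, end);
 *              if (err)
 *                      return err;
 *              // ... then flush metadata / commit the journal ...
 *              return 0;
 *      }
 */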

void __filemap_set_wb_err(struct address_space *mapping, int err)
{
        errseq_t eseq = errseq_set(&mapping->wb_err, err);

        trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);

/**
 * file_check_and_advance_wb_err - report wb error (if any) that was previously
 *                                 and advance wb_err to current one
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (a'la fsync, or NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_check_and_advance_wb_err(struct file *file)
{
        int err = 0;
        errseq_t old = READ_ONCE(file->f_wb_err);
        struct address_space *mapping = file->f_mapping;

        /* Locklessly handle the common case where nothing has changed */
        if (errseq_check(&mapping->wb_err, old)) {
                /* Something changed, must use slow path */
                spin_lock(&file->f_lock);
                old = file->f_wb_err;
                err = errseq_check_and_advance(&mapping->wb_err,
                                                &file->f_wb_err);
                trace_file_check_and_advance_wb_err(file, old);
                spin_unlock(&file->f_lock);
        }

        /*
         * We're mostly using this function as a drop in replacement for
         * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
         * that the legacy code would have had on these flags.
         */
        clear_bit(AS_EIO, &mapping->flags);
        clear_bit(AS_ENOSPC, &mapping->flags);
        return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);
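/*
 * Illustration (not part of this file): the errseq_t scheme means each
 * struct file sees a given writeback error at most once:
 *
 *      // writeback fails:  errseq_set(&mapping->wb_err, -EIO)
 *      file_check_and_advance_wb_err(file);   // returns -EIO, advances cursor
 *      file_check_and_advance_wb_err(file);   // returns 0
 *
 * while another struct file open on the same inode, whose f_wb_err cursor
 * has not yet advanced past the error, still gets -EIO on its first check.
 */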

/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file:       file pointing to address_space with pages
 * @lstart:     offset in bytes where the range starts
 * @lend:       offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
        int err = 0, err2;
        struct address_space *mapping = file->f_mapping;

        if (lend < lstart)
                return 0;

        if (mapping_needs_writeback(mapping)) {
                err = __filemap_fdatawrite_range(mapping, lstart, lend,
                                                 WB_SYNC_ALL);
                /* See comment of filemap_write_and_wait() */
                if (err != -EIO)
                        __filemap_fdatawait_range(mapping, lstart, lend);
        }
        err2 = file_check_and_advance_wb_err(file);
        if (!err)
                err = err2;
        return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);

/**
 * replace_page_cache_folio - replace a pagecache folio with a new one
 * @old:        folio to be replaced
 * @new:        folio to replace with
 *
 * This function replaces a folio in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new folio and
 * drops it for the old folio.  Both the old and new folios must be
 * locked.  This function does not add the new folio to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  This function cannot fail.
 */
void replace_page_cache_folio(struct folio *old, struct folio *new)
{
        struct address_space *mapping = old->mapping;
        void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
        pgoff_t offset = old->index;
        XA_STATE(xas, &mapping->i_pages, offset);

        VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
        VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
        VM_BUG_ON_FOLIO(new->mapping, new);

        folio_get(new);
        new->mapping = mapping;
        new->index = offset;

        mem_cgroup_replace_folio(old, new);

        xas_lock_irq(&xas);
        xas_store(&xas, new);

        old->mapping = NULL;
        /* hugetlb pages do not participate in page cache accounting. */
        if (!folio_test_hugetlb(old))
                __lruvec_stat_sub_folio(old, NR_FILE_PAGES);
        if (!folio_test_hugetlb(new))
                __lruvec_stat_add_folio(new, NR_FILE_PAGES);
        if (folio_test_swapbacked(old))
                __lruvec_stat_sub_folio(old, NR_SHMEM);
        if (folio_test_swapbacked(new))
                __lruvec_stat_add_folio(new, NR_SHMEM);
        xas_unlock_irq(&xas);
        if (free_folio)
                free_folio(old);
        folio_put(old);
}
EXPORT_SYMBOL_GPL(replace_page_cache_folio);
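/*
 * Example (not part of this file): a sketch of the replacement pattern,
 * assuming @old is locked and in the cache and @new is a locked, fresh
 * folio of the same order:
 *
 *      replace_page_cache_folio(old, new);     // atomic swap in ->i_pages
 *      folio_add_lru(new);                     // caller must add to LRU
 *      folio_unlock(old);
 *      folio_put(old);                         // drop our reference to @old
 */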

noinline int __filemap_add_folio(struct address_space *mapping,
                struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
{
        XA_STATE(xas, &mapping->i_pages, index);
        void *alloced_shadow = NULL;
        int alloced_order = 0;
        bool huge;
        long nr;

        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
        VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
                        folio);
        mapping_set_update(&xas, mapping);

        VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
        xas_set_order(&xas, index, folio_order(folio));
        huge = folio_test_hugetlb(folio);
        nr = folio_nr_pages(folio);

        gfp &= GFP_RECLAIM_MASK;
        folio_ref_add(folio, nr);
        folio->mapping = mapping;
        folio->index = xas.xa_index;

        for (;;) {
                int order = -1, split_order = 0;
                void *entry, *old = NULL;

                xas_lock_irq(&xas);
                xas_for_each_conflict(&xas, entry) {
                        old = entry;
                        if (!xa_is_value(entry)) {
                                xas_set_err(&xas, -EEXIST);
                                goto unlock;
                        }
                        /*
                         * If a larger entry exists,
                         * it will be the first and only entry iterated.
                         */
                        if (order == -1)
                                order = xas_get_order(&xas);
                }

                /* entry may have changed before we re-acquire the lock */
                if (alloced_order && (old != alloced_shadow || order != alloced_order)) {
                        xas_destroy(&xas);
                        alloced_order = 0;
                }

                if (old) {
                        if (order > 0 && order > folio_order(folio)) {
                                /* How to handle large swap entries? */
                                BUG_ON(shmem_mapping(mapping));
                                if (!alloced_order) {
                                        split_order = order;
                                        goto unlock;
                                }
                                xas_split(&xas, old, order);
                                xas_reset(&xas);
                        }
                        if (shadowp)
                                *shadowp = old;
                }

                xas_store(&xas, folio);
                if (xas_error(&xas))
                        goto unlock;

                mapping->nrpages += nr;

                /* hugetlb pages do not participate in page cache accounting */
                if (!huge) {
                        __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
                        if (folio_test_pmd_mappable(folio))
                                __lruvec_stat_mod_folio(folio,
                                                NR_FILE_THPS, nr);
                }

unlock:
                xas_unlock_irq(&xas);

                /* split needed, alloc here and retry. */
                if (split_order) {
                        xas_split_alloc(&xas, old, split_order, gfp);
                        if (xas_error(&xas))
                                goto error;
                        alloced_shadow = old;
                        alloced_order = split_order;
                        xas_reset(&xas);
                        continue;
                }

                if (!xas_nomem(&xas, gfp))
                        break;
        }

        if (xas_error(&xas))
                goto error;

        trace_mm_filemap_add_to_page_cache(folio);
        return 0;
error:
        folio->mapping = NULL;
        /* Leave page->index set: truncation relies upon it */
        folio_put_refs(folio, nr);
        return xas_error(&xas);
}
ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);

int filemap_add_folio(struct address_space *mapping, struct folio *folio,
                                pgoff_t index, gfp_t gfp)
{
        void *shadow = NULL;
        int ret;

        ret = mem_cgroup_charge(folio, NULL, gfp);
        if (ret)
                return ret;

        __folio_set_locked(folio);
        ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
        if (unlikely(ret)) {
                mem_cgroup_uncharge(folio);
                __folio_clear_locked(folio);
        } else {
                /*
                 * The folio might have been evicted from cache only
                 * recently, in which case it should be activated like
                 * any other repeatedly accessed folio.
                 * The exception is folios getting rewritten; evicting other
                 * data from the working set, only to cache data that will
                 * get overwritten with something else, is a waste of memory.
                 */
                WARN_ON_ONCE(folio_test_active(folio));
                if (!(gfp & __GFP_WRITE) && shadow)
                        workingset_refault(folio, shadow);
                folio_add_lru(folio);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(filemap_add_folio);
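/*
 * Example (not part of this file): the common allocate-and-insert pattern
 * used by read paths; a minimal sketch without readahead:
 *
 *      struct folio *folio;
 *      gfp_t gfp = mapping_gfp_mask(mapping);
 *      int err;
 *
 *      folio = filemap_alloc_folio(gfp, 0);    // order-0 folio
 *      if (!folio)
 *              return -ENOMEM;
 *      err = filemap_add_folio(mapping, folio, index, gfp);
 *      if (err) {
 *              folio_put(folio);
 *              return err;             // -EEXIST means someone raced us
 *      }
 *      // on success the folio is locked, in the cache, and on the LRU
 */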
993                                                   
994 #ifdef CONFIG_NUMA                                
995 struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)
996 {
997         int n;
998         struct folio *folio;
999 
1000         if (cpuset_do_page_mem_spread()) {
1001                 unsigned int cpuset_mems_cookie;
1002                 do {
1003                         cpuset_mems_cookie = read_mems_allowed_begin();
1004                         n = cpuset_mem_spread_node();
1005                         folio = __folio_alloc_node_noprof(gfp, order, n);
1006                 } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
1007 
1008                 return folio;
1009         }
1010         return folio_alloc_noprof(gfp, order);
1011 }                                                
1012 EXPORT_SYMBOL(filemap_alloc_folio_noprof);       
1013 #endif                                           
1014                                                  
1015 /*
1016  * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
1017  *
1018  * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
1019  *
1020  * @mapping1: the first mapping to lock
1021  * @mapping2: the second mapping to lock
1022  */
1023 void filemap_invalidate_lock_two(struct address_space *mapping1,
1024                                  struct address_space *mapping2)
1025 {
1026         if (mapping1 > mapping2)
1027                 swap(mapping1, mapping2);
1028         if (mapping1)
1029                 down_write(&mapping1->invalidate_lock);
1030         if (mapping2 && mapping1 != mapping2)
1031                 down_write_nested(&mapping2->invalidate_lock, 1);
1032 }
1033 EXPORT_SYMBOL(filemap_invalidate_lock_two);
1034 
1035 /*
1036  * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
1037  *
1038  * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
1039  *
1040  * @mapping1: the first mapping to unlock
1041  * @mapping2: the second mapping to unlock
1042  */
1043 void filemap_invalidate_unlock_two(struct address_space *mapping1,
1044                                    struct address_space *mapping2)
1045 {
1046         if (mapping1)
1047                 up_write(&mapping1->invalidate_lock);
1048         if (mapping2 && mapping1 != mapping2)
1049                 up_write(&mapping2->invalidate_lock);
1050 }
1051 EXPORT_SYMBOL(filemap_invalidate_unlock_two);
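
/*
 * Illustrative sketch, not part of filemap.c: an operation that spans two
 * files (dedupe/remap style) can take both invalidate_locks with
 * filemap_invalidate_lock_two().  Because the helper sorts the mappings by
 * address first, two tasks locking the same pair cannot deadlock, and NULL
 * or identical mappings are handled too.  The myfs_* name is hypothetical.
 */
static void myfs_invalidate_both(struct inode *src, struct inode *dst)
{
        filemap_invalidate_lock_two(src->i_mapping, dst->i_mapping);
        /* ... invalidate/remap the page cache of both files ... */
        filemap_invalidate_unlock_two(src->i_mapping, dst->i_mapping);
}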
1052                                                  
1053 /*
1054  * In order to wait for pages to become available there must be
1055  * waitqueues associated with pages. By using a hash table of
1056  * waitqueues where the bucket discipline is to maintain all
1057  * waiters on the same queue and wake all when any of the pages
1058  * become available, and for the woken contexts to check to be
1059  * sure the appropriate page became available, this saves space
1060  * at a cost of "thundering herd" phenomena during rare hash
1061  * collisions.
1062  */
1063 #define PAGE_WAIT_TABLE_BITS 8
1064 #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
1065 static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
1066 
1067 static wait_queue_head_t *folio_waitqueue(struct folio *folio)
1068 {
1069         return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
1070 }
1071 
1072 void __init pagecache_init(void)
1073 {
1074         int i;
1075 
1076         for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
1077                 init_waitqueue_head(&folio_wait_table[i]);
1078                                                  
1079         page_writeback_init();                   
1080 }                                                
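
/*
 * Illustrative sketch, not part of filemap.c: since the table has only
 * 2^PAGE_WAIT_TABLE_BITS buckets, two unrelated folios can hash to the
 * same wait queue, so a wakeup may briefly wake waiters for a different
 * folio; wake_page_function() below filters those out via wake_page_match().
 */
static bool folios_share_waitqueue(struct folio *a, struct folio *b)
{
        /* Can be true even when a != b (hash collision). */
        return folio_waitqueue(a) == folio_waitqueue(b);
}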
1081                                                  
1082 /*
1083  * The page wait code treats the "wait->flags" somewhat unusually, because
1084  * we have multiple different kinds of waits, not just the usual "exclusive"
1085  * one.
1086  *
1087  * We have:
1088  *
1089  *  (a) no special bits set:
1090  *
1091  *      We're just waiting for the bit to be released, and when a waker
1092  *      calls the wakeup function, we set WQ_FLAG_WOKEN, wake it up,
1093  *      and remove it from the wait queue.
1094  *
1095  *      Simple and straightforward.
1096  *
1097  *  (b) WQ_FLAG_EXCLUSIVE:
1098  *
1099  *      The waiter is waiting to get the lock, and only one waiter should
1100  *      be woken up to avoid any thundering herd behavior. We'll set the
1101  *      WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
1102  *
1103  *      This is the traditional exclusive wait.
1104  *
1105  *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
1106  *
1107  *      The waiter is waiting to get the bit, and additionally wants the
1108  *      lock to be transferred to it for fair lock behavior. If the lock
1109  *      cannot be taken, we stop walking the wait queue without waking
1110  *      the waiter.
1111  *
1112  *      This is the "fair lock handoff" case, and in addition to setting
1113  *      WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
1114  *      that it now has the lock.
1115  */
1116 static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
1117 {
1118         unsigned int flags;
1119         struct wait_page_key *key = arg;
1120         struct wait_page_queue *wait_page
1121                 = container_of(wait, struct wait_page_queue, wait);
1122 
1123         if (!wake_page_match(wait_page, key))
1124                 return 0;
1125 
1126         /*
1127          * If it's a lock handoff wait, we get the bit for it, and
1128          * stop walking (and do not wake it up) if we can't.
1129          */
1130         flags = wait->flags;
1131         if (flags & WQ_FLAG_EXCLUSIVE) {
1132                 if (test_bit(key->bit_nr, &key->folio->flags))
1133                         return -1;
1134                 if (flags & WQ_FLAG_CUSTOM) {
1135                         if (test_and_set_bit(key->bit_nr, &key->folio->flags))
1136                                 return -1;
1137                         flags |= WQ_FLAG_DONE;
1138                 }
1139         }
1140 
1141         /*
1142          * We are holding the wait-queue lock, but the waiter that
1143          * is waiting for this will be checking the flags without
1144          * any locking.
1145          *
1146          * So update the flags atomically, and wake up the waiter
1147          * afterwards to avoid any races. This store-release pairs
1148          * with the load-acquire in folio_wait_bit_common().
1149          */
1150         smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
1151         wake_up_state(wait->private, mode);
1152 
1153         /*
1154          * Ok, we have successfully done what we're waiting for,
1155          * and we can unconditionally remove the wait entry.
1156          *
1157          * Note that this pairs with the "finish_wait()" in the
1158          * waiter, and has to be the absolute last thing we do.
1159          * After this list_del_init(&wait->entry) the wait entry
1160          * might be de-allocated and the process might even have
1161          * exited.
1162          */
1163         list_del_init_careful(&wait->entry);
1164         return (flags & WQ_FLAG_EXCLUSIVE) != 0;
1165 }                                                
1166                                                  
1167 static void folio_wake_bit(struct folio *folio, int bit_nr)
1168 {
1169         wait_queue_head_t *q = folio_waitqueue(folio);
1170         struct wait_page_key key;
1171         unsigned long flags;
1172 
1173         key.folio = folio;
1174         key.bit_nr = bit_nr;
1175         key.page_match = 0;
1176 
1177         spin_lock_irqsave(&q->lock, flags);
1178         __wake_up_locked_key(q, TASK_NORMAL, &key);
1179 
1180         /*
1181          * It's possible to miss clearing waiters here, when we woke our page
1182          * waiters, but the hashed waitqueue has waiters for other pages on it.
1183          * That's okay, it's a rare case. The next waker will clear it.
1184          *
1185          * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
1186          * other), the flag may be cleared in the course of freeing the page;
1187          * but that is not required for correctness.
1188          */
1189         if (!waitqueue_active(q) || !key.page_match)
1190                 folio_clear_waiters(folio);
1191 
1192         spin_unlock_irqrestore(&q->lock, flags);
1193 }                                                
1194                                                  
1195 /*
1196  * A choice of three behaviors for folio_wait_bit_common():
1197  */
1198 enum behavior {
1199         EXCLUSIVE,      /* Hold ref to page and take the bit when woken, like
1200                          * __folio_lock() waiting on then setting PG_locked.
1201                          */
1202         SHARED,         /* Hold ref to page and check the bit when woken, like
1203                          * folio_wait_writeback() waiting on PG_writeback.
1204                          */
1205         DROP,           /* Drop ref to page before wait, no check when woken,
1206                          * like folio_put_wait_locked() on PG_locked.
1207                          */
1208 };
1209                                                  
1210 /*
1211  * Attempt to check (or get) the folio flag, and mark us done
1212  * if successful.
1213  */
1214 static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
1215                                         struct wait_queue_entry *wait)
1216 {
1217         if (wait->flags & WQ_FLAG_EXCLUSIVE) {
1218                 if (test_and_set_bit(bit_nr, &folio->flags))
1219                         return false;
1220         } else if (test_bit(bit_nr, &folio->flags))
1221                 return false;
1222 
1223         wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
1224         return true;
1225 }                                                
1226                                                  
1227 /* How many times do we accept lock stealing from under a waiter? */
1228 int sysctl_page_lock_unfairness = 5;             
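/*
 * Added note: this is tunable at runtime via /proc/sys/vm/page_lock_unfairness.
 * 0 hands the lock off fairly on every wakeup; larger values allow that many
 * lock "steals" from under a waiter before falling back to a fair handoff.
 */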
1229                                                  
1230 static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
1231                 int state, enum behavior behavior)
1232 {
1233         wait_queue_head_t *q = folio_waitqueue(folio);
1234         int unfairness = sysctl_page_lock_unfairness;
1235         struct wait_page_queue wait_page;
1236         wait_queue_entry_t *wait = &wait_page.wait;
1237         bool thrashing = false;
1238         unsigned long pflags;
1239         bool in_thrashing;
1240 
1241         if (bit_nr == PG_locked &&
1242             !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1243                 delayacct_thrashing_start(&in_thrashing);
1244                 psi_memstall_enter(&pflags);
1245                 thrashing = true;
1246         }
1247 
1248         init_wait(wait);
1249         wait->func = wake_page_function;
1250         wait_page.folio = folio;
1251         wait_page.bit_nr = bit_nr;
1252 
1253 repeat:
1254         wait->flags = 0;
1255         if (behavior == EXCLUSIVE) {
1256                 wait->flags = WQ_FLAG_EXCLUSIVE;
1257                 if (--unfairness < 0)
1258                         wait->flags |= WQ_FLAG_CUSTOM;
1259         }
1260 
1261         /*
1262          * Do one last check whether we can get the
1263          * page bit synchronously.
1264          *
1265          * Do the folio_set_waiters() marking before that
1266          * to let any waker we _just_ missed know they
1267          * need to wake us up (otherwise they'll never
1268          * even go to the slow case that looks at the
1269          * page queue), and add ourselves to the wait
1270          * queue if we need to sleep.
1271          *
1272          * This part needs to be done under the queue
1273          * lock to avoid races.
1274          */
1275         spin_lock_irq(&q->lock);
1276         folio_set_waiters(folio);
1277         if (!folio_trylock_flag(folio, bit_nr, wait))
1278                 __add_wait_queue_entry_tail(q, wait);
1279         spin_unlock_irq(&q->lock);
1280 
1281         /*
1282          * From now on, all the logic will be based on
1283          * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
1284          * see whether the page bit testing has already
1285          * been done by the wake function.
1286          *
1287          * We can drop our reference to the folio.
1288          */
1289         if (behavior == DROP)
1290                 folio_put(folio);
1291 
1292         /*
1293          * Note that until the "finish_wait()", or until
1294          * we see the WQ_FLAG_WOKEN flag, we need to
1295          * be very careful with the 'wait->flags', because
1296          * we may race with a waker that sets them.
1297          */
1298         for (;;) {
1299                 unsigned int flags;
1300 
1301                 set_current_state(state);
1302 
1303                 /* Loop until we've been woken or interrupted */
1304                 flags = smp_load_acquire(&wait->flags);
1305                 if (!(flags & WQ_FLAG_WOKEN)) {
1306                         if (signal_pending_state(state, current))
1307                                 break;
1308 
1309                         io_schedule();
1310                         continue;
1311                 }
1312 
1313                 /* If we were non-exclusive, we're done */
1314                 if (behavior != EXCLUSIVE)
1315                         break;
1316 
1317                 /* If the waker got the lock for us, we're done */
1318                 if (flags & WQ_FLAG_DONE)
1319                         break;
1320 
1321                 /*
1322                  * Otherwise, if we're getting the lock, we need to
1323                  * try to get it ourselves.
1324                  *
1325                  * And if that fails, we'll have to retry this all.
1326                  */
1327                 if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
1328                         goto repeat;
1329 
1330                 wait->flags |= WQ_FLAG_DONE;
1331                 break;
1332         }
1333 
1334         /*
1335          * If a signal happened, this 'finish_wait()' may remove the last
1336          * waiter from the wait-queues, but the folio waiters bit will remain
1337          * set. That's ok. The next wakeup will take care of it, and trying
1338          * to do it here would be difficult and prone to races.
1339          */
1340         finish_wait(q, wait);
1341 
1342         if (thrashing) {
1343                 delayacct_thrashing_end(&in_thrashing);
1344                 psi_memstall_leave(&pflags);
1345         }
1346 
1347         /*
1348          * NOTE! The wait->flags weren't stable until we've done the
1349          * 'finish_wait()', and we could have exited the loop above due
1350          * to a signal, and had a wakeup event happen after the signal
1351          * test but before the 'finish_wait()'.
1352          *
1353          * So only after the finish_wait() can we reliably determine
1354          * if we got woken up or not, so we can now unconditionally set the
1355          * return value based on that state without worrying about any races.
1356          *
1357          * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
1358          * waiter, but an exclusive one requires WQ_FLAG_DONE.
1359          */
1360         if (behavior == EXCLUSIVE)
1361                 return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
1362 
1363         return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
1364 }                                                
1365                                                  
1366 #ifdef CONFIG_MIGRATION                          
1367 /**                                              
1368  * migration_entry_wait_on_locked - Wait for a migration entry to be removed
1369  * @entry: migration swap entry.
1370  * @ptl: already locked ptl. This function will drop the lock.
1371  *
1372  * Wait for a migration entry referencing the given page to be removed. This is
1373  * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except
1374  * this can be called without taking a reference on the page. Instead this
1375  * should be called while holding the ptl for the migration entry referencing
1376  * the page.
1377  *
1378  * Returns after unlocking the ptl.
1379  *
1380  * This follows the same logic as folio_wait_bit_common() so see the comments
1381  * there.
1382  */
1383 void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
1384         __releases(ptl)
1385 {
1386         struct wait_page_queue wait_page;
1387         wait_queue_entry_t *wait = &wait_page.wait;
1388         bool thrashing = false;
1389         unsigned long pflags;
1390         bool in_thrashing;
1391         wait_queue_head_t *q;
1392         struct folio *folio = pfn_swap_entry_folio(entry);
1393 
1394         q = folio_waitqueue(folio);
1395         if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1396                 delayacct_thrashing_start(&in_thrashing);
1397                 psi_memstall_enter(&pflags);
1398                 thrashing = true;
1399         }
1400 
1401         init_wait(wait);
1402         wait->func = wake_page_function;
1403         wait_page.folio = folio;
1404         wait_page.bit_nr = PG_locked;
1405         wait->flags = 0;
1406 
1407         spin_lock_irq(&q->lock);
1408         folio_set_waiters(folio);
1409         if (!folio_trylock_flag(folio, PG_locked, wait))
1410                 __add_wait_queue_entry_tail(q, wait);
1411         spin_unlock_irq(&q->lock);
1412 
1413         /*
1414          * If a migration entry exists for the page the migration path must hold
1415          * a valid reference to the page, and it must take the ptl to remove the
1416          * migration entry. So the page is valid until the ptl is dropped.
1417          */
1418         spin_unlock(ptl);
1419 
1420         for (;;) {
1421                 unsigned int flags;
1422 
1423                 set_current_state(TASK_UNINTERRUPTIBLE);
1424 
1425                 /* Loop until we've been woken or interrupted */
1426                 flags = smp_load_acquire(&wait->flags);
1427                 if (!(flags & WQ_FLAG_WOKEN)) {
1428                         if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
1429                                 break;
1430 
1431                         io_schedule();
1432                         continue;
1433                 }
1434                 break;
1435         }
1436 
1437         finish_wait(q, wait);
1438 
1439         if (thrashing) {
1440                 delayacct_thrashing_end(&in_thrashing);
1441                 psi_memstall_leave(&pflags);
1442         }
1443 }                                                
1444 #endif                                           
1445                                                  
1446 void folio_wait_bit(struct folio *folio, int bit_nr)
1447 {
1448         folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1449 }
1450 EXPORT_SYMBOL(folio_wait_bit);
1451 
1452 int folio_wait_bit_killable(struct folio *folio, int bit_nr)
1453 {
1454         return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
1455 }
1456 EXPORT_SYMBOL(folio_wait_bit_killable);          
1457                                                  
1458 /**
1459  * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
1460  * @folio: The folio to wait for.
1461  * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
1462  *
1463  * The caller should hold a reference on @folio.  They expect the page to
1464  * become unlocked relatively soon, but do not wish to hold up migration
1465  * (for example) by holding the reference while waiting for the folio to
1466  * come unlocked.  After this function returns, the caller should not
1467  * dereference @folio.
1468  *
1469  * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
1470  */
1471 static int folio_put_wait_locked(struct folio *folio, int state)
1472 {
1473         return folio_wait_bit_common(folio, PG_locked, state, DROP);
1474 }
1475                                                  
1476 /**
1477  * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
1478  * @folio: Folio defining the wait queue of interest
1479  * @waiter: Waiter to add to the queue
1480  *
1481  * Add an arbitrary @waiter to the wait queue for the nominated @folio.
1482  */
1483 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
1484 {
1485         wait_queue_head_t *q = folio_waitqueue(folio);
1486         unsigned long flags;
1487 
1488         spin_lock_irqsave(&q->lock, flags);
1489         __add_wait_queue_entry_tail(q, waiter);
1490         folio_set_waiters(folio);
1491         spin_unlock_irqrestore(&q->lock, flags);
1492 }
1493 EXPORT_SYMBOL_GPL(folio_add_wait_queue);         
1494                                                  
1495 /**
1496  * folio_unlock - Unlock a locked folio.
1497  * @folio: The folio.
1498  *
1499  * Unlocks the folio and wakes up any thread sleeping on the page lock.
1500  *
1501  * Context: May be called from interrupt or process context.  May not be
1502  * called from NMI context.
1503  */
1504 void folio_unlock(struct folio *folio)
1505 {
1506         /* Bit 7 allows x86 to check the byte's sign bit */
1507         BUILD_BUG_ON(PG_waiters != 7);
1508         BUILD_BUG_ON(PG_locked > 7);
1509         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1510         if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))
1511                 folio_wake_bit(folio, PG_locked);
1512 }
1513 EXPORT_SYMBOL(folio_unlock);                     
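
/*
 * Illustrative sketch, not part of filemap.c: the canonical lock/check/unlock
 * pattern.  Holding the folio lock stabilises folio->mapping, so callers
 * commonly re-check for truncation right after locking.  The myfs_* name is
 * hypothetical.
 */
static bool myfs_lock_and_check(struct folio *folio,
                                struct address_space *mapping)
{
        folio_lock(folio);
        if (folio->mapping != mapping) {        /* truncated meanwhile? */
                folio_unlock(folio);
                return false;
        }
        return true;    /* still attached; caller must folio_unlock() */
}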
1514                                                  
1515 /**
1516  * folio_end_read - End read on a folio.
1517  * @folio: The folio.
1518  * @success: True if all reads completed successfully.
1519  *
1520  * When all reads against a folio have completed, filesystems should
1521  * call this function to let the pagecache know that no more reads
1522  * are outstanding.  This will unlock the folio and wake up any thread
1523  * sleeping on the lock.  The folio will also be marked uptodate if all
1524  * reads succeeded.
1525  *
1526  * Context: May be called from interrupt or process context.  May not be
1527  * called from NMI context.
1528  */
1529 void folio_end_read(struct folio *folio, bool success)
1530 {
1531         unsigned long mask = 1 << PG_locked;
1532 
1533         /* Must be in bottom byte for x86 to work */
1534         BUILD_BUG_ON(PG_uptodate > 7);
1535         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1536         VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);
1537 
1538         if (likely(success))
1539                 mask |= 1 << PG_uptodate;
1540         if (folio_xor_flags_has_waiters(folio, mask))
1541                 folio_wake_bit(folio, PG_locked);
1542 }
1543 EXPORT_SYMBOL(folio_end_read);                   
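
/*
 * Illustrative sketch, not part of filemap.c: a synchronous ->read_folio()
 * implementation would end with folio_end_read(), which in a single atomic
 * flag update marks the folio uptodate (on success), unlocks it, and wakes
 * any waiters.  myfs_fill_folio() is a hypothetical helper that reads the
 * folio's contents from storage.
 */
static int myfs_read_folio(struct file *file, struct folio *folio)
{
        int err = myfs_fill_folio(folio);

        folio_end_read(folio, err == 0);
        return err;
}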
1544                                                  
1545 /**
1546  * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
1547  * @folio: The folio.
1548  *
1549  * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
1550  * it.  The folio reference held for PG_private_2 being set is released.
1551  *
1552  * This is, for example, used when a netfs folio is being written to a local
1553  * disk cache, thereby allowing writes to the cache for the same folio to be
1554  * serialised.
1555  */
1556 void folio_end_private_2(struct folio *folio)
1557 {
1558         VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
1559         clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
1560         folio_wake_bit(folio, PG_private_2);
1561         folio_put(folio);
1562 }
1563 EXPORT_SYMBOL(folio_end_private_2);              
1564                                                  
1565 /**
1566  * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
1567  * @folio: The folio to wait on.
1568  *
1569  * Wait for PG_private_2 to be cleared on a folio.
1570  */
1571 void folio_wait_private_2(struct folio *folio)
1572 {
1573         while (folio_test_private_2(folio))
1574                 folio_wait_bit(folio, PG_private_2);
1575 }
1576 EXPORT_SYMBOL(folio_wait_private_2);             
1577                                                  
1578 /**
1579  * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
1580  * @folio: The folio to wait on.
1581  *
1582  * Wait for PG_private_2 to be cleared on a folio or until a fatal signal is
1583  * received by the calling task.
1584  *
1585  * Return:
1586  * - 0 if successful.
1587  * - -EINTR if a fatal signal was encountered.
1588  */
1589 int folio_wait_private_2_killable(struct folio *folio)
1590 {
1591         int ret = 0;
1592 
1593         while (folio_test_private_2(folio)) {
1594                 ret = folio_wait_bit_killable(folio, PG_private_2);
1595                 if (ret < 0)
1596                         break;
1597         }
1598 
1599         return ret;
1600 }                                                
1601 EXPORT_SYMBOL(folio_wait_private_2_killable);    
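
/*
 * Illustrative sketch, not part of filemap.c: the PG_private_2 lifecycle as
 * a netfs might use it (names and flow are assumptions).  An extra folio
 * reference is taken while the bit is held; folio_end_private_2() above
 * drops both the bit and that reference.
 */
static void myfs_start_copy_to_cache(struct folio *folio)
{
        folio_get(folio);
        folio_set_private_2(folio);     /* "write to cache in progress" */
        /*
         * ... kick off the async cache write; its completion path calls
         * folio_end_private_2(folio).  A second writer serialises against
         * it with folio_wait_private_2(folio) before re-marking the folio.
         */
}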
1602                                                  
1603 /**
1604  * folio_end_writeback - End writeback against a folio.
1605  * @folio: The folio.
1606  *
1607  * The folio must actually be under writeback.
1608  *
1609  * Context: May be called from process or interrupt context.
1610  */
1611 void folio_end_writeback(struct folio *folio)
1612 {
1613         VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
1614 
1615         /*
1616          * folio_test_clear_reclaim() could be used here but it is an
1617          * atomic operation and overkill in this particular case. Failing
1618          * to shuffle a folio marked for immediate reclaim is too mild
1619          * a gain to justify taking an atomic operation penalty at the
1620          * end of every folio writeback.
1621          */
1622         if (folio_test_reclaim(folio)) {
1623                 folio_clear_reclaim(folio);
1624                 folio_rotate_reclaimable(folio);
1625         }
1626 
1627         /*
1628          * Writeback does not hold a folio reference of its own, relying
1629          * on truncation to wait for the clearing of PG_writeback.
1630          * But here we must make sure that the folio is not freed and
1631          * reused before the folio_wake_bit().
1632          */
1633         folio_get(folio);
1634         if (__folio_end_writeback(folio))
1635                 folio_wake_bit(folio, PG_writeback);
1636         acct_reclaim_writeback(folio);
1637         folio_put(folio);
1638 }                                                
1639 EXPORT_SYMBOL(folio_end_writeback);              
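
/*
 * Illustrative sketch, not part of filemap.c: a block filesystem's write
 * completion, assuming one folio per bio (an assumption, not a rule).
 * Errors are recorded on the mapping before folio_end_writeback() wakes
 * anyone sleeping in folio_wait_writeback().
 */
static void myfs_write_end_io(struct bio *bio)
{
        struct folio *folio = bio_first_folio_all(bio);

        if (bio->bi_status)
                mapping_set_error(folio->mapping,
                                  blk_status_to_errno(bio->bi_status));
        folio_end_writeback(folio);
        bio_put(bio);
}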
1640                                                  
1641 /**
1642  * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1643  * @folio: The folio to lock
1644  */
1645 void __folio_lock(struct folio *folio)
1646 {
1647         folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
1648                                 EXCLUSIVE);
1649 }
1650 EXPORT_SYMBOL(__folio_lock);
1651 
1652 int __folio_lock_killable(struct folio *folio)
1653 {
1654         return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
1655                                         EXCLUSIVE);
1656 }
1657 EXPORT_SYMBOL_GPL(__folio_lock_killable);        
1658                                                  
1659 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
1660 {
1661         struct wait_queue_head *q = folio_waitqueue(folio);
1662         int ret;
1663 
1664         wait->folio = folio;
1665         wait->bit_nr = PG_locked;
1666 
1667         spin_lock_irq(&q->lock);
1668         __add_wait_queue_entry_tail(q, &wait->wait);
1669         folio_set_waiters(folio);
1670         ret = !folio_trylock(folio);
1671         /*
1672          * If we were successful now, we know we're still on the
1673          * waitqueue as we're still under the lock. This means it's
1674          * safe to remove and return success, we know the callback
1675          * isn't going to trigger.
1676          */
1677         if (!ret)
1678                 __remove_wait_queue(q, &wait->wait);
1679         else
1680                 ret = -EIOCBQUEUED;
1681         spin_unlock_irq(&q->lock);
1682         return ret;
1683 }                                                
1684                                                  
1685 /*
1686  * Return values:
1687  * 0 - folio is locked.
1688  * non-zero - folio is not locked.
1689  *     mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
1690  *     vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and
1691  *     FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held.
1692  *
1693  * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0
1694  * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
1695  */
1696 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
1697 {
1698         unsigned int flags = vmf->flags;
1699 
1700         if (fault_flag_allow_retry_first(flags)) {
1701                 /*
1702                  * CAUTION! In this case, mmap_lock/per-VMA lock is not
1703                  * released even though returning VM_FAULT_RETRY.
1704                  */
1705                 if (flags & FAULT_FLAG_RETRY_NOWAIT)
1706                         return VM_FAULT_RETRY;
1707 
1708                 release_fault_lock(vmf);
1709                 if (flags & FAULT_FLAG_KILLABLE)
1710                         folio_wait_locked_killable(folio);
1711                 else
1712                         folio_wait_locked(folio);
1713                 return VM_FAULT_RETRY;
1714         }
1715         if (flags & FAULT_FLAG_KILLABLE) {
1716                 bool ret;
1717 
1718                 ret = __folio_lock_killable(folio);
1719                 if (ret) {
1720                         release_fault_lock(vmf);
1721                         return VM_FAULT_RETRY;
1722                 }
1723         } else {
1724                 __folio_lock(folio);
1725         }
1726 
1727         return 0;
1728 }                                                
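
/*
 * Illustrative sketch, not part of filemap.c: fault handlers normally go
 * through the folio_lock_or_retry() wrapper (linux/pagemap.h), which tries
 * a trylock before falling back to __folio_lock_or_retry() above.  The
 * myfs_* name is hypothetical.
 */
static vm_fault_t myfs_fault_lock_folio(struct folio *folio,
                                        struct vm_fault *vmf)
{
        vm_fault_t ret = folio_lock_or_retry(folio, vmf);

        if (ret)
                return ret;     /* VM_FAULT_RETRY; the fault will be re-run */
        /* ... folio is locked here; handle the fault ... */
        folio_unlock(folio);
        return 0;
}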
1729                                                  
1730 /**
1731  * page_cache_next_miss() - Find the next gap in the page cache.
1732  * @mapping: Mapping.
1733  * @index: Index.
1734  * @max_scan: Maximum range to search.
1735  *
1736  * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1737  * gap with the lowest index.
1738  *
1739  * This function may be called under the rcu_read_lock.  However, this will
1740  * not atomically search a snapshot of the cache at a single point in time.
1741  * For example, if a gap is created at index 5, then subsequently a gap is
1742  * created at index 10, page_cache_next_miss covering both indices may
1743  * return 10 if called under the rcu_read_lock.
1744  *
1745  * Return: The index of the gap if found, otherwise an index outside the
1746  * range specified (in which case 'return - index >= max_scan' will be true).
1747  * In the rare case of index wrap-around, 0 will be returned.
1748  */
1749 pgoff_t page_cache_next_miss(struct address_space *mapping,
1750                              pgoff_t index, unsigned long max_scan)
1751 {
1752         XA_STATE(xas, &mapping->i_pages, index);
1753 
1754         while (max_scan--) {
1755                 void *entry = xas_next(&xas);
1756                 if (!entry || xa_is_value(entry))
1757                         return xas.xa_index;
1758                 if (xas.xa_index == 0)
1759                         return 0;
1760         }
1761 
1762         return index + max_scan;
1763 }                                                
1764 EXPORT_SYMBOL(page_cache_next_miss);             
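
/*
 * Illustrative sketch, not part of filemap.c: probing for the first hole
 * after @index, as readahead-style code might.  Per the Return: rule above,
 * if no gap exists within @max_scan pages the returned index lands outside
 * the searched range.  The myfs_* name is hypothetical.
 */
static bool myfs_hole_within(struct address_space *mapping, pgoff_t index,
                             unsigned long max_scan)
{
        pgoff_t gap = page_cache_next_miss(mapping, index, max_scan);

        return gap - index < max_scan;  /* a gap was found in range */
}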
1765                                                  
1766 /**
1767  * page_cache_prev_miss() - Find the previous gap in the page cache.
1768  * @mapping: Mapping.
1769  * @index: Index.
1770  * @max_scan: Maximum range to search.
1771  *
1772  * Search the range [max(index - max_scan + 1, 0), index] for the
1773  * gap with the highest index.
1774  *
1775  * This function may be called under the rcu_read_lock.  However, this will
1776  * not atomically search a snapshot of the cache at a single point in time.
1777  * For example, if a gap is created at index 10, then subsequently a gap is
1778  * created at index 5, page_cache_prev_miss() covering both indices may
1779  * return 5 if called under the rcu_read_lock.
1780  *
1781  * Return: The index of the gap if found, otherwise an index outside the
1782  * range specified (in which case 'index - return >= max_scan' will be true).
1783  * In the rare case of wrap-around, ULONG_MAX will be returned.
1784  */
1785 pgoff_t page_cache_prev_miss(struct address_space *mapping,
1786                              pgoff_t index, unsigned long max_scan)
1787 {
1788         XA_STATE(xas, &mapping->i_pages, index);
1789 
1790         while (max_scan--) {
1791                 void *entry = xas_prev(&xas);
1792                 if (!entry || xa_is_value(entry))
1793                         break;
1794                 if (xas.xa_index == ULONG_MAX)
1795                         break;
1796         }
1797 
1798         return xas.xa_index;
1799 }                                                
1800 EXPORT_SYMBOL(page_cache_prev_miss);             
1801                                                  
1802 /*
1803  * Lockless page cache protocol:
1804  * On the lookup side:
1805  * 1. Load the folio from i_pages
1806  * 2. Increment the refcount if it's not zero
1807  * 3. If the folio is not found by xas_reload(), put the refcount and retry
1808  *
1809  * On the removal side:
1810  * A. Freeze the page (by zeroing the refcount if nobody else has a reference)
1811  * B. Remove the page from i_pages
1812  * C. Return the page to the page allocator
1813  *
1814  * This means that any page may have its reference count temporarily
1815  * increased by a speculative page cache (or GUP-fast) lookup as it can
1816  * be allocated by another user before the RCU grace period expires.
1817  * Because the refcount temporarily acquired here may end up being the
1818  * last refcount on the page, any page allocation must be freeable by
1819  * folio_put().
1820  */
1821                                                  
1822 /*
1823  * filemap_get_entry - Get a page cache entry.
1824  * @mapping: the address_space to search
1825  * @index: The page cache index.
1826  *
1827  * Looks up the page cache entry at @mapping & @index.  If it is a folio,
1828  * it is returned with an increased refcount.  If it is a shadow entry
1829  * of a previously evicted folio, or a swap entry from shmem/tmpfs,
1830  * it is returned without further action.
1831  *
1832  * Return: The folio, swap or shadow entry, %NULL if nothing is found.
1833  */
1834 void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
1835 {
1836         XA_STATE(xas, &mapping->i_pages, index);
1837         struct folio *folio;
1838 
1839         rcu_read_lock();
1840 repeat:
1841         xas_reset(&xas);
1842         folio = xas_load(&xas);
1843         if (xas_retry(&xas, folio))
1844                 goto repeat;
1845         /*
1846          * A shadow entry of a recently evicted page, or a swap entry from
1847          * shmem/tmpfs.  Return it without attempting to raise page count.
1848          */
1849         if (!folio || xa_is_value(folio))
1850                 goto out;
1851 
1852         if (!folio_try_get(folio))
1853                 goto repeat;
1854 
1855         if (unlikely(folio != xas_reload(&xas))) {
1856                 folio_put(folio);
1857                 goto repeat;
1858         }
1859 out:
1860         rcu_read_unlock();
1861 
1862         return folio;
1863 }                                                
1864                                                  
1865 /**
1866  * __filemap_get_folio - Find and get a reference to a folio.
1867  * @mapping: The address_space to search.
1868  * @index: The page index.
1869  * @fgp_flags: %FGP flags modify how the folio is returned.
1870  * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
1871  *
1872  * Looks up the page cache entry at @mapping & @index.
1873  *
1874  * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
1875  * if the %GFP flags specified for %FGP_CREAT are atomic.
1876  *
1877  * If this function returns a folio, it is returned with an increased refcount.
1878  *
1879  * Return: The found folio or an ERR_PTR() otherwise.
1880  */
1881 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
1882                 fgf_t fgp_flags, gfp_t gfp)
1883 {
1884         struct folio *folio;
1885 
1886 repeat:
1887         folio = filemap_get_entry(mapping, index);
1888         if (xa_is_value(folio))
1889                 folio = NULL;
1890         if (!folio)
1891                 goto no_page;
1892 
1893         if (fgp_flags & FGP_LOCK) {
1894                 if (fgp_flags & FGP_NOWAIT) {
1895                         if (!folio_trylock(folio)) {
1896                                 folio_put(folio);
1897                                 return ERR_PTR(-EAGAIN);
1898                         }
1899                 } else {
1900                         folio_lock(folio);
1901                 }
1902 
1903                 /* Has the page been truncated? */
1904                 if (unlikely(folio->mapping != mapping)) {
1905                         folio_unlock(folio);
1906                         folio_put(folio);
1907                         goto repeat;
1908                 }
1909                 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
1910         }
1911 
1912         if (fgp_flags & FGP_ACCESSED)
1913                 folio_mark_accessed(folio);
1914         else if (fgp_flags & FGP_WRITE) {
1915                 /* Clear idle flag for buffer write */
1916                 if (folio_test_idle(folio))
1917                         folio_clear_idle(folio);
1918         }
1919 
1920         if (fgp_flags & FGP_STABLE)
1921                 folio_wait_stable(folio);
1922 no_page:
1923         if (!folio && (fgp_flags & FGP_CREAT)) {
1924                 unsigned int min_order = mapping_min_folio_order(mapping);
1925                 unsigned int order = max(min_order, FGF_GET_ORDER(fgp_flags));
1926                 int err;
1927                 index = mapping_align_index(mapping, index);
1928 
1929                 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
1930                         gfp |= __GFP_WRITE;
1931                 if (fgp_flags & FGP_NOFS)
1932                         gfp &= ~__GFP_FS;
1933                 if (fgp_flags & FGP_NOWAIT) {
1934                         gfp &= ~GFP_KERNEL;
1935                         gfp |= GFP_NOWAIT | __GFP_NOWARN;
1936                 }
1937                 if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
1938                         fgp_flags |= FGP_LOCK;
1939 
1940                 if (order > mapping_max_folio_order(mapping))
1941                         order = mapping_max_folio_order(mapping);
1942                 /* If we're not aligned, allocate a smaller folio */
1943                 if (index & ((1UL << order) - 1))
1944                         order = __ffs(index);
1945 
1946                 do {
1947                         gfp_t alloc_gfp = gfp;
1948 
1949                         err = -ENOMEM;
1950                         if (order > min_order)
1951                                 alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
1952                         folio = filemap_alloc_folio(alloc_gfp, order);
1953                         if (!folio)
1954                                 continue;
1955 
1956                         /* Init accessed so avoid atomic mark_page_accessed later */
1957                         if (fgp_flags & FGP_ACCESSED)
1958                                 __folio_set_referenced(folio);
1959 
1960                         err = filemap_add_folio(mapping, folio, index, gfp);
1961                         if (!err)
1962                                 break;
1963                         folio_put(folio);
1964                         folio = NULL;
1965                 } while (order-- > min_order);
1966 
1967                 if (err == -EEXIST)
1968                         goto repeat;
1969                 if (err)
1970                         return ERR_PTR(err);
1971                 /*
1972                  * filemap_add_folio locks the page, and for mmap
1973                  * we expect an unlocked page.
1974                  */
1975                 if (folio && (fgp_flags & FGP_FOR_MMAP))
1976                         folio_unlock(folio);
1977         }
1978 
1979         if (!folio)
1980                 return ERR_PTR(-ENOENT);
1981         return folio;
1982 }                                                
1983 EXPORT_SYMBOL(__filemap_get_folio);              
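
/*
 * Illustrative sketch, not part of filemap.c: the common "find or create,
 * locked" lookup.  FGP_CREAT | FGP_LOCK returns a locked, referenced folio,
 * allocating one if the index was absent; errors come back as ERR_PTR().
 * The myfs_* name is hypothetical.
 */
static int myfs_touch_index(struct address_space *mapping, pgoff_t index)
{
        struct folio *folio;

        folio = __filemap_get_folio(mapping, index,
                                    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                    mapping_gfp_mask(mapping));
        if (IS_ERR(folio))
                return PTR_ERR(folio);
        /* ... folio is locked here; initialise or dirty it ... */
        folio_unlock(folio);
        folio_put(folio);
        return 0;
}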
1984                                                  
1985 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
1986                 xa_mark_t mark)
1987 {
1988         struct folio *folio;
1989 
1990 retry:
1991         if (mark == XA_PRESENT)
1992                 folio = xas_find(xas, max);
1993         else
1994                 folio = xas_find_marked(xas, max, mark);
1995 
1996         if (xas_retry(xas, folio))
1997                 goto retry;
1998         /*
1999          * A shadow entry of a recently evicted page, a swap
2000          * entry from shmem/tmpfs or a DAX entry.  Return it
2001          * without attempting to raise page count.
2002          */
2003         if (!folio || xa_is_value(folio))
2004                 return folio;
2005 
2006         if (!folio_try_get(folio))
2007                 goto reset;
2008 
2009         if (unlikely(folio != xas_reload(xas))) {
2010                 folio_put(folio);
2011                 goto reset;
2012         }
2013 
2014         return folio;
2015 reset:
2016         xas_reset(xas);
2017         goto retry;
2018 }                                                
2019                                                  
2020 /**
2021  * find_get_entries - gang pagecache lookup
2022  * @mapping:    The address_space to search
2023  * @start:      The starting page cache index
2024  * @end:        The final page index (inclusive).
2025  * @fbatch:     Where the resulting entries are placed.
2026  * @indices:    The cache indices corresponding to the entries in @fbatch
2027  *
2028  * find_get_entries() will search for and return a batch of entries in
2029  * the mapping.  The entries are placed in @fbatch.  find_get_entries()
2030  * takes a reference on any actual folios it returns.
2031  *
2032  * The entries have ascending indexes.  The indices may not be consecutive
2033  * due to not-present entries or large folios.
2034  *
2035  * Any shadow entries of evicted folios, or swap entries from
2036  * shmem/tmpfs, are included in the returned array.
2037  *
2038  * Return: The number of entries which were found.
2039  */
2040 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
2041                 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2042 {
2043         XA_STATE(xas, &mapping->i_pages, *start);
2044         struct folio *folio;
2045 
2046         rcu_read_lock();
2047         while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2048                 indices[fbatch->nr] = xas.xa_index;
2049                 if (!folio_batch_add(fbatch, folio))
2050                         break;
2051         }
2052 
2053         if (folio_batch_count(fbatch)) {
2054                 unsigned long nr;
2055                 int idx = folio_batch_count(fbatch) - 1;
2056 
2057                 folio = fbatch->folios[idx];
2058                 if (!xa_is_value(folio))
2059                         nr = folio_nr_pages(folio);
2060                 else
2061                         nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]);
2062                 *start = round_down(indices[idx] + nr, nr);
2063         }
2064         rcu_read_unlock();
2065 
2066         return folio_batch_count(fbatch);
2067 }                                                
2068                                                  
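/*
 * Example: a minimal sketch (not part of filemap.c) of the pattern that
 * truncate-style callers use with find_get_entries(): drain the range
 * batch by batch, treating value entries (shadow/swap/DAX), for which
 * no reference was taken, differently from real folios.  The my_*
 * helpers are hypothetical.
 */
#if 0   /* illustrative sketch, not compiled */
static void my_drop_range(struct address_space *mapping,
                          pgoff_t start, pgoff_t end)
{
        struct folio_batch fbatch;
        pgoff_t indices[PAGEVEC_SIZE];
        unsigned i;

        folio_batch_init(&fbatch);
        while (find_get_entries(mapping, &start, end, &fbatch, indices)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        if (xa_is_value(folio)) {
                                my_handle_value_entry(mapping, indices[i]);
                                continue;       /* no reference to drop */
                        }
                        folio_lock(folio);
                        my_handle_folio(folio);
                        folio_unlock(folio);
                        folio_put(folio);       /* ref from find_get_entries() */
                }
                folio_batch_reinit(&fbatch);
                cond_resched();
        }
}
#endif
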
/**
 * find_lock_entries - Find a batch of pagecache entries.
 * @mapping:    The address_space to search.
 * @start:      The starting page cache index.
 * @end:        The final page index (inclusive).
 * @fbatch:     Where the resulting entries are placed.
 * @indices:    The cache indices of the entries in @fbatch.
 *
 * find_lock_entries() will return a batch of entries from @mapping.
 * Swap, shadow and DAX entries are included.  Folios are returned
 * locked and with an incremented refcount.  Folios which are locked
 * by somebody else or under writeback are skipped.  Folios which are
 * partially outside the range are not returned.
 *
 * The entries have ascending indexes.  The indices may not be consecutive
 * due to not-present entries, large folios, folios which could not be
 * locked or folios under writeback.
 *
 * Return: The number of entries which were found.
 */
unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
                pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
{
        XA_STATE(xas, &mapping->i_pages, *start);
        struct folio *folio;

        rcu_read_lock();
        while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
                unsigned long base;
                unsigned long nr;

                if (!xa_is_value(folio)) {
                        nr = folio_nr_pages(folio);
                        base = folio->index;
                        /* Omit large folio which begins before the start */
                        if (base < *start)
                                goto put;
                        /* Omit large folio which extends beyond the end */
                        if (base + nr - 1 > end)
                                goto put;
                        if (!folio_trylock(folio))
                                goto put;
                        if (folio->mapping != mapping ||
                            folio_test_writeback(folio))
                                goto unlock;
                        VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
                                        folio);
                } else {
                        nr = 1 << xas_get_order(&xas);
                        base = xas.xa_index & ~(nr - 1);
                        /* Omit order>0 value which begins before the start */
                        if (base < *start)
                                continue;
                        /* Omit order>0 value which extends beyond the end */
                        if (base + nr - 1 > end)
                                break;
                }

                /* Update start now so that last update is correct on return */
                *start = base + nr;
                indices[fbatch->nr] = xas.xa_index;
                if (!folio_batch_add(fbatch, folio))
                        break;
                continue;
unlock:
                folio_unlock(folio);
put:
                folio_put(folio);
        }
        rcu_read_unlock();

        return folio_batch_count(fbatch);
}

/**
 * filemap_get_folios - Get a batch of folios
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @end:        The final page index (inclusive)
 * @fbatch:     The batch to fill.
 *
 * Search for and return a batch of folios in the mapping starting at
 * index @start and up to index @end (inclusive).  The folios are returned
 * in @fbatch with an elevated reference count.
 *
 * Return: The number of folios which were found.
 * We also update @start to index the next folio for the traversal.
 */
unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
                pgoff_t end, struct folio_batch *fbatch)
{
        return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch);
}
EXPORT_SYMBOL(filemap_get_folios);

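/*
 * Example: a minimal sketch (not part of filemap.c) showing the usual
 * filemap_get_folios() iteration: @start is advanced by the callee, so
 * the loop terminates once the batch comes back empty.  my_cached_bytes()
 * is a hypothetical name.
 */
#if 0   /* illustrative sketch, not compiled */
static loff_t my_cached_bytes(struct address_space *mapping)
{
        struct folio_batch fbatch;
        pgoff_t start = 0;
        loff_t bytes = 0;
        unsigned i;

        folio_batch_init(&fbatch);
        while (filemap_get_folios(mapping, &start, (pgoff_t)-1, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        bytes += folio_size(fbatch.folios[i]);
                folio_batch_release(&fbatch);   /* drops the elevated refs */
                cond_resched();
        }
        return bytes;
}
#endif
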
/**
 * filemap_get_folios_contig - Get a batch of contiguous folios
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @end:        The final page index (inclusive)
 * @fbatch:     The batch to fill
 *
 * filemap_get_folios_contig() works exactly like filemap_get_folios(),
 * except the returned folios are guaranteed to be contiguous. This may
 * not return all contiguous folios if the batch gets filled up.
 *
 * Return: The number of folios found.
 * Also update @start to be positioned for traversal of the next folio.
 */
unsigned filemap_get_folios_contig(struct address_space *mapping,
                pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
{
        XA_STATE(xas, &mapping->i_pages, *start);
        unsigned long nr;
        struct folio *folio;

        rcu_read_lock();

        for (folio = xas_load(&xas); folio && xas.xa_index <= end;
                        folio = xas_next(&xas)) {
                if (xas_retry(&xas, folio))
                        continue;
                /*
                 * If the entry has been swapped out, we can stop looking.
                 * No current caller is looking for DAX entries.
                 */
                if (xa_is_value(folio))
                        goto update_start;

                /* If we landed in the middle of a THP, continue at its end. */
                if (xa_is_sibling(folio))
                        goto update_start;

                if (!folio_try_get(folio))
                        goto retry;

                if (unlikely(folio != xas_reload(&xas)))
                        goto put_folio;

                if (!folio_batch_add(fbatch, folio)) {
                        nr = folio_nr_pages(folio);
                        *start = folio->index + nr;
                        goto out;
                }
                continue;
put_folio:
                folio_put(folio);

retry:
                xas_reset(&xas);
        }

update_start:
        nr = folio_batch_count(fbatch);

        if (nr) {
                folio = fbatch->folios[nr - 1];
                *start = folio_next_index(folio);
        }
out:
        rcu_read_unlock();
        return folio_batch_count(fbatch);
}
EXPORT_SYMBOL(filemap_get_folios_contig);

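/*
 * Example: a minimal sketch (not part of filemap.c) of a caller that
 * wants one unbroken run of pagecache, e.g. to build a single I/O.
 * A hole, a value entry or a full batch ends the run; my_grab_run()
 * is a hypothetical name.
 */
#if 0   /* illustrative sketch, not compiled */
static unsigned my_grab_run(struct address_space *mapping, loff_t pos,
                            size_t len, struct folio_batch *fbatch)
{
        pgoff_t index = pos >> PAGE_SHIFT;
        pgoff_t last = (pos + len - 1) >> PAGE_SHIFT;

        folio_batch_init(fbatch);
        return filemap_get_folios_contig(mapping, &index, last, fbatch);
}
#endif
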
/**
 * filemap_get_folios_tag - Get a batch of folios matching @tag
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @end:        The final page index (inclusive)
 * @tag:        The tag index
 * @fbatch:     The batch to fill
 *
 * The first folio may start before @start; if it does, it will contain
 * @start.  The final folio may extend beyond @end; if it does, it will
 * contain @end.  The folios have ascending indices.  There may be gaps
 * between the folios if there are indices which have no folio in the
 * page cache.  If folios are added to or removed from the page cache
 * while this is running, they may or may not be found by this call.
 * Only returns folios that are tagged with @tag.
 *
 * Return: The number of folios found.
 * Also update @start to index the next folio for traversal.
 */
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
{
        XA_STATE(xas, &mapping->i_pages, *start);
        struct folio *folio;

        rcu_read_lock();
        while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
                /*
                 * Shadow entries should never be tagged, but this iteration
                 * is lockless so there is a window for page reclaim to evict
                 * a page we saw tagged. Skip over it.
                 */
                if (xa_is_value(folio))
                        continue;
                if (!folio_batch_add(fbatch, folio)) {
                        unsigned long nr = folio_nr_pages(folio);
                        *start = folio->index + nr;
                        goto out;
                }
        }
        /*
         * We come here when there is no page beyond @end. We take care to not
         * overflow the index @start as it confuses some of the callers. This
         * breaks the iteration when there is a page at index -1 but that is
         * already broken anyway.
         */
        if (end == (pgoff_t)-1)
                *start = (pgoff_t)-1;
        else
                *start = end + 1;
out:
        rcu_read_unlock();

        return folio_batch_count(fbatch);
}
EXPORT_SYMBOL(filemap_get_folios_tag);

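/*
 * Example: a minimal sketch (not part of filemap.c) of the classic
 * tagged-lookup loop, here waiting for writeback over a range much as
 * filemap_fdatawait_range() does internally; my_wait_range() is a
 * hypothetical name.
 */
#if 0   /* illustrative sketch, not compiled */
static void my_wait_range(struct address_space *mapping,
                          pgoff_t start, pgoff_t end)
{
        struct folio_batch fbatch;
        unsigned i;

        folio_batch_init(&fbatch);
        while (filemap_get_folios_tag(mapping, &start, end,
                                      PAGECACHE_TAG_WRITEBACK, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        folio_wait_writeback(fbatch.folios[i]);
                folio_batch_release(&fbatch);
                cond_resched();
        }
}
#endif
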
/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bailing out here
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * It is going insane. Fix it by quickly scaling down the readahead size.
 */
static void shrink_readahead_size_eio(struct file_ra_state *ra)
{
        ra->ra_pages /= 4;
}

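/*
 * Example: with the common 128KiB (32-page) default window, repeated
 * media errors shrink ra_pages 32 -> 8 -> 2 -> 0; at zero, readahead
 * is effectively disabled for this struct file.
 */
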
/*
 * filemap_get_read_batch - Get a batch of folios for read
 *
 * Get a batch of folios which represent a contiguous range of bytes in
 * the file.  No exceptional entries will be returned.  If @index is in
 * the middle of a folio, the entire folio will be returned.  The last
 * folio in the batch may have the readahead flag set or the uptodate flag
 * clear so that the caller can take the appropriate action.
 */
static void filemap_get_read_batch(struct address_space *mapping,
                pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
{
        XA_STATE(xas, &mapping->i_pages, index);
        struct folio *folio;

        rcu_read_lock();
        for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
                if (xas_retry(&xas, folio))
                        continue;
                if (xas.xa_index > max || xa_is_value(folio))
                        break;
                if (xa_is_sibling(folio))
                        break;
                if (!folio_try_get(folio))
                        goto retry;

                if (unlikely(folio != xas_reload(&xas)))
                        goto put_folio;

                if (!folio_batch_add(fbatch, folio))
                        break;
                if (!folio_test_uptodate(folio))
                        break;
                if (folio_test_readahead(folio))
                        break;
                xas_advance(&xas, folio_next_index(folio) - 1);
                continue;
put_folio:
                folio_put(folio);
retry:
                xas_reset(&xas);
        }
        rcu_read_unlock();
}

static int filemap_read_folio(struct file *file, filler_t filler,
                struct folio *folio)
{
        bool workingset = folio_test_workingset(folio);
        unsigned long pflags;
        int error;

        /* Start the actual read. The read will unlock the page. */
        if (unlikely(workingset))
                psi_memstall_enter(&pflags);
        error = filler(file, folio);
        if (unlikely(workingset))
                psi_memstall_leave(&pflags);
        if (error)
                return error;

        error = folio_wait_locked_killable(folio);
        if (error)
                return error;
        if (folio_test_uptodate(folio))
                return 0;
        if (file)
                shrink_readahead_size_eio(&file->f_ra);
        return -EIO;
}

static bool filemap_range_uptodate(struct address_space *mapping,
                loff_t pos, size_t count, struct folio *folio,
                bool need_uptodate)
{
        if (folio_test_uptodate(folio))
                return true;
        /* pipes can't handle partially uptodate pages */
        if (need_uptodate)
                return false;
        if (!mapping->a_ops->is_partially_uptodate)
                return false;
        if (mapping->host->i_blkbits >= folio_shift(folio))
                return false;

        if (folio_pos(folio) > pos) {
                count -= folio_pos(folio) - pos;
                pos = 0;
        } else {
                pos -= folio_pos(folio);
        }

        return mapping->a_ops->is_partially_uptodate(folio, pos, count);
}

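/*
 * Worked example (not from the source): a read of 6000 bytes at pos 3000
 * against a 4KiB folio whose folio_pos() is 4096 takes the first branch:
 * count becomes 6000 - (4096 - 3000) = 4904 and pos is rebased to 0, so
 * ->is_partially_uptodate(folio, 0, 4904) is asked whether the covered
 * blocks at the start of the folio are uptodate.
 */
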
static int filemap_update_page(struct kiocb *iocb,
                struct address_space *mapping, size_t count,
                struct folio *folio, bool need_uptodate)
{
        int error;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!filemap_invalidate_trylock_shared(mapping))
                        return -EAGAIN;
        } else {
                filemap_invalidate_lock_shared(mapping);
        }

        if (!folio_trylock(folio)) {
                error = -EAGAIN;
                if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
                        goto unlock_mapping;
                if (!(iocb->ki_flags & IOCB_WAITQ)) {
                        filemap_invalidate_unlock_shared(mapping);
                        /*
                         * This is where we usually end up waiting for a
                         * previously submitted readahead/read.
                         */
                        folio_put_wait_locked(folio, TASK_KILLABLE);
                        return AOP_TRUNCATED_PAGE;
                }
                error = __folio_lock_async(folio, iocb->ki_waitq);
                if (error)
                        goto unlock_mapping;
        }

        error = AOP_TRUNCATED_PAGE;
        if (!folio->mapping)
                goto unlock;

        error = 0;
        if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,
                                   need_uptodate))
                goto unlock;

        error = -EAGAIN;
        if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
                goto unlock;

        error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
                        folio);
        goto unlock_mapping;
unlock:
        folio_unlock(folio);
unlock_mapping:
        filemap_invalidate_unlock_shared(mapping);
        if (error == AOP_TRUNCATED_PAGE)
                folio_put(folio);
        return error;
}

static int filemap_create_folio(struct file *file,
                struct address_space *mapping, loff_t pos,
                struct folio_batch *fbatch)
{
        struct folio *folio;
        int error;
        unsigned int min_order = mapping_min_folio_order(mapping);
        pgoff_t index;

        folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);
        if (!folio)
                return -ENOMEM;

        /*
         * Protect against truncate / hole punch. Grabbing invalidate_lock
         * here assures we cannot instantiate and bring uptodate new
         * pagecache folios after evicting page cache during truncate
         * and before actually freeing blocks.  Note that we could
         * release invalidate_lock after inserting the folio into
         * the page cache as the locked folio would then be enough to
         * synchronize with hole punching. But there are code paths
         * such as filemap_update_page() filling in partially uptodate
         * pages or ->readahead() that need to hold invalidate_lock
         * while mapping blocks for IO so let's hold the lock here as
         * well to keep locking rules simple.
         */
        filemap_invalidate_lock_shared(mapping);
        index = (pos >> (PAGE_SHIFT + min_order)) << min_order;
        error = filemap_add_folio(mapping, folio, index,
                        mapping_gfp_constraint(mapping, GFP_KERNEL));
        if (error == -EEXIST)
                error = AOP_TRUNCATED_PAGE;
        if (error)
                goto error;

        error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
        if (error)
                goto error;

        filemap_invalidate_unlock_shared(mapping);
        folio_batch_add(fbatch, folio);
        return 0;
error:
        filemap_invalidate_unlock_shared(mapping);
        folio_put(folio);
        return error;
}

static int filemap_readahead(struct kiocb *iocb, struct file *file,
                struct address_space *mapping, struct folio *folio,
                pgoff_t last_index)
{
        DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);

        if (iocb->ki_flags & IOCB_NOIO)
                return -EAGAIN;
        page_cache_async_ra(&ractl, folio, last_index - folio->index);
        return 0;
}

static int filemap_get_pages(struct kiocb *iocb, size_t count,
                struct folio_batch *fbatch, bool need_uptodate)
{
        struct file *filp = iocb->ki_filp;
        struct address_space *mapping = filp->f_mapping;
        struct file_ra_state *ra = &filp->f_ra;
        pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
        pgoff_t last_index;
        struct folio *folio;
        unsigned int flags;
        int err = 0;

        /* "last_index" is the index of the page beyond the end of the read */
        last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE);
retry:
        if (fatal_signal_pending(current))
                return -EINTR;

        filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
        if (!folio_batch_count(fbatch)) {
                if (iocb->ki_flags & IOCB_NOIO)
                        return -EAGAIN;
                if (iocb->ki_flags & IOCB_NOWAIT)
                        flags = memalloc_noio_save();
                page_cache_sync_readahead(mapping, ra, filp, index,
                                last_index - index);
                if (iocb->ki_flags & IOCB_NOWAIT)
                        memalloc_noio_restore(flags);
                filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
        }
        if (!folio_batch_count(fbatch)) {
                if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
                        return -EAGAIN;
                err = filemap_create_folio(filp, mapping, iocb->ki_pos, fbatch);
                if (err == AOP_TRUNCATED_PAGE)
                        goto retry;
                return err;
        }

        folio = fbatch->folios[folio_batch_count(fbatch) - 1];
        if (folio_test_readahead(folio)) {
                err = filemap_readahead(iocb, filp, mapping, folio, last_index);
                if (err)
                        goto err;
        }
        if (!folio_test_uptodate(folio)) {
                if ((iocb->ki_flags & IOCB_WAITQ) &&
                    folio_batch_count(fbatch) > 1)
                        iocb->ki_flags |= IOCB_NOWAIT;
                err = filemap_update_page(iocb, mapping, count, folio,
                                          need_uptodate);
                if (err)
                        goto err;
        }

        trace_mm_filemap_get_pages(mapping, index, last_index - 1);
        return 0;
err:
        if (err < 0)
                folio_put(folio);
        if (likely(--fbatch->nr))
                return 0;
        if (err == AOP_TRUNCATED_PAGE)
                goto retry;
        return err;
}

static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
{
        unsigned int shift = folio_shift(folio);

        return (pos1 >> shift == pos2 >> shift);
}

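/*
 * Worked example (not from the source): with 4KiB pages, an order-2
 * (16KiB) folio has folio_shift() == 14, so any two positions within
 * the same 16KiB-aligned window (e.g. 0x0000 and 0x3fff) compare equal
 * and the folio is only marked accessed once per visit.
 */
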
/**
 * filemap_read - Read data from the page cache.
 * @iocb: The iocb to read.
 * @iter: Destination for the data.
 * @already_read: Number of bytes already read by the caller.
 *
 * Copies data from the page cache.  If the data is not currently present,
 * uses the readahead and read_folio address_space operations to fetch it.
 *
 * Return: Total number of bytes copied, including those already read by
 * the caller.  If an error happens before any bytes are copied, returns
 * a negative error number.
 */
ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
                ssize_t already_read)
{
        struct file *filp = iocb->ki_filp;
        struct file_ra_state *ra = &filp->f_ra;
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        struct folio_batch fbatch;
        int i, error = 0;
        bool writably_mapped;
        loff_t isize, end_offset;
        loff_t last_pos = ra->prev_pos;

        if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
                return 0;
        if (unlikely(!iov_iter_count(iter)))
                return 0;

        iov_iter_truncate(iter, inode->i_sb->s_maxbytes - iocb->ki_pos);
        folio_batch_init(&fbatch);

        do {
                cond_resched();

                /*
                 * If we've already successfully copied some data, then we
                 * can no longer safely return -EIOCBQUEUED. Hence mark
                 * an async read NOWAIT at that point.
                 */
                if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
                        iocb->ki_flags |= IOCB_NOWAIT;

                if (unlikely(iocb->ki_pos >= i_size_read(inode)))
                        break;

                error = filemap_get_pages(iocb, iov_iter_count(iter), &fbatch, false);
                if (error < 0)
                        break;

                /*
                 * i_size must be checked after we know the pages are Uptodate.
                 *
                 * Checking i_size after the check allows us to calculate
                 * the correct value for "nr", which means the zero-filled
                 * part of the page is not copied back to userspace (unless
                 * another truncate extends the file - this is desired though).
                 */
                isize = i_size_read(inode);
                if (unlikely(iocb->ki_pos >= isize))
                        goto put_folios;
                end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);

                /*
                 * Once we start copying data, we don't want to be touching any
                 * cachelines that might be contended:
                 */
                writably_mapped = mapping_writably_mapped(mapping);

                /*
                 * When a read accesses the same folio several times, only
                 * mark it as accessed the first time.
                 */
                if (!pos_same_folio(iocb->ki_pos, last_pos - 1,
                                    fbatch.folios[0]))
                        folio_mark_accessed(fbatch.folios[0]);

                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];
                        size_t fsize = folio_size(folio);
                        size_t offset = iocb->ki_pos & (fsize - 1);
                        size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
                                             fsize - offset);
                        size_t copied;

                        if (end_offset < folio_pos(folio))
                                break;
                        if (i > 0)
                                folio_mark_accessed(folio);
                        /*
                         * If users can be writing to this folio using arbitrary
                         * virtual addresses, take care of potential aliasing
                         * before reading the folio on the kernel side.
                         */
                        if (writably_mapped)
                                flush_dcache_folio(folio);

                        copied = copy_folio_to_iter(folio, offset, bytes, iter);

                        already_read += copied;
                        iocb->ki_pos += copied;
                        last_pos = iocb->ki_pos;

                        if (copied < bytes) {
                                error = -EFAULT;
                                break;
                        }
                }
put_folios:
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        folio_put(fbatch.folios[i]);
                folio_batch_init(&fbatch);
        } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);

        file_accessed(filp);
        ra->prev_pos = last_pos;
        return already_read ? already_read : error;
}
EXPORT_SYMBOL_GPL(filemap_read);

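/*
 * Example: a minimal sketch (not part of filemap.c) of a filesystem
 * read_iter that brackets the generic buffered path with its own
 * locking; the my_* lock helpers are hypothetical.
 */
#if 0   /* illustrative sketch, not compiled */
static ssize_t my_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        my_read_lock(inode);
        ret = filemap_read(iocb, to, 0);
        my_read_unlock(inode);
        return ret;
}
#endif
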
int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        loff_t pos = iocb->ki_pos;
        loff_t end = pos + count - 1;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (filemap_range_needs_writeback(mapping, pos, end))
                        return -EAGAIN;
                return 0;
        }

        return filemap_write_and_wait_range(mapping, pos, end);
}
EXPORT_SYMBOL_GPL(kiocb_write_and_wait);

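/*
 * Example: a minimal sketch (not part of filemap.c) of a direct-I/O
 * read path using kiocb_write_and_wait() to flush dirty pagecache (or
 * fail with -EAGAIN under IOCB_NOWAIT) before issuing the device I/O;
 * my_submit_dio_read() is hypothetical.
 */
#if 0   /* illustrative sketch, not compiled */
static ssize_t my_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
        ssize_t ret;

        ret = kiocb_write_and_wait(iocb, iov_iter_count(to));
        if (ret < 0)
                return ret;
        return my_submit_dio_read(iocb, to);
}
#endif
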
int filemap_invalidate_pages(struct address_space *mapping,
                             loff_t pos, loff_t end, bool nowait)
{
        int ret;

        if (nowait) {
                /* we could block if there are any pages in the range */
                if (filemap_range_has_page(mapping, pos, end))
                        return -EAGAIN;
        } else {
                ret = filemap_write_and_wait_range(mapping, pos, end);
                if (ret)
                        return ret;
        }

        /*
         * After a write we want buffered reads to be sure to go to disk to get
         * the new data.  We invalidate clean cached pages from the region we're
         * about to write.  We do this *before* the write so that we can return
         * without clobbering -EIOCBQUEUED from ->direct_IO().
         */
        return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
                                             end >> PAGE_SHIFT);
}

int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;

        return filemap_invalidate_pages(mapping, iocb->ki_pos,
                                        iocb->ki_pos + count - 1,
                                        iocb->ki_flags & IOCB_NOWAIT);
}
EXPORT_SYMBOL_GPL(kiocb_invalidate_pages);

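/*
 * Example: a minimal sketch (not part of filemap.c) of the write-side
 * counterpart: invalidate the affected pagecache before a direct write
 * so later buffered reads go to disk; my_submit_dio_write() is
 * hypothetical.
 */
#if 0   /* illustrative sketch, not compiled */
static ssize_t my_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret;

        ret = kiocb_invalidate_pages(iocb, iov_iter_count(from));
        if (ret)
                return ret;
        return my_submit_dio_write(iocb, from);
}
#endif
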
/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb:       kernel I/O control block
 * @iter:       destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 *
 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
 * be returned when no data can be read without waiting for I/O requests
 * to complete; it doesn't prevent readahead.
 *
 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
 * requests shall be made for the read or for readahead.  When no data
 * can be read, -EAGAIN shall be returned.  When readahead would be
 * triggered, a partial, possibly empty read shall be returned.
 *
 * Return:
 * * number of bytes copied, even for partial reads
 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t count = iov_iter_count(iter);
        ssize_t retval = 0;

        if (!count)
                return 0; /* skip atime */

        if (iocb->ki_flags & IOCB_DIRECT) {
                struct file *file = iocb->ki_filp;
                struct address_space *mapping = file->f_mapping;
                struct inode *inode = mapping->host;

                retval = kiocb_write_and_wait(iocb, count);
                if (retval < 0)
                        return retval;
                file_accessed(file);

                retval = mapping->a_ops->direct_IO(iocb, iter);
                if (retval >= 0) {
                        iocb->ki_pos += retval;
                        count -= retval;
                }
                if (retval != -EIOCBQUEUED)
                        iov_iter_revert(iter, count - iov_iter_count(iter));

                /*
                 * Btrfs can have a short DIO read if we encounter
                 * compressed extents, so if there was an error, or if
                 * we've already read everything we wanted to, or if
                 * there was a short read because we hit EOF, go ahead
                 * and return.  Otherwise fallthrough to buffered io for
                 * the rest of the read.  Buffered reads will not work for
                 * DAX files, so don't bother trying.
                 */
                if (retval < 0 || !count || IS_DAX(inode))
                        return retval;
                if (iocb->ki_pos >= i_size_read(inode))
                        return retval;
        }

        return filemap_read(iocb, iter, retval);
}
EXPORT_SYMBOL(generic_file_read_iter);

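/*
 * Example (not from this file): a pagecache-backed filesystem usually
 * wires the generic helpers straight into its file_operations; the
 * struct name is hypothetical.
 */
#if 0   /* illustrative sketch, not compiled */
static const struct file_operations my_file_ops = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = filemap_splice_read,
};
#endif
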
/*
 * Splice subpages from a folio into a pipe.
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
                              struct folio *folio, loff_t fpos, size_t size)
{
        struct page *page;
        size_t spliced = 0, offset = offset_in_folio(folio, fpos);

        page = folio_page(folio, offset / PAGE_SIZE);
        size = min(size, folio_size(folio) - offset);
        offset %= PAGE_SIZE;

        while (spliced < size &&
               !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
                struct pipe_buffer *buf = pipe_head_buf(pipe);
                size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);

                *buf = (struct pipe_buffer) {
                        .ops    = &page_cache_pipe_buf_ops,
                        .page   = page,
                        .offset = offset,
                        .len    = part,
                };
                folio_get(folio);
                pipe->head++;
                page++;
                spliced += part;
                offset = 0;
        }

        return spliced;
}

/**
 * filemap_splice_read -  Splice data from a file's pagecache into a pipe
 * @in: The file to read from
 * @ppos: Pointer to the file position to read from
 * @pipe: The pipe to splice into
 * @len: The amount to splice
 * @flags: The SPLICE_F_* flags
 *
 * This function gets folios from a file's pagecache and splices them into the
 * pipe.  Readahead will be called as necessary to fill more folios.  This may
 * be used for blockdevs also.
 *
 * Return: On success, the number of bytes read will be returned and *@ppos
 * will be updated if appropriate; 0 will be returned if there is no more data
 * to be read; -EAGAIN will be returned if the pipe had no space, and some
 * other negative error code will be returned on error.  A short read may occur
 * if the pipe has insufficient space, we reach the end of the data or we hit a
 * hole.
 */
ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
                            struct pipe_inode_info *pipe,
                            size_t len, unsigned int flags)
{
        struct folio_batch fbatch;
        struct kiocb iocb;
        size_t total_spliced = 0, used, npages;
        loff_t isize, end_offset;
        bool writably_mapped;
        int i, error = 0;

        if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes))
                return 0;

        init_sync_kiocb(&iocb, in);
        iocb.ki_pos = *ppos;

        /* Work out how much data we can actually add into the pipe */
        used = pipe_occupancy(pipe->head, pipe->tail);
        npages = max_t(ssize_t, pipe->max_usage - used, 0);
        len = min_t(size_t, len, npages * PAGE_SIZE);

        folio_batch_init(&fbatch);

        do {
                cond_resched();

                if (*ppos >= i_size_read(in->f_mapping->host))
                        break;

                iocb.ki_pos = *ppos;
                error = filemap_get_pages(&iocb, len, &fbatch, true);
                if (error < 0)
                        break;

                /*
                 * i_size must be checked after we know the pages are Uptodate.
                 *
                 * Checking i_size after the check allows us to calculate
                 * the correct value for "nr", which means the zero-filled
                 * part of the page is not copied back to userspace (unless
                 * another truncate extends the file - this is desired though).
                 */
                isize = i_size_read(in->f_mapping->host);
                if (unlikely(*ppos >= isize))
                        break;
                end_offset = min_t(loff_t, isize, *ppos + len);

                /*
                 * Once we start copying data, we don't want to be touching any
                 * cachelines that might be contended:
                 */
                writably_mapped = mapping_writably_mapped(in->f_mapping);

                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];
                        size_t n;

                        if (folio_pos(folio) >= end_offset)
                                goto out;
                        folio_mark_accessed(folio);

                        /*
                         * If users can be writing to this folio using arbitrary
                         * virtual addresses, take care of potential aliasing
                         * before reading the folio on the kernel side.
                         */
                        if (writably_mapped)
                                flush_dcache_folio(folio);

                        n = min_t(loff_t, len, isize - *ppos);
                        n = splice_folio_into_pipe(pipe, folio, *ppos, n);
                        if (!n)
                                goto out;
                        len -= n;
                        total_spliced += n;
                        *ppos += n;
                        in->f_ra.prev_pos = *ppos;
                        if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
                                goto out;
                }

                folio_batch_release(&fbatch);
        } while (len);

out:
        folio_batch_release(&fbatch);
        file_accessed(in);

        return total_spliced ? total_spliced : error;
}
EXPORT_SYMBOL(filemap_splice_read);

static inline loff_t folio_seek_hole_data(struct xa_state *xas,
                struct address_space *mapping, struct folio *folio,
                loff_t start, loff_t end, bool seek_data)
{
        const struct address_space_operations *ops = mapping->a_ops;
        size_t offset, bsz = i_blocksize(mapping->host);

        if (xa_is_value(folio) || folio_test_uptodate(folio))
                return seek_data ? start : end;
        if (!ops->is_partially_uptodate)
                return seek_data ? end : start;

        xas_pause(xas);
        rcu_read_unlock();
        folio_lock(folio);
        if (unlikely(folio->mapping != mapping))
                goto unlock;

        offset = offset_in_folio(folio, start) & ~(bsz - 1);

        do {
                if (ops->is_partially_uptodate(folio, offset, bsz) ==
                                                        seek_data)
                        break;
                start = (start + bsz) & ~(bsz - 1);
                offset += bsz;
        } while (offset < folio_size(folio));
unlock:
        folio_unlock(folio);
        rcu_read_lock();
        return start;
}

static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
{
        if (xa_is_value(folio))
                return PAGE_SIZE << xas_get_order(xas);
        return folio_size(folio);
}

3023 /**                                              
3024  * mapping_seek_hole_data - Seek for SEEK_DAT    
3025  * @mapping: Address space to search.            
3026  * @start: First byte to consider.               
3027  * @end: Limit of search (exclusive).            
3028  * @whence: Either SEEK_HOLE or SEEK_DATA.       
3029  *                                               
3030  * If the page cache knows which blocks conta    
3031  * contain data, your filesystem can use this    
3032  * SEEK_HOLE and SEEK_DATA.  This is useful f    
3033  * entirely memory-based such as tmpfs, and f    
3034  * unwritten extents.                            
3035  *                                               
3036  * Return: The requested offset on success, o    
3037  * SEEK_DATA and there is no data after @star    
3038  * after @end - 1, so SEEK_HOLE returns @end     
3039  * and @end contain data.                        
3040  */                                              
3041 loff_t mapping_seek_hole_data(struct address_    
3042                 loff_t end, int whence)          
3043 {                                                
3044         XA_STATE(xas, &mapping->i_pages, star    
3045         pgoff_t max = (end - 1) >> PAGE_SHIFT    
3046         bool seek_data = (whence == SEEK_DATA    
3047         struct folio *folio;                     
3048                                                  
3049         if (end <= start)                        
3050                 return -ENXIO;                   
3051                                                  
3052         rcu_read_lock();                         
3053         while ((folio = find_get_entry(&xas,     
3054                 loff_t pos = (u64)xas.xa_inde    
3055                 size_t seek_size;                
3056                                                  
3057                 if (start < pos) {               
3058                         if (!seek_data)          
3059                                 goto unlock;     
3060                         start = pos;             
3061                 }                                
3062                                                  
3063                 seek_size = seek_folio_size(&    
3064                 pos = round_up((u64)pos + 1,     
3065                 start = folio_seek_hole_data(    
3066                                 seek_data);      
3067                 if (start < pos)                 
3068                         goto unlock;             
3069                 if (start >= end)                
3070                         break;                   
3071                 if (seek_size > PAGE_SIZE)       
3072                         xas_set(&xas, pos >>     
3073                 if (!xa_is_value(folio))         
3074                         folio_put(folio);        
3075         }                                        
3076         if (seek_data)                           
3077                 start = -ENXIO;                  
3078 unlock:                                          
3079         rcu_read_unlock();                       
3080         if (folio && !xa_is_value(folio))        
3081                 folio_put(folio);                
3082         if (start > end)                         
3083                 return end;                      
3084         return start;                            
3085 }                                                
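
/*
 * Usage sketch (illustration only, not part of filemap.c): a filesystem
 * whose page cache fully describes its data, such as tmpfs, can build its
 * ->llseek on top of mapping_seek_hole_data().  The myfs_ name is
 * hypothetical; the shape mirrors shmem_file_llseek().
 */
static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
{
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;

        if (whence != SEEK_DATA && whence != SEEK_HOLE)
                return generic_file_llseek_size(file, offset, whence,
                                        MAX_LFS_FILESIZE, i_size_read(inode));
        if (offset < 0)
                return -ENXIO;

        inode_lock(inode);
        /* Holding i_rwsem, so i_size is stable here. */
        offset = mapping_seek_hole_data(mapping, offset, inode->i_size,
                                        whence);
        if (offset >= 0)
                offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
        inode_unlock(inode);
        return offset;
}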

#ifdef CONFIG_MMU
#define MMAP_LOTSAMISS  (100)
/*
 * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
 * @vmf - the vm_fault for this fault.
 * @folio - the folio to lock.
 * @fpin - the pointer to the file we may pin (or is already pinned).
 *
 * This works similar to lock_folio_or_retry in that it can drop the
 * mmap_lock.  It differs in that it actually returns the folio locked
 * if it returns 1 and 0 if it couldn't lock the folio.  If we did have
 * to drop the mmap_lock then fpin will point to the pinned file and
 * needs to be fput()'ed at a later point.
 */
static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
                                     struct file **fpin)
{
        if (folio_trylock(folio))
                return 1;

        /*
         * NOTE! This will make us return with VM_FAULT_RETRY, but with
         * the fault lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
         * is supposed to work. We have way too many special cases..
         */
        if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                return 0;

        *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
        if (vmf->flags & FAULT_FLAG_KILLABLE) {
                if (__folio_lock_killable(folio)) {
                        /*
                         * We didn't have the right flags to drop the
                         * fault lock, but all fault_handlers only check
                         * for fatal signals if we return VM_FAULT_RETRY,
                         * so we need to drop the fault lock here and
                         * return 0 if we don't have a fpin.
                         */
                        if (*fpin == NULL)
                                release_fault_lock(vmf);
                        return 0;
                }
        } else
                __folio_lock(folio);

        return 1;
}

/*
 * Synchronous readahead happens when we don't even find a page in the page
 * cache at all.  We don't want to perform IO under the mmap sem, so if we have
 * to drop the mmap sem we return the file that was pinned in order for us to do
 * that.  If we didn't pin a file then we return NULL.  The file that is
 * returned needs to be fput()'ed when we're done with it.
 */
static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
{
        struct file *file = vmf->vma->vm_file;
        struct file_ra_state *ra = &file->f_ra;
        struct address_space *mapping = file->f_mapping;
        DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
        struct file *fpin = NULL;
        unsigned long vm_flags = vmf->vma->vm_flags;
        unsigned int mmap_miss;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        /* Use the readahead code, even if readahead is disabled */
        if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
                ra->size = HPAGE_PMD_NR;
                /*
                 * Fetch two PMD folios, so we get the chance to actually
                 * readahead, unless we've been asked not to.
                 */
                if (!(vm_flags & VM_RAND_READ))
                        ra->size *= 2;
                ra->async_size = HPAGE_PMD_NR;
                page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
                return fpin;
        }
#endif

        /* If we don't want any read-ahead, don't bother */
        if (vm_flags & VM_RAND_READ)
                return fpin;
        if (!ra->ra_pages)
                return fpin;

        if (vm_flags & VM_SEQ_READ) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                page_cache_sync_ra(&ractl, ra->ra_pages);
                return fpin;
        }

        /* Avoid banging the cache line if not needed */
        mmap_miss = READ_ONCE(ra->mmap_miss);
        if (mmap_miss < MMAP_LOTSAMISS * 10)
                WRITE_ONCE(ra->mmap_miss, ++mmap_miss);

        /*
         * Do we miss much more than hit in this file? If so,
         * stop bothering with read-ahead. It will only hurt.
         */
        if (mmap_miss > MMAP_LOTSAMISS)
                return fpin;

        /*
         * mmap read-around
         */
        fpin = maybe_unlock_mmap_for_io(vmf, fpin);
        ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
        ra->size = ra->ra_pages;
        ra->async_size = ra->ra_pages / 4;
        ractl._index = ra->start;
        page_cache_ra_order(&ractl, ra, 0);
        return fpin;
}
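
/*
 * Illustration (userspace, not kernel code): the VM_SEQ_READ and
 * VM_RAND_READ branches above are set by madvise(2).  MADV_SEQUENTIAL
 * steers faults into the aggressive page_cache_sync_ra() path, while
 * MADV_RANDOM suppresses fault-time readahead entirely:
 *
 *      void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *      if (p != MAP_FAILED)
 *              madvise(p, len, MADV_SEQUENTIAL);       (sets VM_SEQ_READ)
 */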

/*
 * Asynchronous readahead happens when we find the page and PG_readahead,
 * so we want to possibly extend the readahead further.  We return the file that
 * was pinned if we have to drop the mmap_lock in order to do IO.
 */
static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
                                            struct folio *folio)
{
        struct file *file = vmf->vma->vm_file;
        struct file_ra_state *ra = &file->f_ra;
        DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
        struct file *fpin = NULL;
        unsigned int mmap_miss;

        /* If we don't want any read-ahead, don't bother */
        if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
                return fpin;

        mmap_miss = READ_ONCE(ra->mmap_miss);
        if (mmap_miss)
                WRITE_ONCE(ra->mmap_miss, --mmap_miss);

        if (folio_test_readahead(folio)) {
                fpin = maybe_unlock_mmap_for_io(vmf, fpin);
                page_cache_async_ra(&ractl, folio, ra->ra_pages);
        }
        return fpin;
}

static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        vm_fault_t ret = 0;
        pte_t *ptep;

        /*
         * We might have COW'ed a pagecache folio and might now have an mlocked
         * anon folio mapped. The original pagecache folio is not mlocked and
         * might have been evicted. During a read+clear/modify/write update of
         * the PTE, such as done in do_numa_page()/change_pte_range(), we
         * temporarily clear the PTE under PT lock and might detect it here as
         * "none" when not holding the PT lock.
         *
         * Not rechecking the PTE under PT lock could result in an unexpected
         * major fault in an mlock'ed region. Recheck only for this special
         * scenario while holding the PT lock, to not degrade non-mlocked
         * scenarios. Recheck the PTE without PT lock firstly, thereby reducing
         * the number of times we hold PT lock.
         */
        if (!(vma->vm_flags & VM_LOCKED))
                return 0;

        if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
                return 0;

        ptep = pte_offset_map_nolock(vma->vm_mm, vmf->pmd, vmf->address,
                                     &vmf->ptl);
        if (unlikely(!ptep))
                return VM_FAULT_NOPAGE;

        if (unlikely(!pte_none(ptep_get_lockless(ptep)))) {
                ret = VM_FAULT_NOPAGE;
        } else {
                spin_lock(vmf->ptl);
                if (unlikely(!pte_none(ptep_get(ptep))))
                        ret = VM_FAULT_NOPAGE;
                spin_unlock(vmf->ptl);
        }
        pte_unmap(ptep);
        return ret;
}

/**
 * filemap_fault - read in file data for page fault handling
 * @vmf:        struct vm_fault containing details of the fault
 *
 * filemap_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 *
 * vma->vm_mm->mmap_lock must be held on entry.
 *
 * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
 * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().
 *
 * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
 * has not been released.
 *
 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
 *
 * Return: bitwise-OR of %VM_FAULT_ codes.
 */
vm_fault_t filemap_fault(struct vm_fault *vmf)
{
        int error;
        struct file *file = vmf->vma->vm_file;
        struct file *fpin = NULL;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        pgoff_t max_idx, index = vmf->pgoff;
        struct folio *folio;
        vm_fault_t ret = 0;
        bool mapping_locked = false;

        max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        if (unlikely(index >= max_idx))
                return VM_FAULT_SIGBUS;

        trace_mm_filemap_fault(mapping, index);

        /*
         * Do we have something in the page cache already?
         */
        folio = filemap_get_folio(mapping, index);
        if (likely(!IS_ERR(folio))) {
                /*
                 * We found the page, so try async readahead before waiting for
                 * the lock.
                 */
                if (!(vmf->flags & FAULT_FLAG_TRIED))
                        fpin = do_async_mmap_readahead(vmf, folio);
                if (unlikely(!folio_test_uptodate(folio))) {
                        filemap_invalidate_lock_shared(mapping);
                        mapping_locked = true;
                }
        } else {
                ret = filemap_fault_recheck_pte_none(vmf);
                if (unlikely(ret))
                        return ret;

                /* No page in the page cache at all */
                count_vm_event(PGMAJFAULT);
                count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
                fpin = do_sync_mmap_readahead(vmf);
retry_find:
                /*
                 * See comment in filemap_create_folio() why we need
                 * invalidate_lock
                 */
                if (!mapping_locked) {
                        filemap_invalidate_lock_shared(mapping);
                        mapping_locked = true;
                }
                folio = __filemap_get_folio(mapping, index,
                                          FGP_CREAT|FGP_FOR_MMAP,
                                          vmf->gfp_mask);
                if (IS_ERR(folio)) {
                        if (fpin)
                                goto out_retry;
                        filemap_invalidate_unlock_shared(mapping);
                        return VM_FAULT_OOM;
                }
        }

        if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
                goto out_retry;

        /* Did it get truncated? */
        if (unlikely(folio->mapping != mapping)) {
                folio_unlock(folio);
                folio_put(folio);
                goto retry_find;
        }
        VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);

        /*
         * We have a locked folio in the page cache, now we need to check
         * that it's up-to-date. If not, it is going to be due to an error,
         * or because readahead was otherwise unable to retrieve it.
         */
        if (unlikely(!folio_test_uptodate(folio))) {
                /*
                 * If the invalidate lock is not held, the folio was in cache
                 * and uptodate and now it is not. Strange but possible since we
                 * didn't hold the page lock all the time. Let's drop
                 * everything, get the invalidate lock and try again.
                 */
                if (!mapping_locked) {
                        folio_unlock(folio);
                        folio_put(folio);
                        goto retry_find;
                }

                /*
                 * OK, the folio is really not uptodate. This can be because the
                 * VMA has the VM_RAND_READ flag set, or because an error
                 * arose. Let's read it in directly.
                 */
                goto page_not_uptodate;
        }

        /*
         * We've made it this far and we had to drop our mmap_lock, now is the
         * time to return to the upper layer and have it re-find the vma and
         * redo the fault.
         */
        if (fpin) {
                folio_unlock(folio);
                goto out_retry;
        }
        if (mapping_locked)
                filemap_invalidate_unlock_shared(mapping);

        /*
         * Found the page and have a reference on it.
         * We must recheck i_size under page lock.
         */
        max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
        if (unlikely(index >= max_idx)) {
                folio_unlock(folio);
                folio_put(folio);
                return VM_FAULT_SIGBUS;
        }

        vmf->page = folio_file_page(folio, index);
        return ret | VM_FAULT_LOCKED;

page_not_uptodate:
        /*
         * Umm, take care of errors if the page isn't up-to-date.
         * Try to re-read it _once_. We do this synchronously,
         * because there really aren't any performance issues here
         * and we need to check for errors.
         */
        fpin = maybe_unlock_mmap_for_io(vmf, fpin);
        error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
        if (fpin)
                goto out_retry;
        folio_put(folio);

        if (!error || error == AOP_TRUNCATED_PAGE)
                goto retry_find;
        filemap_invalidate_unlock_shared(mapping);

        return VM_FAULT_SIGBUS;

out_retry:
        /*
         * We dropped the mmap_lock, we need to return to the fault handler to
         * re-find the vma and come back and find our hopefully still populated
         * page.
         */
        if (!IS_ERR(folio))
                folio_put(folio);
        if (mapping_locked)
                filemap_invalidate_unlock_shared(mapping);
        if (fpin)
                fput(fpin);
        return ret | VM_FAULT_RETRY;
}
EXPORT_SYMBOL(filemap_fault);
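
/*
 * Usage sketch (illustration only, not part of filemap.c): filesystems
 * that must serialise page faults against their own truncate or
 * hole-punching machinery often wrap filemap_fault() rather than use it
 * directly.  The myfs_ names and the i_mmap_sem field are hypothetical;
 * ext4 used this pattern before the generic invalidate_lock existed.
 */
static vm_fault_t myfs_fault(struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vmf->vma->vm_file);
        vm_fault_t ret;

        /* Block fs-internal truncation for the duration of the fault. */
        down_read(&MYFS_I(inode)->i_mmap_sem);
        ret = filemap_fault(vmf);
        up_read(&MYFS_I(inode)->i_mmap_sem);

        return ret;
}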

static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
                pgoff_t start)
{
        struct mm_struct *mm = vmf->vma->vm_mm;

        /* Huge page is mapped? No need to proceed. */
        if (pmd_trans_huge(*vmf->pmd)) {
                folio_unlock(folio);
                folio_put(folio);
                return true;
        }

        if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
                struct page *page = folio_file_page(folio, start);
                vm_fault_t ret = do_set_pmd(vmf, page);
                if (!ret) {
                        /* The page is mapped successfully, reference consumed. */
                        folio_unlock(folio);
                        return true;
                }
        }

        if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
                pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);

        return false;
}

static struct folio *next_uptodate_folio(struct xa_state *xas,
                struct address_space *mapping, pgoff_t end_pgoff)
{
        struct folio *folio = xas_next_entry(xas, end_pgoff);
        unsigned long max_idx;

        do {
                if (!folio)
                        return NULL;
                if (xas_retry(xas, folio))
                        continue;
                if (xa_is_value(folio))
                        continue;
                if (folio_test_locked(folio))
                        continue;
                if (!folio_try_get(folio))
                        continue;
                /* Has the page moved or been split? */
                if (unlikely(folio != xas_reload(xas)))
                        goto skip;
                if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
                        goto skip;
                if (!folio_trylock(folio))
                        goto skip;
                if (folio->mapping != mapping)
                        goto unlock;
                if (!folio_test_uptodate(folio))
                        goto unlock;
                max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
                if (xas->xa_index >= max_idx)
                        goto unlock;
                return folio;
unlock:
                folio_unlock(folio);
skip:
                folio_put(folio);
        } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);

        return NULL;
}

/*
 * Map page range [start_page, start_page + nr_pages) of folio.
 * start_page is obtained from start by folio_page(folio, start).
 */
static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
                        struct folio *folio, unsigned long start,
                        unsigned long addr, unsigned int nr_pages,
                        unsigned long *rss, unsigned int *mmap_miss)
{
        vm_fault_t ret = 0;
        struct page *page = folio_page(folio, start);
        unsigned int count = 0;
        pte_t *old_ptep = vmf->pte;

        do {
                if (PageHWPoison(page + count))
                        goto skip;

                /*
                 * If there are too many folios that are recently evicted
                 * in a file, they will probably continue to be evicted.
                 * In such situation, read-ahead is only a waste of IO.
                 * Don't decrease mmap_miss in this scenario to make sure
                 * we can stop read-ahead.
                 */
                if (!folio_test_workingset(folio))
                        (*mmap_miss)++;

                /*
                 * NOTE: If there're PTE markers, we'll leave them to be
                 * handled in the specific fault path, and it'll fail the
                 * fault-around logic.
                 */
                if (!pte_none(ptep_get(&vmf->pte[count])))
                        goto skip;

                count++;
                continue;
skip:
                if (count) {
                        set_pte_range(vmf, folio, page, count, addr);
                        *rss += count;
                        folio_ref_add(folio, count);
                        if (in_range(vmf->address, addr, count * PAGE_SIZE))
                                ret = VM_FAULT_NOPAGE;
                }

                count++;
                page += count;
                vmf->pte += count;
                addr += count * PAGE_SIZE;
                count = 0;
        } while (--nr_pages > 0);

        if (count) {
                set_pte_range(vmf, folio, page, count, addr);
                *rss += count;
                folio_ref_add(folio, count);
                if (in_range(vmf->address, addr, count * PAGE_SIZE))
                        ret = VM_FAULT_NOPAGE;
        }

        vmf->pte = old_ptep;

        return ret;
}

static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
                struct folio *folio, unsigned long addr,
                unsigned long *rss, unsigned int *mmap_miss)
{
        vm_fault_t ret = 0;
        struct page *page = &folio->page;

        if (PageHWPoison(page))
                return ret;

        /* See comment of filemap_map_folio_range() */
        if (!folio_test_workingset(folio))
                (*mmap_miss)++;

        /*
         * NOTE: If there're PTE markers, we'll leave them to be
         * handled in the specific fault path, and it'll fail
         * the fault-around logic.
         */
        if (!pte_none(ptep_get(vmf->pte)))
                return ret;

        if (vmf->address == addr)
                ret = VM_FAULT_NOPAGE;

        set_pte_range(vmf, folio, page, 1, addr);
        (*rss)++;
        folio_ref_inc(folio);

        return ret;
}

vm_fault_t filemap_map_pages(struct vm_fault *vmf,
                             pgoff_t start_pgoff, pgoff_t end_pgoff)
{
        struct vm_area_struct *vma = vmf->vma;
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        pgoff_t file_end, last_pgoff = start_pgoff;
        unsigned long addr;
        XA_STATE(xas, &mapping->i_pages, start_pgoff);
        struct folio *folio;
        vm_fault_t ret = 0;
        unsigned long rss = 0;
        unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved, folio_type;

        rcu_read_lock();
        folio = next_uptodate_folio(&xas, mapping, end_pgoff);
        if (!folio)
                goto out;

        if (filemap_map_pmd(vmf, folio, start_pgoff)) {
                ret = VM_FAULT_NOPAGE;
                goto out;
        }

        addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
        if (!vmf->pte) {
                folio_unlock(folio);
                folio_put(folio);
                goto out;
        }

        file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
        if (end_pgoff > file_end)
                end_pgoff = file_end;

        folio_type = mm_counter_file(folio);
        do {
                unsigned long end;

                addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
                vmf->pte += xas.xa_index - last_pgoff;
                last_pgoff = xas.xa_index;
                end = folio_next_index(folio) - 1;
                nr_pages = min(end, end_pgoff) - xas.xa_index + 1;

                if (!folio_test_large(folio))
                        ret |= filemap_map_order0_folio(vmf,
                                        folio, addr, &rss, &mmap_miss);
                else
                        ret |= filemap_map_folio_range(vmf, folio,
                                        xas.xa_index - folio->index, addr,
                                        nr_pages, &rss, &mmap_miss);

                folio_unlock(folio);
                folio_put(folio);
        } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
        add_mm_counter(vma->vm_mm, folio_type, rss);
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff);
out:
        rcu_read_unlock();

        mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
        if (mmap_miss >= mmap_miss_saved)
                WRITE_ONCE(file->f_ra.mmap_miss, 0);
        else
                WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);

        return ret;
}
EXPORT_SYMBOL(filemap_map_pages);
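
/*
 * Usage sketch (illustration only, not part of filemap.c): a filesystem
 * that needs to allocate blocks before a shared mapping may be dirtied
 * reuses the exported fault helpers unchanged and supplies only its own
 * ->page_mkwrite, as btrfs does.  The myfs_ names are hypothetical.
 */
static const struct vm_operations_struct myfs_file_vm_ops = {
        .fault          = filemap_fault,        /* exported above */
        .map_pages      = filemap_map_pages,    /* exported above */
        .page_mkwrite   = myfs_page_mkwrite,    /* fs-specific block allocation */
};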

vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
{
        struct address_space *mapping = vmf->vma->vm_file->f_mapping;
        struct folio *folio = page_folio(vmf->page);
        vm_fault_t ret = VM_FAULT_LOCKED;

        sb_start_pagefault(mapping->host->i_sb);
        file_update_time(vmf->vma->vm_file);
        folio_lock(folio);
        if (folio->mapping != mapping) {
                folio_unlock(folio);
                ret = VM_FAULT_NOPAGE;
                goto out;
        }
        /*
         * We mark the folio dirty already here so that when freeze is in
         * progress, we are guaranteed that writeback during freezing will
         * see the dirty folio and writeprotect it again.
         */
        folio_mark_dirty(folio);
        folio_wait_stable(folio);
out:
        sb_end_pagefault(mapping->host->i_sb);
        return ret;
}

const struct vm_operations_struct generic_file_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = filemap_page_mkwrite,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct address_space *mapping = file->f_mapping;

        if (!mapping->a_ops->read_folio)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &generic_file_vm_ops;
        return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
        if (vma_is_shared_maywrite(vma))
                return -EINVAL;
        return generic_file_mmap(file, vma);
}
#else
vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(filemap_page_mkwrite);
EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
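
/*
 * Usage sketch (illustration only, not part of filemap.c): a simple
 * filesystem wires the generic entry points straight into its
 * file_operations; one that cannot write dirty pages back would use
 * generic_file_readonly_mmap instead.  The myfs_ name is hypothetical.
 */
static const struct file_operations myfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
};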

static struct folio *do_read_cache_folio(struct address_space *mapping,
                pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
{
        struct folio *folio;
        int err;

        if (!filler)
                filler = mapping->a_ops->read_folio;
repeat:
        folio = filemap_get_folio(mapping, index);
        if (IS_ERR(folio)) {
                folio = filemap_alloc_folio(gfp,
                                            mapping_min_folio_order(mapping));
                if (!folio)
                        return ERR_PTR(-ENOMEM);
                index = mapping_align_index(mapping, index);
                err = filemap_add_folio(mapping, folio, index, gfp);
                if (unlikely(err)) {
                        folio_put(folio);
                        if (err == -EEXIST)
                                goto repeat;
                        /* Presumably ENOMEM for xarray node */
                        return ERR_PTR(err);
                }

                goto filler;
        }
        if (folio_test_uptodate(folio))
                goto out;

        if (!folio_trylock(folio)) {
                folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
                goto repeat;
        }

        /* Folio was truncated from mapping */
        if (!folio->mapping) {
                folio_unlock(folio);
                folio_put(folio);
                goto repeat;
        }

        /* Someone else locked and filled the page in a very small window */
        if (folio_test_uptodate(folio)) {
                folio_unlock(folio);
                goto out;
        }

filler:
        err = filemap_read_folio(file, filler, folio);
        if (err) {
                folio_put(folio);
                if (err == AOP_TRUNCATED_PAGE)
                        goto repeat;
                return ERR_PTR(err);
        }

out:
        folio_mark_accessed(folio);
        return folio;
}

/**
 * read_cache_folio - Read into page cache, fill it if needed.
 * @mapping: The address_space to read from.
 * @index: The index to read.
 * @filler: Function to perform the read, or NULL to use aops->read_folio().
 * @file: Passed to filler function, may be NULL if not required.
 *
 * Read one page into the page cache.  If it succeeds, the folio returned
 * will contain @index, but it may not be the first page of the folio.
 *
 * If the filler function returns an error, it will be returned to the
 * caller.
 *
 * Context: May sleep.  Expects mapping->invalidate_lock to be held.
 * Return: An uptodate folio on success, ERR_PTR() on failure.
 */
struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
                filler_t filler, struct file *file)
{
        return do_read_cache_folio(mapping, index, filler, file,
                        mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_folio);
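
/*
 * Usage sketch (illustration only, not part of filemap.c): metadata code
 * typically reads one uptodate folio and maps just the piece it needs.
 * The myfs_ name is hypothetical; per the Context note above, the caller
 * is assumed to hold mapping->invalidate_lock, and @len must not cross a
 * page boundary since kmap_local_folio() maps a single page.
 */
static int myfs_read_blob(struct address_space *mapping, loff_t pos,
                          void *buf, size_t len)
{
        struct folio *folio;
        void *addr;

        folio = read_cache_folio(mapping, pos >> PAGE_SHIFT, NULL, NULL);
        if (IS_ERR(folio))
                return PTR_ERR(folio);

        addr = kmap_local_folio(folio, offset_in_folio(folio, pos));
        memcpy(buf, addr, len);
        kunmap_local(addr);
        folio_put(folio);
        return 0;
}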

/**
 * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
 * @mapping:    The address_space for the folio.
 * @index:      The index that the allocated folio will contain.
 * @gfp:        The page allocator flags to use if allocating.
 *
 * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with
 * any new memory allocations done using the specified allocation flags.
 *
 * The most likely error from this function is EIO, but ENOMEM is
 * possible and so is EINTR.  If ->read_folio returns another error,
 * that will be returned to the caller.
 *
 * The function expects mapping->invalidate_lock to be already held.
 *
 * Return: Uptodate folio on success, ERR_PTR() on failure.
 */
struct folio *mapping_read_folio_gfp(struct address_space *mapping,
                pgoff_t index, gfp_t gfp)
{
        return do_read_cache_folio(mapping, index, NULL, NULL, gfp);
}
EXPORT_SYMBOL(mapping_read_folio_gfp);

static struct page *do_read_cache_page(struct address_space *mapping,
                pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
{
        struct folio *folio;

        folio = do_read_cache_folio(mapping, index, filler, file, gfp);
        if (IS_ERR(folio))
                return &folio->page;    /* passes the ERR_PTR through */
        return folio_file_page(folio, index);
}

struct page *read_cache_page(struct address_space *mapping,
                        pgoff_t index, filler_t *filler, struct file *file)
{
        return do_read_cache_page(mapping, index, filler, file,
                        mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);

/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:    the page's address_space
 * @index:      the page index
 * @gfp:        the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 *
 * The function expects mapping->invalidate_lock to be already held.
 *
 * Return: up to date page on success, ERR_PTR() on failure.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index,
                                gfp_t gfp)
{
        return do_read_cache_page(mapping, index, NULL, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);
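
/*
 * Usage sketch (illustration only, not part of filemap.c): callers that
 * must avoid recursing into filesystem reclaim read metadata pages with
 * a constrained mask, as btrfs does.  The myfs_ name is hypothetical.
 */
static struct page *myfs_get_meta_page(struct address_space *mapping,
                                       pgoff_t index)
{
        return read_cache_page_gfp(mapping, index,
                        mapping_gfp_constraint(mapping, ~__GFP_FS));
}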

/*
 * Warn about a page cache invalidation failure during a direct I/O write.
 */
static void dio_warn_stale_pagecache(struct file *filp)
{
        static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
        char pathname[128];
        char *path;

        errseq_set(&filp->f_mapping->wb_err, -EIO);
        if (__ratelimit(&_rs)) {
                path = file_path(filp, pathname, sizeof(pathname));
                if (IS_ERR(path))
                        path = "(unknown)";
                pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
                pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
                        current->comm);
        }
}

void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;

        if (mapping->nrpages &&
            invalidate_inode_pages2_range(mapping,
                        iocb->ki_pos >> PAGE_SHIFT,
                        (iocb->ki_pos + count - 1) >> PAGE_SHIFT))
                dio_warn_stale_pagecache(iocb->ki_filp);
}
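
/*
 * Usage sketch (illustration only, not part of filemap.c): a direct-I/O
 * completion path that does not go through generic_file_direct_write()
 * (the iomap dio code is the in-tree example) invokes this helper itself
 * once the write has finished, so racing buffered readers cannot keep
 * serving stale cached data.  Simplified; the myfs_ name is hypothetical.
 */
static void myfs_dio_write_complete(struct kiocb *iocb, ssize_t transferred)
{
        if (transferred > 0)
                kiocb_invalidate_post_direct_write(iocb, transferred);
}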

ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        size_t write_len = iov_iter_count(from);
        ssize_t written;

        /*
         * If a page can not be invalidated, return 0 to fall back
         * to buffered write.
         */
        written = kiocb_invalidate_pages(iocb, write_len);
        if (written) {
                if (written == -EBUSY)
                        return 0;
                return written;
        }

        written = mapping->a_ops->direct_IO(iocb, from);

        /*
         * Finally, try again to invalidate clean pages which might have been
         * cached by non-direct readahead, or faulted in by get_user_pages()
         * if the source of the write was an mmap'ed region of the file
         * we're writing.  Either one is a pretty crazy thing to do,
         * so we don't support it 100%.  If this invalidation
         * fails, tough, the write still works...
         *
         * Most of the time we do not need this since dio_complete() will do
         * the invalidation for us. However there are some file systems that
         * do not end up with dio_complete() being called, so let's not break
         * them by removing it completely.
         *
         * Noticeable example is a blkdev_direct_IO().
         *
         * Skip invalidation for async writes or if mapping has no pages.
         */
        if (written > 0) {
                struct inode *inode = mapping->host;
                loff_t pos = iocb->ki_pos;

                kiocb_invalidate_post_direct_write(iocb, written);
                pos += written;
                write_len -= written;
                if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
                        i_size_write(inode, pos);
                        mark_inode_dirty(inode);
                }
                iocb->ki_pos = pos;
        }
        if (written != -EIOCBQUEUED)
                iov_iter_revert(from, write_len - iov_iter_count(from));
        return written;
}
EXPORT_SYMBOL(generic_file_direct_write);
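
/*
 * Caller contract sketch (illustration only, not part of filemap.c):
 * a return of 0 with bytes still left in @from means the pre-write
 * invalidation hit -EBUSY and the caller should fall back to buffered
 * I/O.  __generic_file_write_iter() below applies exactly this policy;
 * a filesystem with its own ->write_iter would follow the same shape
 * (the myfs_ name is hypothetical).
 */
static ssize_t myfs_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret = generic_file_direct_write(iocb, from);

        if (ret < 0 || !iov_iter_count(from))
                return ret;     /* error, or everything went direct */
        return direct_write_fallback(iocb, from, ret,
                        generic_perform_write(iocb, from));
}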

ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
{
        struct file *file = iocb->ki_filp;
        loff_t pos = iocb->ki_pos;
        struct address_space *mapping = file->f_mapping;
        const struct address_space_operations *a_ops = mapping->a_ops;
        size_t chunk = mapping_max_folio_size(mapping);
        long status = 0;
        ssize_t written = 0;

        do {
                struct folio *folio;
                size_t offset;          /* Offset into folio */
                size_t bytes;           /* Bytes to write to folio */
                size_t copied;          /* Bytes copied from user */
                void *fsdata = NULL;

                bytes = iov_iter_count(i);
retry:
                offset = pos & (chunk - 1);
                bytes = min(chunk - offset, bytes);
                balance_dirty_pages_ratelimited(mapping);

                /*
                 * Bring in the user page that we will copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
                 * same page as we're writing to, without it being marked
                 * up-to-date.
                 */
                if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
                        status = -EFAULT;
                        break;
                }

                if (fatal_signal_pending(current)) {
                        status = -EINTR;
                        break;
                }

                status = a_ops->write_begin(file, mapping, pos, bytes,
                                                &folio, &fsdata);
                if (unlikely(status < 0))
                        break;

                offset = offset_in_folio(folio, pos);
                if (bytes > folio_size(folio) - offset)
                        bytes = folio_size(folio) - offset;

                if (mapping_writably_mapped(mapping))
                        flush_dcache_folio(folio);

                copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
                flush_dcache_folio(folio);

                status = a_ops->write_end(file, mapping, pos, bytes, copied,
                                                folio, fsdata);
                if (unlikely(status != copied)) {
                        iov_iter_revert(i, copied - max(status, 0L));
                        if (unlikely(status < 0))
                                break;
                }
                cond_resched();

                if (unlikely(status == 0)) {
                        /*
                         * A short copy made ->write_end() reject the
                         * thing entirely.  Might be memory poisoning
                         * halfway through, might be a race with munmap,
                         * might be severe memory pressure.
                         */
                        if (chunk > PAGE_SIZE)
                                chunk /= 2;
                        if (copied) {
                                bytes = copied;
                                goto retry;
                        }
                } else {
                        pos += status;
                        written += status;
                }
        } while (iov_iter_count(i));

        if (!written)
                return status;
        iocb->ki_pos += written;
        return written;
}
EXPORT_SYMBOL(generic_perform_write);
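
/*
 * Usage sketch (illustration only, not part of filemap.c): a minimal
 * buffered ->write_iter built on generic_perform_write(), doing its own
 * locking and O_SYNC handling.  The myfs_ name is hypothetical; compare
 * generic_file_write_iter() at the end of this file.
 */
static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        inode_lock(inode);
        ret = generic_write_checks(iocb, from);
        if (ret > 0)
                ret = generic_perform_write(iocb, from);
        inode_unlock(inode);

        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}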

/**
 * __generic_file_write_iter - write data to a file
 * @iocb:       IO state structure (file, offset, etc.)
 * @from:       iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_rwsem to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_rwsem.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;

        ret = file_remove_privs(file);
        if (ret)
                return ret;

        ret = file_update_time(file);
        if (ret)
                return ret;

        if (iocb->ki_flags & IOCB_DIRECT) {
                ret = generic_file_direct_write(iocb, from);
                /*
                 * If the write stopped short of completing, fall back to
                 * buffered writes.  Some filesystems do this for writes to
                 * holes, for example.  For DAX files, a buffered write will
                 * not succeed (even if it did, DAX does not handle dirty
                 * page-cache pages correctly).
                 */
                if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode))
                        return ret;
                return direct_write_fallback(iocb, from, ret,
                                generic_perform_write(iocb, from));
        }

        return generic_perform_write(iocb, from);
}
EXPORT_SYMBOL(__generic_file_write_iter);

/**
 * generic_file_write_iter - write data to a file
 * @iocb:	IO state structure
 * @from:	iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file in case of an O_SYNC write
 * and acquires i_rwsem as needed.
 * Return:
 * * negative error code if no data has been written at all or if
 *   vfs_fsync_range() failed for a synchronous write
 * * number of bytes written, even for truncated writes
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
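
/*
 * Illustrative sketch, not part of filemap.c: most simple filesystems
 * need no write path of their own and just point their file_operations
 * at the generic iterators; "example_file_operations" is a hypothetical
 * name, but every helper referenced here is a real exported symbol.
 */
static const struct file_operations example_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,	/* locks, writes, syncs */
	.mmap		= generic_file_mmap,
	.fsync		= generic_file_fsync,
};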

/**
 * filemap_release_folio() - Release fs-specific metadata on a folio.
 * @folio: The folio which the kernel is trying to free.
 * @gfp: Memory allocation flags (and I/O mode).
 *
 * The address_space is trying to release any data attached to a folio
 * (presumably at folio->private).
 *
 * This will also be called if the private_2 flag is set on a page,
 * indicating that the folio has other metadata associated with it.
 *
 * The @gfp argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the caller may reclaim
 * (__GFP_RECLAIM & __GFP_FS).
 *
 * Return: %true if the release was successful, otherwise %false.
 */
bool filemap_release_folio(struct folio *folio, gfp_t gfp)
{
	struct address_space * const mapping = folio->mapping;

	BUG_ON(!folio_test_locked(folio));
	if (!folio_needs_release(folio))
		return true;
	if (folio_test_writeback(folio))
		return false;

	if (mapping && mapping->a_ops->release_folio)
		return mapping->a_ops->release_folio(folio, gfp);
	return try_to_free_buffers(folio);
}
EXPORT_SYMBOL(filemap_release_folio);
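
/*
 * Illustrative sketch, not part of filemap.c: a filesystem that hangs
 * metadata off folio->private supplies ->release_folio to free it;
 * filemap_release_folio() above dispatches here when the folio needs
 * release.  "struct example_meta" is a hypothetical per-folio structure.
 */
struct example_meta {
	unsigned long state;
};

static bool example_release_folio(struct folio *folio, gfp_t gfp)
{
	struct example_meta *meta;

	if (!folio_test_private(folio))
		return true;			/* nothing attached */

	meta = folio_detach_private(folio);	/* clears PG_private */
	kfree(meta);
	return true;				/* folio may now be freed */
}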

/**
 * filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache
 * @inode: The inode to flush
 * @flush: Set to write back rather than simply invalidate.
 * @start: First byte in range.
 * @end: Last byte in range (inclusive), or LLONG_MAX for everything from start
 *       onwards.
 *
 * Invalidate all the folios on an inode that contribute to the specified
 * range, possibly writing them back first.  Whilst the operation is
 * undertaken, the invalidate lock is held to prevent new folios from being
 * installed.
 */
int filemap_invalidate_inode(struct inode *inode, bool flush,
			     loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t first = start >> PAGE_SHIFT;
	pgoff_t last = end >> PAGE_SHIFT;
	pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1;

	if (!mapping || !mapping->nrpages || end < start)
		goto out;

	/* Prevent new folios from being added to the inode. */
	filemap_invalidate_lock(mapping);

	if (!mapping->nrpages)
		goto unlock;

	unmap_mapping_pages(mapping, first, nr, false);

	/* Write back the data if we're asked to. */
	if (flush) {
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_ALL,
			.nr_to_write	= LONG_MAX,
			.range_start	= start,
			.range_end	= end,
		};

		filemap_fdatawrite_wbc(mapping, &wbc);
	}

	/* Wait for writeback to complete on all folios and discard. */
	invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE);

unlock:
	filemap_invalidate_unlock(mapping);
out:
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL_GPL(filemap_invalidate_inode);
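
/*
 * Illustrative sketch, not part of filemap.c: a network filesystem that
 * learns of a remote change can write back and then drop the affected
 * byte range; the invalidate lock taken inside keeps new folios from
 * being instantiated while this runs.  "example_remote_change" is a
 * hypothetical helper.
 */
static int example_remote_change(struct inode *inode, loff_t pos, loff_t len)
{
	/* @end is the last byte, inclusive; flush == true writes back first */
	return filemap_invalidate_inode(inode, true, pos, pos + len - 1);
}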

#ifdef CONFIG_CACHESTAT_SYSCALL
/**
 * filemap_cachestat() - compute the page cache statistics of a mapping
 * @mapping:	The mapping to compute the statistics for.
 * @first_index:	The starting page cache index.
 * @last_index:	The final page index (inclusive).
 * @cs:	the cachestat struct to write the result to.
 *
 * This will query the page cache statistics of a mapping in the
 * page range of [first_index, last_index] (inclusive). The statistics
 * queried include: number of dirty pages, number of pages marked for
 * writeback, and the number of (recently) evicted pages.
 */
static void filemap_cachestat(struct address_space *mapping,
		pgoff_t first_index, pgoff_t last_index, struct cachestat *cs)
{
	XA_STATE(xas, &mapping->i_pages, first_index);
	struct folio *folio;

	/* Flush stats (and potentially sleep) outside the RCU read section. */
	mem_cgroup_flush_stats_ratelimited(NULL);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_index) {
		int order;
		unsigned long nr_pages;
		pgoff_t folio_first_index, folio_last_index;

		/*
		 * Don't deref the folio. It is not pinned, and might
		 * get freed (and reused) underneath us.
		 *
		 * We *could* pin it, but that would be expensive for
		 * what should be a fast and lightweight syscall.
		 *
		 * Instead, derive all information of interest from
		 * the rcu-protected xarray.
		 */

		if (xas_retry(&xas, folio))
			continue;

		order = xas_get_order(&xas);
		nr_pages = 1 << order;
		folio_first_index = round_down(xas.xa_index, 1 << order);
		folio_last_index = folio_first_index + nr_pages - 1;

		/* Folios might straddle the range boundaries, only count covered pages */
		if (folio_first_index < first_index)
			nr_pages -= first_index - folio_first_index;

		if (folio_last_index > last_index)
			nr_pages -= folio_last_index - last_index;

		if (xa_is_value(folio)) {
			/* page is evicted */
			void *shadow = (void *)folio;
			bool workingset; /* not used */

			cs->nr_evicted += nr_pages;

#ifdef CONFIG_SWAP /* implies CONFIG_MMU */
			if (shmem_mapping(mapping)) {
				/* shmem file - in swap cache */
				swp_entry_t swp = radix_to_swp_entry(folio);

				/* swapin error results in poisoned entry */
				if (non_swap_entry(swp))
					goto resched;

				/*
				 * Getting a swap entry from the shmem
				 * inode means we beat
				 * shmem_unuse(). rcu_read_lock()
				 * ensures swapoff waits for us before
				 * freeing the swapper space. However,
				 * we can race with swapping and
				 * invalidation, so there might not be
				 * a shadow in the swapcache (yet).
				 */
				shadow = get_shadow_from_swap_cache(swp);
				if (!shadow)
					goto resched;
			}
#endif
			if (workingset_test_recent(shadow, true, &workingset, false))
				cs->nr_recently_evicted += nr_pages;

			goto resched;
		}

		/* page is in cache */
		cs->nr_cache += nr_pages;

		if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
			cs->nr_dirty += nr_pages;

		if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
			cs->nr_writeback += nr_pages;

resched:
		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();
}
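
/*
 * Worked example of the boundary clamping above (editorial note, not
 * part of filemap.c): an order-2 folio covers page indices 8-11.  For a
 * query range of [10, 20], nr_pages starts at 4 and the 10 - 8 = 2
 * leading pages are subtracted, so the folio contributes only the 2
 * pages that actually fall inside the queried range.
 */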

/*
 * The cachestat(2) system call.
 *
 * cachestat() returns the page cache statistics of a file in the
 * bytes range specified by `off` and `len`: number of cached pages,
 * number of dirty pages, number of pages marked for writeback,
 * number of evicted pages, and number of recently evicted pages.
 *
 * An evicted page is a page that was previously in the page cache
 * but has been evicted since. A page is recently evicted if its last
 * eviction was recent enough that its reentry to the cache would
 * indicate that it is actively being used by the system, and that
 * there is memory pressure on the system.
 *
 * `off` and `len` must be non-negative integers. If `len` > 0,
 * the queried range is [`off`, `off` + `len`]. If `len` == 0,
 * we will query in the range from `off` to the end of the file.
 *
 * The `flags` argument is unused for now, but is included for future
 * extensibility. Users should pass 0 (i.e. no flag specified).
 *
 * Currently, hugetlbfs is not supported.
 *
 * Because the status of a page can change after cachestat() checks it
 * but before it returns to the application, the returned values may
 * contain stale information.
 *
 * return values:
 *  zero        - success
 *  -EFAULT     - cstat or cstat_range points to an illegal address
 *  -EINVAL     - invalid flags
 *  -EBADF      - invalid file descriptor
 *  -EOPNOTSUPP - file descriptor is of a hugetlbfs file
 */
SYSCALL_DEFINE4(cachestat, unsigned int, fd,
		struct cachestat_range __user *, cstat_range,
		struct cachestat __user *, cstat, unsigned int, flags)
{
	struct fd f = fdget(fd);
	struct address_space *mapping;
	struct cachestat_range csr;
	struct cachestat cs;
	pgoff_t first_index, last_index;

	if (!fd_file(f))
		return -EBADF;

	if (copy_from_user(&csr, cstat_range,
			sizeof(struct cachestat_range))) {
		fdput(f);
		return -EFAULT;
	}

	/* hugetlbfs is not supported */
	if (is_file_hugepages(fd_file(f))) {
		fdput(f);
		return -EOPNOTSUPP;
	}

	if (flags != 0) {
		fdput(f);
		return -EINVAL;
	}

	first_index = csr.off >> PAGE_SHIFT;
	last_index =
		csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
	memset(&cs, 0, sizeof(struct cachestat));
	mapping = fd_file(f)->f_mapping;
	filemap_cachestat(mapping, first_index, last_index, &cs);
	fdput(f);

	if (copy_to_user(cstat, &cs, sizeof(struct cachestat)))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_CACHESTAT_SYSCALL */
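
/*
 * Illustrative userspace sketch, not part of filemap.c: querying the
 * residency of a whole file (len == 0) via cachestat(2).  Built against
 * the UAPI structs from <linux/mman.h>; glibc may lack a wrapper, so
 * syscall(2) with __NR_cachestat is used directly.
 */
#if 0	/* userspace demo, not kernel code */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>		/* struct cachestat_range, struct cachestat */

int main(int argc, char **argv)
{
	struct cachestat_range range = { .off = 0, .len = 0 };	/* whole file */
	struct cachestat cs;
	int fd = open(argv[1], O_RDONLY);

	if (fd < 0 || syscall(__NR_cachestat, fd, &range, &cs, 0))
		return 1;

	printf("cached %llu dirty %llu writeback %llu evicted %llu recent %llu\n",
	       (unsigned long long)cs.nr_cache,
	       (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback,
	       (unsigned long long)cs.nr_evicted,
	       (unsigned long long)cs.nr_recently_evicted);
	return 0;
}
#endif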