
TOMOYO Linux Cross Reference
Linux/fs/btrfs/extent_io.c


  1 // SPDX-License-Identifier: GPL-2.0
  2 
  3 #include <linux/bitops.h>
  4 #include <linux/slab.h>
  5 #include <linux/bio.h>
  6 #include <linux/mm.h>
  7 #include <linux/pagemap.h>
  8 #include <linux/page-flags.h>
  9 #include <linux/sched/mm.h>
 10 #include <linux/spinlock.h>
 11 #include <linux/blkdev.h>
 12 #include <linux/swap.h>
 13 #include <linux/writeback.h>
 14 #include <linux/pagevec.h>
 15 #include <linux/prefetch.h>
 16 #include <linux/fsverity.h>
 17 #include "extent_io.h"
 18 #include "extent-io-tree.h"
 19 #include "extent_map.h"
 20 #include "ctree.h"
 21 #include "btrfs_inode.h"
 22 #include "bio.h"
 23 #include "locking.h"
 24 #include "backref.h"
 25 #include "disk-io.h"
 26 #include "subpage.h"
 27 #include "zoned.h"
 28 #include "block-group.h"
 29 #include "compression.h"
 30 #include "fs.h"
 31 #include "accessors.h"
 32 #include "file-item.h"
 33 #include "file.h"
 34 #include "dev-replace.h"
 35 #include "super.h"
 36 #include "transaction.h"
 37 
 38 static struct kmem_cache *extent_buffer_cache;
 39 
 40 #ifdef CONFIG_BTRFS_DEBUG
 41 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
 42 {
 43         struct btrfs_fs_info *fs_info = eb->fs_info;
 44         unsigned long flags;
 45 
 46         spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
 47         list_add(&eb->leak_list, &fs_info->allocated_ebs);
 48         spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
 49 }
 50 
 51 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
 52 {
 53         struct btrfs_fs_info *fs_info = eb->fs_info;
 54         unsigned long flags;
 55 
 56         spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
 57         list_del(&eb->leak_list);
 58         spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
 59 }
 60 
 61 void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
 62 {
 63         struct extent_buffer *eb;
 64         unsigned long flags;
 65 
 66         /*
 67          * If we didn't get into open_ctree our allocated_ebs will not be
 68          * initialized, so just skip this.
 69          */
 70         if (!fs_info->allocated_ebs.next)
 71                 return;
 72 
 73         WARN_ON(!list_empty(&fs_info->allocated_ebs));
 74         spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
 75         while (!list_empty(&fs_info->allocated_ebs)) {
 76                 eb = list_first_entry(&fs_info->allocated_ebs,
 77                                       struct extent_buffer, leak_list);
 78                 pr_err(
 79         "BTRFS: buffer leak start %llu len %u refs %d bflags %lu owner %llu\n",
 80                        eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
 81                        btrfs_header_owner(eb));
 82                 list_del(&eb->leak_list);
 83                 WARN_ON_ONCE(1);
 84                 kmem_cache_free(extent_buffer_cache, eb);
 85         }
 86         spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
 87 }
 88 #else
 89 #define btrfs_leak_debug_add_eb(eb)                     do {} while (0)
 90 #define btrfs_leak_debug_del_eb(eb)                     do {} while (0)
 91 #endif
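
/*
 * [Editorial illustration, not part of extent_io.c]
 * The CONFIG_BTRFS_DEBUG helpers above implement a classic leak tracker:
 * every live extent_buffer sits on a list under an irq-safe spinlock, and at
 * teardown anything still on the list is reported.  A minimal userspace
 * sketch of the same idea, using a mutex instead of spin_lock_irqsave() and
 * with all names invented (the removal counterpart is omitted for brevity):
 */
#include <pthread.h>
#include <stdio.h>

struct sketch_obj {
        struct sketch_obj *next;
        const char *tag;
};

static pthread_mutex_t sketch_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sketch_obj *sketch_live;

static void sketch_track(struct sketch_obj *obj)
{
        pthread_mutex_lock(&sketch_lock);
        obj->next = sketch_live;
        sketch_live = obj;
        pthread_mutex_unlock(&sketch_lock);
}

static void sketch_report_leaks(void)
{
        pthread_mutex_lock(&sketch_lock);
        for (struct sketch_obj *obj = sketch_live; obj; obj = obj->next)
                fprintf(stderr, "leaked object: %s\n", obj->tag);
        pthread_mutex_unlock(&sketch_lock);
}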
 92 
 93 /*
 94  * Structure to record info about the bio being assembled, and other info like
  95  * how many bytes remain before the stripe/ordered extent boundary.
 96  */
 97 struct btrfs_bio_ctrl {
 98         struct btrfs_bio *bbio;
 99         enum btrfs_compression_type compress_type;
100         u32 len_to_oe_boundary;
101         blk_opf_t opf;
102         btrfs_bio_end_io_t end_io_func;
103         struct writeback_control *wbc;
104 };
105 
106 static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
107 {
108         struct btrfs_bio *bbio = bio_ctrl->bbio;
109 
110         if (!bbio)
111                 return;
112 
113         /* Caller should ensure the bio has at least some range added */
114         ASSERT(bbio->bio.bi_iter.bi_size);
115 
116         if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
117             bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
118                 btrfs_submit_compressed_read(bbio);
119         else
120                 btrfs_submit_bio(bbio, 0);
121 
122         /* The bbio is owned by the end_io handler now */
123         bio_ctrl->bbio = NULL;
124 }
125 
126 /*
127  * Submit or fail the current bio in the bio_ctrl structure.
128  */
129 static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
130 {
131         struct btrfs_bio *bbio = bio_ctrl->bbio;
132 
133         if (!bbio)
134                 return;
135 
136         if (ret) {
137                 ASSERT(ret < 0);
138                 btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
139                 /* The bio is owned by the end_io handler now */
140                 bio_ctrl->bbio = NULL;
141         } else {
142                 submit_one_bio(bio_ctrl);
143         }
144 }
145 
146 int __init extent_buffer_init_cachep(void)
147 {
148         extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
149                                                 sizeof(struct extent_buffer), 0, 0,
150                                                 NULL);
151         if (!extent_buffer_cache)
152                 return -ENOMEM;
153 
154         return 0;
155 }
156 
157 void __cold extent_buffer_free_cachep(void)
158 {
159         /*
160          * Make sure all delayed rcu free are flushed before we
161          * destroy caches.
162          */
163         rcu_barrier();
164         kmem_cache_destroy(extent_buffer_cache);
165 }
166 
167 static void process_one_page(struct btrfs_fs_info *fs_info,
168                              struct page *page, const struct page *locked_page,
169                              unsigned long page_ops, u64 start, u64 end)
170 {
171         struct folio *folio = page_folio(page);
172         u32 len;
173 
174         ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
175         len = end + 1 - start;
176 
177         if (page_ops & PAGE_SET_ORDERED)
178                 btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
179         if (page_ops & PAGE_START_WRITEBACK) {
180                 btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
181                 btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
182         }
183         if (page_ops & PAGE_END_WRITEBACK)
184                 btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
185 
186         if (page != locked_page && (page_ops & PAGE_UNLOCK))
187                 btrfs_folio_end_writer_lock(fs_info, folio, start, len);
188 }
189 
190 static void __process_pages_contig(struct address_space *mapping,
191                                    const struct page *locked_page, u64 start, u64 end,
192                                    unsigned long page_ops)
193 {
194         struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
195         pgoff_t start_index = start >> PAGE_SHIFT;
196         pgoff_t end_index = end >> PAGE_SHIFT;
197         pgoff_t index = start_index;
198         struct folio_batch fbatch;
199         int i;
200 
201         folio_batch_init(&fbatch);
202         while (index <= end_index) {
203                 int found_folios;
204 
205                 found_folios = filemap_get_folios_contig(mapping, &index,
206                                 end_index, &fbatch);
207                 for (i = 0; i < found_folios; i++) {
208                         struct folio *folio = fbatch.folios[i];
209 
210                         process_one_page(fs_info, &folio->page, locked_page,
211                                          page_ops, start, end);
212                 }
213                 folio_batch_release(&fbatch);
214                 cond_resched();
215         }
216 }
217 
218 static noinline void __unlock_for_delalloc(const struct inode *inode,
219                                            const struct page *locked_page,
220                                            u64 start, u64 end)
221 {
222         unsigned long index = start >> PAGE_SHIFT;
223         unsigned long end_index = end >> PAGE_SHIFT;
224 
225         ASSERT(locked_page);
226         if (index == locked_page->index && end_index == index)
227                 return;
228 
229         __process_pages_contig(inode->i_mapping, locked_page, start, end,
230                                PAGE_UNLOCK);
231 }
232 
233 static noinline int lock_delalloc_pages(struct inode *inode,
234                                         const struct page *locked_page,
235                                         u64 start,
236                                         u64 end)
237 {
238         struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
239         struct address_space *mapping = inode->i_mapping;
240         pgoff_t start_index = start >> PAGE_SHIFT;
241         pgoff_t end_index = end >> PAGE_SHIFT;
242         pgoff_t index = start_index;
243         u64 processed_end = start;
244         struct folio_batch fbatch;
245 
246         if (index == locked_page->index && index == end_index)
247                 return 0;
248 
249         folio_batch_init(&fbatch);
250         while (index <= end_index) {
251                 unsigned int found_folios, i;
252 
253                 found_folios = filemap_get_folios_contig(mapping, &index,
254                                 end_index, &fbatch);
255                 if (found_folios == 0)
256                         goto out;
257 
258                 for (i = 0; i < found_folios; i++) {
259                         struct folio *folio = fbatch.folios[i];
260                         struct page *page = folio_page(folio, 0);
261                         u32 len = end + 1 - start;
262 
263                         if (page == locked_page)
264                                 continue;
265 
266                         if (btrfs_folio_start_writer_lock(fs_info, folio, start,
267                                                           len))
268                                 goto out;
269 
270                         if (!PageDirty(page) || page->mapping != mapping) {
271                                 btrfs_folio_end_writer_lock(fs_info, folio, start,
272                                                             len);
273                                 goto out;
274                         }
275 
276                         processed_end = page_offset(page) + PAGE_SIZE - 1;
277                 }
278                 folio_batch_release(&fbatch);
279                 cond_resched();
280         }
281 
282         return 0;
283 out:
284         folio_batch_release(&fbatch);
285         if (processed_end > start)
286                 __unlock_for_delalloc(inode, locked_page, start, processed_end);
287         return -EAGAIN;
288 }
289 
290 /*
291  * Find and lock a contiguous range of bytes in the file marked as delalloc, no
292  * more than @max_bytes.
293  *
294  * @start:      The original start bytenr to search.
295  *              Will store the extent range start bytenr.
296  * @end:        The original end bytenr of the search range
297  *              Will store the extent range end bytenr.
298  *
299  * Return true if we find a delalloc range which starts inside the original
300  * range, and @start/@end will store the delalloc range start/end.
301  *
302  * Return false if we can't find any delalloc range which starts inside the
303  * original range, and @start/@end will be the non-delalloc range start/end.
304  */
305 EXPORT_FOR_TESTS
306 noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
307                                     struct page *locked_page, u64 *start,
308                                     u64 *end)
309 {
310         struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
311         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
312         const u64 orig_start = *start;
313         const u64 orig_end = *end;
314         /* The sanity tests may not set a valid fs_info. */
315         u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
316         u64 delalloc_start;
317         u64 delalloc_end;
318         bool found;
319         struct extent_state *cached_state = NULL;
320         int ret;
321         int loops = 0;
322 
323         /* Caller should pass a valid @end to indicate the search range end */
324         ASSERT(orig_end > orig_start);
325 
326         /* The range should at least cover part of the page */
327         ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
328                  orig_end <= page_offset(locked_page)));
329 again:
330         /* step one, find a bunch of delalloc bytes starting at start */
331         delalloc_start = *start;
332         delalloc_end = 0;
333         found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
334                                           max_bytes, &cached_state);
335         if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
336                 *start = delalloc_start;
337 
338                 /* @delalloc_end can be -1, never go beyond @orig_end */
339                 *end = min(delalloc_end, orig_end);
340                 free_extent_state(cached_state);
341                 return false;
342         }
343 
344         /*
345          * start comes from the offset of locked_page.  We have to lock
346          * pages in order, so we can't process delalloc bytes before
347          * locked_page
348          */
349         if (delalloc_start < *start)
350                 delalloc_start = *start;
351 
352         /*
353          * make sure to limit the number of pages we try to lock down
354          */
355         if (delalloc_end + 1 - delalloc_start > max_bytes)
356                 delalloc_end = delalloc_start + max_bytes - 1;
357 
358         /* step two, lock all the pages after the page that has start */
359         ret = lock_delalloc_pages(inode, locked_page,
360                                   delalloc_start, delalloc_end);
361         ASSERT(!ret || ret == -EAGAIN);
362         if (ret == -EAGAIN) {
 363                 /* Some of the pages are gone, let's avoid looping by
 364                  * shortening the size of the delalloc range we're searching.
365                  */
366                 free_extent_state(cached_state);
367                 cached_state = NULL;
368                 if (!loops) {
369                         max_bytes = PAGE_SIZE;
370                         loops = 1;
371                         goto again;
372                 } else {
373                         found = false;
374                         goto out_failed;
375                 }
376         }
377 
378         /* step three, lock the state bits for the whole range */
379         lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
380 
381         /* then test to make sure it is all still delalloc */
382         ret = test_range_bit(tree, delalloc_start, delalloc_end,
383                              EXTENT_DELALLOC, cached_state);
384 
385         unlock_extent(tree, delalloc_start, delalloc_end, &cached_state);
386         if (!ret) {
387                 __unlock_for_delalloc(inode, locked_page,
388                               delalloc_start, delalloc_end);
389                 cond_resched();
390                 goto again;
391         }
392         *start = delalloc_start;
393         *end = delalloc_end;
394 out_failed:
395         return found;
396 }
397 
398 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
399                                   const struct page *locked_page,
400                                   struct extent_state **cached,
401                                   u32 clear_bits, unsigned long page_ops)
402 {
403         clear_extent_bit(&inode->io_tree, start, end, clear_bits, cached);
404 
405         __process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
406                                start, end, page_ops);
407 }
408 
409 static bool btrfs_verify_page(struct page *page, u64 start)
410 {
411         if (!fsverity_active(page->mapping->host) ||
412             PageUptodate(page) ||
413             start >= i_size_read(page->mapping->host))
414                 return true;
415         return fsverity_verify_page(page);
416 }
417 
418 static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
419 {
420         struct btrfs_fs_info *fs_info = page_to_fs_info(page);
421         struct folio *folio = page_folio(page);
422 
423         ASSERT(page_offset(page) <= start &&
424                start + len <= page_offset(page) + PAGE_SIZE);
425 
426         if (uptodate && btrfs_verify_page(page, start))
427                 btrfs_folio_set_uptodate(fs_info, folio, start, len);
428         else
429                 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
430 
431         if (!btrfs_is_subpage(fs_info, page->mapping))
432                 unlock_page(page);
433         else
434                 btrfs_subpage_end_reader(fs_info, folio, start, len);
435 }
436 
437 /*
438  * After a write IO is done, we need to:
439  *
440  * - clear the uptodate bits on error
441  * - clear the writeback bits in the extent tree for the range
 442  * - folio_end_writeback() if there is no more pending IO for the folio
443  *
444  * Scheduling is not allowed, so the extent state tree is expected
445  * to have one and only one object corresponding to this IO.
446  */
447 static void end_bbio_data_write(struct btrfs_bio *bbio)
448 {
449         struct btrfs_fs_info *fs_info = bbio->fs_info;
450         struct bio *bio = &bbio->bio;
451         int error = blk_status_to_errno(bio->bi_status);
452         struct folio_iter fi;
453         const u32 sectorsize = fs_info->sectorsize;
454 
455         ASSERT(!bio_flagged(bio, BIO_CLONED));
456         bio_for_each_folio_all(fi, bio) {
457                 struct folio *folio = fi.folio;
458                 u64 start = folio_pos(folio) + fi.offset;
459                 u32 len = fi.length;
460 
461                 /* Only order 0 (single page) folios are allowed for data. */
462                 ASSERT(folio_order(folio) == 0);
463 
464                 /* Our read/write should always be sector aligned. */
465                 if (!IS_ALIGNED(fi.offset, sectorsize))
466                         btrfs_err(fs_info,
467                 "partial page write in btrfs with offset %zu and length %zu",
468                                   fi.offset, fi.length);
469                 else if (!IS_ALIGNED(fi.length, sectorsize))
470                         btrfs_info(fs_info,
471                 "incomplete page write with offset %zu and length %zu",
472                                    fi.offset, fi.length);
473 
474                 btrfs_finish_ordered_extent(bbio->ordered,
475                                 folio_page(folio, 0), start, len, !error);
476                 if (error)
477                         mapping_set_error(folio->mapping, error);
478                 btrfs_folio_clear_writeback(fs_info, folio, start, len);
479         }
480 
481         bio_put(bio);
482 }
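
/*
 * [Editorial illustration, not part of extent_io.c]
 * The alignment checks above boil down to testing whether an offset or
 * length is a multiple of the sector size, which btrfs requires to be a
 * power of two.  A standalone sketch of the same test (the helper name is
 * invented):
 */
#include <stdbool.h>
#include <stdint.h>

/* Same idea as the kernel's IS_ALIGNED(); assumes sectorsize is a power of two. */
static bool sketch_is_sector_aligned(uint64_t value, uint32_t sectorsize)
{
        return (value & (uint64_t)(sectorsize - 1)) == 0;
}
/*
 * Example with 4 KiB sectors: sketch_is_sector_aligned(8192, 4096) is true,
 * sketch_is_sector_aligned(6144, 4096) is false.
 */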
483 
484 /*
485  * Record previously processed extent range
486  *
487  * For endio_readpage_release_extent() to handle a full extent range, reducing
488  * the extent io operations.
489  */
490 struct processed_extent {
491         struct btrfs_inode *inode;
492         /* Start of the range in @inode */
493         u64 start;
494         /* End of the range in @inode */
495         u64 end;
496         bool uptodate;
497 };
498 
499 /*
500  * Try to release processed extent range
501  *
502  * May not release the extent range right now if the current range is
503  * contiguous to processed extent.
504  *
 505  * Will release the processed extent when @inode or @uptodate changes, or when
 506  * the range is no longer contiguous to the processed range.
507  *
508  * Passing @inode == NULL will force processed extent to be released.
509  */
510 static void endio_readpage_release_extent(struct processed_extent *processed,
511                               struct btrfs_inode *inode, u64 start, u64 end,
512                               bool uptodate)
513 {
514         struct extent_state *cached = NULL;
515         struct extent_io_tree *tree;
516 
517         /* The first extent, initialize @processed */
518         if (!processed->inode)
519                 goto update;
520 
521         /*
 522          * Contiguous to the processed extent, just update the end.
523          *
524          * Several things to notice:
525          *
526          * - bio can be merged as long as on-disk bytenr is contiguous
 527          *   This means we can have pages belonging to other inodes, thus we need
 528          *   to check if the inode still matches.
529          * - bvec can contain range beyond current page for multi-page bvec
530          *   Thus we need to do processed->end + 1 >= start check
531          */
532         if (processed->inode == inode && processed->uptodate == uptodate &&
533             processed->end + 1 >= start && end >= processed->end) {
534                 processed->end = end;
535                 return;
536         }
537 
538         tree = &processed->inode->io_tree;
539         /*
540          * Now we don't have range contiguous to the processed range, release
541          * the processed range now.
542          */
543         unlock_extent(tree, processed->start, processed->end, &cached);
544 
545 update:
546         /* Update processed to current range */
547         processed->inode = inode;
548         processed->start = start;
549         processed->end = end;
550         processed->uptodate = uptodate;
551 }
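
/*
 * [Editorial illustration, not part of extent_io.c]
 * The merge rule above (extend the cached range while the new range starts
 * at or before processed->end + 1 and carries the same uptodate state,
 * otherwise release the old range and start a new one) reduced to a
 * standalone userspace sketch; the inode check is omitted and all names are
 * invented:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sketch_run {
        bool valid;
        bool uptodate;
        uint64_t start;
        uint64_t end;
};

static void sketch_release(const struct sketch_run *run)
{
        printf("release [%llu, %llu]\n",
               (unsigned long long)run->start, (unsigned long long)run->end);
}

static void sketch_add_range(struct sketch_run *run, uint64_t start, uint64_t end,
                             bool uptodate)
{
        if (run->valid && run->uptodate == uptodate &&
            run->end + 1 >= start && end >= run->end) {
                run->end = end;         /* contiguous (or overlapping): merge */
                return;
        }
        if (run->valid)
                sketch_release(run);    /* not mergeable: release the old run */
        run->valid = true;
        run->uptodate = uptodate;
        run->start = start;
        run->end = end;
}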
552 
553 static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
554 {
555         struct folio *folio = page_folio(page);
556 
557         ASSERT(folio_test_locked(folio));
558         if (!btrfs_is_subpage(fs_info, folio->mapping))
559                 return;
560 
561         ASSERT(folio_test_private(folio));
562         btrfs_subpage_start_reader(fs_info, folio, page_offset(page), PAGE_SIZE);
563 }
564 
565 /*
566  * After a data read IO is done, we need to:
567  *
568  * - clear the uptodate bits on error
569  * - set the uptodate bits if things worked
570  * - set the folio up to date if all extents in the tree are uptodate
571  * - clear the lock bit in the extent tree
572  * - unlock the folio if there are no other extents locked for it
573  *
574  * Scheduling is not allowed, so the extent state tree is expected
575  * to have one and only one object corresponding to this IO.
576  */
577 static void end_bbio_data_read(struct btrfs_bio *bbio)
578 {
579         struct btrfs_fs_info *fs_info = bbio->fs_info;
580         struct bio *bio = &bbio->bio;
581         struct processed_extent processed = { 0 };
582         struct folio_iter fi;
583         const u32 sectorsize = fs_info->sectorsize;
584 
585         ASSERT(!bio_flagged(bio, BIO_CLONED));
586         bio_for_each_folio_all(fi, &bbio->bio) {
587                 bool uptodate = !bio->bi_status;
588                 struct folio *folio = fi.folio;
589                 struct inode *inode = folio->mapping->host;
590                 u64 start;
591                 u64 end;
592                 u32 len;
593 
594                 /* For now only order 0 folios are supported for data. */
595                 ASSERT(folio_order(folio) == 0);
596                 btrfs_debug(fs_info,
597                         "%s: bi_sector=%llu, err=%d, mirror=%u",
598                         __func__, bio->bi_iter.bi_sector, bio->bi_status,
599                         bbio->mirror_num);
600 
601                 /*
602                  * We always issue full-sector reads, but if some block in a
603                  * folio fails to read, blk_update_request() will advance
604                  * bv_offset and adjust bv_len to compensate.  Print a warning
605                  * for unaligned offsets, and an error if they don't add up to
606                  * a full sector.
607                  */
608                 if (!IS_ALIGNED(fi.offset, sectorsize))
609                         btrfs_err(fs_info,
610                 "partial page read in btrfs with offset %zu and length %zu",
611                                   fi.offset, fi.length);
612                 else if (!IS_ALIGNED(fi.offset + fi.length, sectorsize))
613                         btrfs_info(fs_info,
614                 "incomplete page read with offset %zu and length %zu",
615                                    fi.offset, fi.length);
616 
617                 start = folio_pos(folio) + fi.offset;
618                 end = start + fi.length - 1;
619                 len = fi.length;
620 
621                 if (likely(uptodate)) {
622                         loff_t i_size = i_size_read(inode);
623                         pgoff_t end_index = i_size >> folio_shift(folio);
624 
625                         /*
626                          * Zero out the remaining part if this range straddles
627                          * i_size.
628                          *
629                          * Here we should only zero the range inside the folio,
630                          * not touch anything else.
631                          *
632                          * NOTE: i_size is exclusive while end is inclusive.
633                          */
634                         if (folio_index(folio) == end_index && i_size <= end) {
635                                 u32 zero_start = max(offset_in_folio(folio, i_size),
636                                                      offset_in_folio(folio, start));
637                                 u32 zero_len = offset_in_folio(folio, end) + 1 -
638                                                zero_start;
639 
640                                 folio_zero_range(folio, zero_start, zero_len);
641                         }
642                 }
643 
644                 /* Update page status and unlock. */
645                 end_page_read(folio_page(folio, 0), uptodate, start, len);
646                 endio_readpage_release_extent(&processed, BTRFS_I(inode),
647                                               start, end, uptodate);
648         }
649         /* Release the last extent */
650         endio_readpage_release_extent(&processed, NULL, 0, 0, false);
651         bio_put(bio);
652 }
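
/*
 * [Editorial illustration, not part of extent_io.c]
 * When a read range straddles i_size, the code above zeroes only the bytes
 * from max(i_size, start of the range) through the end of the range, all
 * expressed as offsets inside the folio.  A standalone computation of that
 * window (names invented; the folio is assumed to contain the whole range):
 */
#include <stdint.h>

static void sketch_zero_window(uint64_t folio_pos, uint64_t start, uint64_t end,
                               uint64_t i_size, uint32_t *zero_start,
                               uint32_t *zero_len)
{
        uint64_t from = i_size > start ? i_size : start;

        *zero_start = (uint32_t)(from - folio_pos);
        *zero_len = (uint32_t)(end - folio_pos) + 1 - *zero_start;
}
/*
 * Example: folio at 8192, read range [8192, 12287], i_size = 10240
 * -> zero_start = 2048, zero_len = 2048 (the tail past EOF gets cleared).
 */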
653 
654 /*
655  * Populate every free slot in a provided array with folios using GFP_NOFS.
656  *
657  * @nr_folios:   number of folios to allocate
658  * @folio_array: the array to fill with folios; any existing non-NULL entries in
659  *               the array will be skipped
660  *
661  * Return: 0        if all folios were able to be allocated;
662  *         -ENOMEM  otherwise, the partially allocated folios would be freed and
663  *                  the array slots zeroed
664  */
665 int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array)
666 {
667         for (int i = 0; i < nr_folios; i++) {
668                 if (folio_array[i])
669                         continue;
670                 folio_array[i] = folio_alloc(GFP_NOFS, 0);
671                 if (!folio_array[i])
672                         goto error;
673         }
674         return 0;
675 error:
676         for (int i = 0; i < nr_folios; i++) {
677                 if (folio_array[i])
678                         folio_put(folio_array[i]);
679         }
680         return -ENOMEM;
681 }
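
/*
 * [Editorial illustration, not part of extent_io.c]
 * The allocation pattern above fills only the NULL slots and, on failure,
 * releases everything so the caller never sees a half-populated array.  The
 * same shape with plain malloc() in userspace (names invented):
 */
#include <stdlib.h>

static int sketch_fill_array(unsigned int nr, void **array, size_t item_size)
{
        for (unsigned int i = 0; i < nr; i++) {
                if (array[i])
                        continue;               /* keep slots the caller pre-filled */
                array[i] = malloc(item_size);
                if (!array[i])
                        goto error;
        }
        return 0;
error:
        for (unsigned int i = 0; i < nr; i++) {
                free(array[i]);                 /* free(NULL) is a no-op */
                array[i] = NULL;
        }
        return -1;
}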
682 
683 /*
684  * Populate every free slot in a provided array with pages, using GFP_NOFS.
685  *
686  * @nr_pages:   number of pages to allocate
687  * @page_array: the array to fill with pages; any existing non-null entries in
688  *              the array will be skipped
 689  * @nofail:     whether to use the __GFP_NOFAIL flag
690  *
691  * Return: 0        if all pages were able to be allocated;
692  *         -ENOMEM  otherwise, the partially allocated pages would be freed and
693  *                  the array slots zeroed
694  */
695 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
696                            bool nofail)
697 {
698         const gfp_t gfp = nofail ? (GFP_NOFS | __GFP_NOFAIL) : GFP_NOFS;
699         unsigned int allocated;
700 
701         for (allocated = 0; allocated < nr_pages;) {
702                 unsigned int last = allocated;
703 
704                 allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
705                 if (unlikely(allocated == last)) {
706                         /* No progress, fail and do cleanup. */
707                         for (int i = 0; i < allocated; i++) {
708                                 __free_page(page_array[i]);
709                                 page_array[i] = NULL;
710                         }
711                         return -ENOMEM;
712                 }
713         }
714         return 0;
715 }
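
/*
 * [Editorial illustration, not part of extent_io.c]
 * The loop above keeps calling the bulk allocator until the whole array is
 * populated and gives up only when a pass makes no forward progress.  The
 * control flow in isolation, with bulk_alloc() as an invented stand-in for
 * alloc_pages_bulk_array() that returns the new total count of filled slots:
 */
static int sketch_bulk_fill(unsigned int nr, void **array,
                            unsigned int (*bulk_alloc)(unsigned int nr, void **array))
{
        unsigned int filled = 0;

        while (filled < nr) {
                unsigned int last = filled;

                filled = bulk_alloc(nr, array);
                if (filled == last)
                        return -1;      /* no progress this pass: give up */
        }
        return 0;
}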
716 
717 /*
718  * Populate needed folios for the extent buffer.
719  *
720  * For now, the folios populated are always in order 0 (aka, single page).
721  */
722 static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail)
723 {
724         struct page *page_array[INLINE_EXTENT_BUFFER_PAGES] = { 0 };
725         int num_pages = num_extent_pages(eb);
726         int ret;
727 
728         ret = btrfs_alloc_page_array(num_pages, page_array, nofail);
729         if (ret < 0)
730                 return ret;
731 
732         for (int i = 0; i < num_pages; i++)
733                 eb->folios[i] = page_folio(page_array[i]);
734         eb->folio_size = PAGE_SIZE;
735         eb->folio_shift = PAGE_SHIFT;
736         return 0;
737 }
738 
739 static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
740                                 struct page *page, u64 disk_bytenr,
741                                 unsigned int pg_offset)
742 {
743         struct bio *bio = &bio_ctrl->bbio->bio;
744         struct bio_vec *bvec = bio_last_bvec_all(bio);
745         const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
746 
747         if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
748                 /*
749                  * For compression, all IO should have its logical bytenr set
750                  * to the starting bytenr of the compressed extent.
751                  */
752                 return bio->bi_iter.bi_sector == sector;
753         }
754 
755         /*
756          * The contig check requires the following conditions to be met:
757          *
 758          * 1) The pages belong to the same inode
759          *    This is implied by the call chain.
760          *
761          * 2) The range has adjacent logical bytenr
762          *
763          * 3) The range has adjacent file offset
764          *    This is required for the usage of btrfs_bio->file_offset.
765          */
766         return bio_end_sector(bio) == sector &&
767                 page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
768                 page_offset(page) + pg_offset;
769 }
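
/*
 * [Editorial illustration, not part of extent_io.c]
 * For uncompressed IO the check above requires double adjacency: the new
 * chunk must continue exactly where the bio ends on disk and also where it
 * ends in the file.  Reduced to a userspace sketch (names invented):
 */
#include <stdbool.h>
#include <stdint.h>

struct sketch_bio_tail {
        uint64_t next_disk_byte;        /* first disk byte not yet covered */
        uint64_t next_file_byte;        /* first file byte not yet covered */
};

static bool sketch_is_contig(const struct sketch_bio_tail *tail,
                             uint64_t disk_bytenr, uint64_t file_offset)
{
        return tail->next_disk_byte == disk_bytenr &&
               tail->next_file_byte == file_offset;
}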
770 
771 static void alloc_new_bio(struct btrfs_inode *inode,
772                           struct btrfs_bio_ctrl *bio_ctrl,
773                           u64 disk_bytenr, u64 file_offset)
774 {
775         struct btrfs_fs_info *fs_info = inode->root->fs_info;
776         struct btrfs_bio *bbio;
777 
778         bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
779                                bio_ctrl->end_io_func, NULL);
780         bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
781         bbio->inode = inode;
782         bbio->file_offset = file_offset;
783         bio_ctrl->bbio = bbio;
784         bio_ctrl->len_to_oe_boundary = U32_MAX;
785 
786         /* Limit data write bios to the ordered boundary. */
787         if (bio_ctrl->wbc) {
788                 struct btrfs_ordered_extent *ordered;
789 
790                 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
791                 if (ordered) {
792                         bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
793                                         ordered->file_offset +
794                                         ordered->disk_num_bytes - file_offset);
795                         bbio->ordered = ordered;
796                 }
797 
798                 /*
799                  * Pick the last added device to support cgroup writeback.  For
800                  * multi-device file systems this means blk-cgroup policies have
801                  * to always be set on the last added/replaced device.
802                  * This is a bit odd but has been like that for a long time.
803                  */
804                 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
805                 wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
806         }
807 }
808 
809 /*
810  * @disk_bytenr: logical bytenr where the write will be
811  * @page:       page to add to the bio
812  * @size:       portion of page that we want to write to
 813  * @pg_offset:  offset within @page where the data starts; also used to check
 814  *              whether we are adding a contiguous page to the previous one
815  *
 816  * This will either add the page to the existing @bio_ctrl->bbio or allocate a
 817  * new one in @bio_ctrl->bbio.
 818  * The mirror number for this IO should already be initialized in
 819  * @bio_ctrl->mirror_num.
820  */
821 static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
822                                u64 disk_bytenr, struct page *page,
823                                size_t size, unsigned long pg_offset)
824 {
825         struct btrfs_inode *inode = page_to_inode(page);
826 
827         ASSERT(pg_offset + size <= PAGE_SIZE);
828         ASSERT(bio_ctrl->end_io_func);
829 
830         if (bio_ctrl->bbio &&
831             !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
832                 submit_one_bio(bio_ctrl);
833 
834         do {
835                 u32 len = size;
836 
837                 /* Allocate new bio if needed */
838                 if (!bio_ctrl->bbio) {
839                         alloc_new_bio(inode, bio_ctrl, disk_bytenr,
840                                       page_offset(page) + pg_offset);
841                 }
842 
843                 /* Cap to the current ordered extent boundary if there is one. */
844                 if (len > bio_ctrl->len_to_oe_boundary) {
845                         ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
846                         ASSERT(is_data_inode(inode));
847                         len = bio_ctrl->len_to_oe_boundary;
848                 }
849 
850                 if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
851                         /* bio full: move on to a new one */
852                         submit_one_bio(bio_ctrl);
853                         continue;
854                 }
855 
856                 if (bio_ctrl->wbc)
857                         wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);
858 
859                 size -= len;
860                 pg_offset += len;
861                 disk_bytenr += len;
862 
863                 /*
864                  * len_to_oe_boundary defaults to U32_MAX, which isn't page or
865                  * sector aligned.  alloc_new_bio() then sets it to the end of
866                  * our ordered extent for writes into zoned devices.
867                  *
868                  * When len_to_oe_boundary is tracking an ordered extent, we
869                  * trust the ordered extent code to align things properly, and
870                  * the check above to cap our write to the ordered extent
871                  * boundary is correct.
872                  *
873                  * When len_to_oe_boundary is U32_MAX, the cap above would
874                  * result in a 4095 byte IO for the last page right before
875                  * we hit the bio limit of UINT_MAX.  bio_add_page() has all
876                  * the checks required to make sure we don't overflow the bio,
877                  * and we should just ignore len_to_oe_boundary completely
878                  * unless we're using it to track an ordered extent.
879                  *
880                  * It's pretty hard to make a bio sized U32_MAX, but it can
881                  * happen when the page cache is able to feed us contiguous
882                  * pages for large extents.
883                  */
884                 if (bio_ctrl->len_to_oe_boundary != U32_MAX)
885                         bio_ctrl->len_to_oe_boundary -= len;
886 
887                 /* Ordered extent boundary: move on to a new bio. */
888                 if (bio_ctrl->len_to_oe_boundary == 0)
889                         submit_one_bio(bio_ctrl);
890         } while (size);
891 }
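
/*
 * [Editorial illustration, not part of extent_io.c]
 * The loop above splits a range so that no bio crosses the ordered-extent
 * boundary: each chunk is capped at the remaining boundary, and when the
 * boundary reaches zero the bio is submitted and a fresh one started.  The
 * chunking in isolation (printf() stands in for bio_add_page()/submission,
 * to_boundary is assumed non-zero on entry, names invented):
 */
#include <stdint.h>
#include <stdio.h>

static void sketch_chunked_submit(uint32_t size, uint32_t to_boundary)
{
        while (size) {
                uint32_t len = size;

                if (len > to_boundary)
                        len = to_boundary;      /* cap at the boundary */
                printf("add %u bytes\n", (unsigned)len);
                size -= len;
                to_boundary -= len;
                if (to_boundary == 0) {
                        printf("submit bio\n"); /* boundary hit: start a new bio */
                        to_boundary = UINT32_MAX;  /* next bio: no known boundary */
                }
        }
}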
892 
893 static int attach_extent_buffer_folio(struct extent_buffer *eb,
894                                       struct folio *folio,
895                                       struct btrfs_subpage *prealloc)
896 {
897         struct btrfs_fs_info *fs_info = eb->fs_info;
898         int ret = 0;
899 
900         /*
901          * If the page is mapped to btree inode, we should hold the private
902          * lock to prevent race.
903          * For cloned or dummy extent buffers, their pages are not mapped and
904          * will not race with any other ebs.
905          */
906         if (folio->mapping)
907                 lockdep_assert_held(&folio->mapping->i_private_lock);
908 
909         if (fs_info->nodesize >= PAGE_SIZE) {
910                 if (!folio_test_private(folio))
911                         folio_attach_private(folio, eb);
912                 else
913                         WARN_ON(folio_get_private(folio) != eb);
914                 return 0;
915         }
916 
917         /* Already mapped, just free prealloc */
918         if (folio_test_private(folio)) {
919                 btrfs_free_subpage(prealloc);
920                 return 0;
921         }
922 
923         if (prealloc)
924                 /* Has preallocated memory for subpage */
925                 folio_attach_private(folio, prealloc);
926         else
927                 /* Do new allocation to attach subpage */
928                 ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
929         return ret;
930 }
931 
932 int set_page_extent_mapped(struct page *page)
933 {
934         return set_folio_extent_mapped(page_folio(page));
935 }
936 
937 int set_folio_extent_mapped(struct folio *folio)
938 {
939         struct btrfs_fs_info *fs_info;
940 
941         ASSERT(folio->mapping);
942 
943         if (folio_test_private(folio))
944                 return 0;
945 
946         fs_info = folio_to_fs_info(folio);
947 
948         if (btrfs_is_subpage(fs_info, folio->mapping))
949                 return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
950 
951         folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
952         return 0;
953 }
954 
955 void clear_page_extent_mapped(struct page *page)
956 {
957         struct folio *folio = page_folio(page);
958         struct btrfs_fs_info *fs_info;
959 
960         ASSERT(page->mapping);
961 
962         if (!folio_test_private(folio))
963                 return;
964 
965         fs_info = page_to_fs_info(page);
966         if (btrfs_is_subpage(fs_info, page->mapping))
967                 return btrfs_detach_subpage(fs_info, folio);
968 
969         folio_detach_private(folio);
970 }
971 
972 static struct extent_map *__get_extent_map(struct inode *inode, struct page *page,
973                  u64 start, u64 len, struct extent_map **em_cached)
974 {
975         struct extent_map *em;
976 
977         ASSERT(em_cached);
978 
979         if (*em_cached) {
980                 em = *em_cached;
981                 if (extent_map_in_tree(em) && start >= em->start &&
982                     start < extent_map_end(em)) {
983                         refcount_inc(&em->refs);
984                         return em;
985                 }
986 
987                 free_extent_map(em);
988                 *em_cached = NULL;
989         }
990 
991         em = btrfs_get_extent(BTRFS_I(inode), page, start, len);
992         if (!IS_ERR(em)) {
993                 BUG_ON(*em_cached);
994                 refcount_inc(&em->refs);
995                 *em_cached = em;
996         }
997         return em;
998 }
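
/*
 * [Editorial illustration, not part of extent_io.c]
 * __get_extent_map() above keeps a one-entry cache so that walking a page
 * sector by sector does not repeat the extent map lookup.  The idea reduced
 * to a userspace sketch with an invented range type and lookup callback:
 */
#include <stdint.h>

struct sketch_range {
        uint64_t start;
        uint64_t len;
};

static const struct sketch_range *
sketch_cached_lookup(uint64_t pos, const struct sketch_range **cached,
                     const struct sketch_range *(*lookup)(uint64_t pos))
{
        if (*cached && pos >= (*cached)->start &&
            pos < (*cached)->start + (*cached)->len)
                return *cached;         /* cache hit: reuse the previous range */

        *cached = lookup(pos);          /* miss: replace the cached entry */
        return *cached;
}
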
999 /*
1000  * Basic readpage implementation.  Locked extent state structs are inserted
1001  * into the tree and removed when the IO is done (by the end_io handlers).
1002  *
1003  * XXX JDM: This needs looking at to ensure proper page locking.
1004  * Return 0 on success, otherwise return an error.
1005  */
1006 static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
1007                       struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
1008 {
1009         struct inode *inode = page->mapping->host;
1010         struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
1011         u64 start = page_offset(page);
1012         const u64 end = start + PAGE_SIZE - 1;
1013         u64 cur = start;
1014         u64 extent_offset;
1015         u64 last_byte = i_size_read(inode);
1016         u64 block_start;
1017         struct extent_map *em;
1018         int ret = 0;
1019         size_t pg_offset = 0;
1020         size_t iosize;
1021         size_t blocksize = fs_info->sectorsize;
1022         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
1023 
1024         ret = set_page_extent_mapped(page);
1025         if (ret < 0) {
1026                 unlock_extent(tree, start, end, NULL);
1027                 unlock_page(page);
1028                 return ret;
1029         }
1030 
1031         if (page->index == last_byte >> PAGE_SHIFT) {
1032                 size_t zero_offset = offset_in_page(last_byte);
1033 
1034                 if (zero_offset) {
1035                         iosize = PAGE_SIZE - zero_offset;
1036                         memzero_page(page, zero_offset, iosize);
1037                 }
1038         }
1039         bio_ctrl->end_io_func = end_bbio_data_read;
1040         begin_page_read(fs_info, page);
1041         while (cur <= end) {
1042                 enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
1043                 bool force_bio_submit = false;
1044                 u64 disk_bytenr;
1045 
1046                 ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
1047                 if (cur >= last_byte) {
1048                         iosize = PAGE_SIZE - pg_offset;
1049                         memzero_page(page, pg_offset, iosize);
1050                         unlock_extent(tree, cur, cur + iosize - 1, NULL);
1051                         end_page_read(page, true, cur, iosize);
1052                         break;
1053                 }
1054                 em = __get_extent_map(inode, page, cur, end - cur + 1, em_cached);
1055                 if (IS_ERR(em)) {
1056                         unlock_extent(tree, cur, end, NULL);
1057                         end_page_read(page, false, cur, end + 1 - cur);
1058                         return PTR_ERR(em);
1059                 }
1060                 extent_offset = cur - em->start;
1061                 BUG_ON(extent_map_end(em) <= cur);
1062                 BUG_ON(end < cur);
1063 
1064                 compress_type = extent_map_compression(em);
1065 
1066                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
1067                 iosize = ALIGN(iosize, blocksize);
1068                 if (compress_type != BTRFS_COMPRESS_NONE)
1069                         disk_bytenr = em->disk_bytenr;
1070                 else
1071                         disk_bytenr = extent_map_block_start(em) + extent_offset;
1072                 block_start = extent_map_block_start(em);
1073                 if (em->flags & EXTENT_FLAG_PREALLOC)
1074                         block_start = EXTENT_MAP_HOLE;
1075 
1076                 /*
1077                  * If we have a file range that points to a compressed extent
1078                  * and it's followed by a consecutive file range that points
1079                  * to the same compressed extent (possibly with a different
1080                  * offset and/or length, so it either points to the whole extent
1081                  * or only part of it), we must make sure we do not submit a
1082                  * single bio to populate the pages for the 2 ranges because
1083                  * this makes the compressed extent read zero out the pages
1084                  * belonging to the 2nd range. Imagine the following scenario:
1085                  *
1086                  *  File layout
1087                  *  [0 - 8K]                     [8K - 24K]
1088                  *    |                               |
1089                  *    |                               |
1090                  * points to extent X,         points to extent X,
1091                  * offset 4K, length of 8K     offset 0, length 16K
1092                  *
1093                  * [extent X, compressed length = 4K uncompressed length = 16K]
1094                  *
1095                  * If the bio to read the compressed extent covers both ranges,
1096                  * it will decompress extent X into the pages belonging to the
1097                  * first range and then it will stop, zeroing out the remaining
1098                  * pages that belong to the other range that points to extent X.
1099                  * So here we make sure we submit 2 bios, one for the first
1100                  * range and another one for the third range. Both will target
1101                  * the same physical extent from disk, but we can't currently
1102                  * make the compressed bio endio callback populate the pages
1103                  * for both ranges because each compressed bio is tightly
1104                  * coupled with a single extent map, and each range can have
1105                  * an extent map with a different offset value relative to the
1106                  * uncompressed data of our extent and different lengths. This
1107                  * is a corner case so we prioritize correctness over
1108                  * non-optimal behavior (submitting 2 bios for the same extent).
1109                  */
1110                 if (compress_type != BTRFS_COMPRESS_NONE &&
1111                     prev_em_start && *prev_em_start != (u64)-1 &&
1112                     *prev_em_start != em->start)
1113                         force_bio_submit = true;
1114 
1115                 if (prev_em_start)
1116                         *prev_em_start = em->start;
1117 
1118                 free_extent_map(em);
1119                 em = NULL;
1120 
1121                 /* we've found a hole, just zero and go on */
1122                 if (block_start == EXTENT_MAP_HOLE) {
1123                         memzero_page(page, pg_offset, iosize);
1124 
1125                         unlock_extent(tree, cur, cur + iosize - 1, NULL);
1126                         end_page_read(page, true, cur, iosize);
1127                         cur = cur + iosize;
1128                         pg_offset += iosize;
1129                         continue;
1130                 }
1131                 /* the get_extent function already copied into the page */
1132                 if (block_start == EXTENT_MAP_INLINE) {
1133                         unlock_extent(tree, cur, cur + iosize - 1, NULL);
1134                         end_page_read(page, true, cur, iosize);
1135                         cur = cur + iosize;
1136                         pg_offset += iosize;
1137                         continue;
1138                 }
1139 
1140                 if (bio_ctrl->compress_type != compress_type) {
1141                         submit_one_bio(bio_ctrl);
1142                         bio_ctrl->compress_type = compress_type;
1143                 }
1144 
1145                 if (force_bio_submit)
1146                         submit_one_bio(bio_ctrl);
1147                 submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1148                                    pg_offset);
1149                 cur = cur + iosize;
1150                 pg_offset += iosize;
1151         }
1152 
1153         return 0;
1154 }
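
/*
 * [Editorial illustration, not part of extent_io.c]
 * btrfs_do_readpage() above walks the page in extent-map bounded,
 * block-aligned steps: holes are zeroed in place, everything else is queued
 * for read.  The walking skeleton in isolation; lookup/zero/queue_read are
 * invented stand-ins, blocksize is assumed to be a power of two, and lookup()
 * is assumed to return an extent that covers cur (ext.end > cur):
 */
#include <stdint.h>

struct sketch_ext {
        uint64_t end;           /* first byte past this extent */
        int is_hole;
};

static void sketch_readpage_walk(uint64_t start, uint64_t end, uint32_t blocksize,
                                 struct sketch_ext (*lookup)(uint64_t cur),
                                 void (*zero)(uint64_t cur, uint64_t len),
                                 void (*queue_read)(uint64_t cur, uint64_t len))
{
        uint64_t cur = start;

        while (cur <= end) {
                struct sketch_ext ext = lookup(cur);
                uint64_t len = ext.end - cur;

                if (len > end - cur + 1)
                        len = end - cur + 1;
                /* round up to a full block, like the kernel's ALIGN() */
                len = (len + blocksize - 1) & ~((uint64_t)blocksize - 1);
                if (ext.is_hole)
                        zero(cur, len);
                else
                        queue_read(cur, len);
                cur += len;
        }
}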
1155 
1156 int btrfs_read_folio(struct file *file, struct folio *folio)
1157 {
1158         struct page *page = &folio->page;
1159         struct btrfs_inode *inode = page_to_inode(page);
1160         u64 start = page_offset(page);
1161         u64 end = start + PAGE_SIZE - 1;
1162         struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
1163         struct extent_map *em_cached = NULL;
1164         int ret;
1165 
1166         btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1167 
1168         ret = btrfs_do_readpage(page, &em_cached, &bio_ctrl, NULL);
1169         free_extent_map(em_cached);
1170 
1171         /*
1172          * If btrfs_do_readpage() failed we will want to submit the assembled
1173          * bio to do the cleanup.
1174          */
1175         submit_one_bio(&bio_ctrl);
1176         return ret;
1177 }
1178 
1179 static inline void contiguous_readpages(struct page *pages[], int nr_pages,
1180                                         u64 start, u64 end,
1181                                         struct extent_map **em_cached,
1182                                         struct btrfs_bio_ctrl *bio_ctrl,
1183                                         u64 *prev_em_start)
1184 {
1185         struct btrfs_inode *inode = page_to_inode(pages[0]);
1186         int index;
1187 
1188         ASSERT(em_cached);
1189 
1190         btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
1191 
1192         for (index = 0; index < nr_pages; index++) {
1193                 btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
1194                                   prev_em_start);
1195                 put_page(pages[index]);
1196         }
1197 }
1198 
1199 /*
1200  * helper for __extent_writepage, doing all of the delayed allocation setup.
1201  *
1202  * This returns 1 if btrfs_run_delalloc_range function did all the work required
1203  * to write the page (copy into inline extent).  In this case the IO has
1204  * been started and the page is already unlocked.
1205  *
1206  * This returns 0 if all went well (page still locked)
1207  * This returns < 0 if there were errors (page still locked)
1208  */
1209 static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
1210                 struct page *page, struct writeback_control *wbc)
1211 {
1212         struct btrfs_fs_info *fs_info = inode_to_fs_info(&inode->vfs_inode);
1213         struct folio *folio = page_folio(page);
1214         const bool is_subpage = btrfs_is_subpage(fs_info, page->mapping);
1215         const u64 page_start = page_offset(page);
1216         const u64 page_end = page_start + PAGE_SIZE - 1;
1217         /*
 1218          * Save the last found delalloc end. Since the delalloc end can go beyond
 1219          * the page boundary, we cannot rely on the subpage bitmap to locate the
 1220          * last delalloc end.
1221          */
1222         u64 last_delalloc_end = 0;
1223         u64 delalloc_start = page_start;
1224         u64 delalloc_end = page_end;
1225         u64 delalloc_to_write = 0;
1226         int ret = 0;
1227 
1228         /* Lock all (subpage) delalloc ranges inside the page first. */
1229         while (delalloc_start < page_end) {
1230                 delalloc_end = page_end;
1231                 if (!find_lock_delalloc_range(&inode->vfs_inode, page,
1232                                               &delalloc_start, &delalloc_end)) {
1233                         delalloc_start = delalloc_end + 1;
1234                         continue;
1235                 }
1236                 btrfs_folio_set_writer_lock(fs_info, folio, delalloc_start,
1237                                             min(delalloc_end, page_end) + 1 -
1238                                             delalloc_start);
1239                 last_delalloc_end = delalloc_end;
1240                 delalloc_start = delalloc_end + 1;
1241         }
1242         delalloc_start = page_start;
1243 
1244         if (!last_delalloc_end)
1245                 goto out;
1246 
1247         /* Run the delalloc ranges for the above locked ranges. */
1248         while (delalloc_start < page_end) {
1249                 u64 found_start;
1250                 u32 found_len;
1251                 bool found;
1252 
1253                 if (!is_subpage) {
1254                         /*
1255                          * For non-subpage case, the found delalloc range must
1256                          * cover this page and there must be only one locked
1257                          * delalloc range.
1258                          */
1259                         found_start = page_start;
1260                         found_len = last_delalloc_end + 1 - found_start;
1261                         found = true;
1262                 } else {
1263                         found = btrfs_subpage_find_writer_locked(fs_info, folio,
1264                                         delalloc_start, &found_start, &found_len);
1265                 }
1266                 if (!found)
1267                         break;
1268                 /*
 1269                  * If the subpage range covers the last sector, the delalloc range may
 1270                  * end beyond the page boundary; use the saved delalloc_end
 1271                  * instead.
1272                  */
1273                 if (found_start + found_len >= page_end)
1274                         found_len = last_delalloc_end + 1 - found_start;
1275 
1276                 if (ret >= 0) {
1277                         /* No errors hit so far, run the current delalloc range. */
1278                         ret = btrfs_run_delalloc_range(inode, page, found_start,
1279                                                        found_start + found_len - 1,
1280                                                        wbc);
1281                 } else {
1282                         /*
1283                          * We've hit an error during previous delalloc range,
1284                          * have to cleanup the remaining locked ranges.
1285                          */
1286                         unlock_extent(&inode->io_tree, found_start,
1287                                       found_start + found_len - 1, NULL);
1288                         __unlock_for_delalloc(&inode->vfs_inode, page, found_start,
1289                                               found_start + found_len - 1);
1290                 }
1291 
1292                 /*
1293                  * We can hit btrfs_run_delalloc_range() with >0 return value.
1294                  *
1295                  * This happens when either the IO is already done and page
1296                  * unlocked (inline) or the IO submission and page unlock would
1297                  * be handled as async (compression).
1298                  *
1299                  * Inline is only possible for regular sectorsize for now.
1300                  *
1301                  * Compression is possible for both subpage and regular cases,
1302                  * but even for subpage, compression only happens for page-aligned
1303                  * ranges, thus the found delalloc range must go beyond the current
1304                  * page.
1305                  */
1306                 if (ret > 0)
1307                         ASSERT(!is_subpage || found_start + found_len >= page_end);
1308 
1309                 /*
1310                  * Above btrfs_run_delalloc_range() may have unlocked the page,
1311                  * thus for the last range, we cannot touch the page anymore.
1312                  */
1313                 if (found_start + found_len >= last_delalloc_end + 1)
1314                         break;
1315 
1316                 delalloc_start = found_start + found_len;
1317         }
1318         if (ret < 0)
1319                 return ret;
1320 out:
1321         if (last_delalloc_end)
1322                 delalloc_end = last_delalloc_end;
1323         else
1324                 delalloc_end = page_end;
1325         /*
1326          * delalloc_end is already one less than the total length, so
1327          * we don't subtract one from PAGE_SIZE
1328          */
1329         delalloc_to_write +=
1330                 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE);
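        /*
         * Worked example with assumed values: for 4K pages, page_start == 0
         * and delalloc_end == 8191 (an inclusive end covering two full pages),
         * the statement above adds
         *
         *     DIV_ROUND_UP(8191 + 1 - 0, 4096) == 2
         *
         * pages to delalloc_to_write.
         */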
1331 
1332         /*
1333          * If btrfs_run_delalloc_range() already started I/O and unlocked
1334          * the pages, we just need to account for them here.
1335          */
1336         if (ret == 1) {
1337                 wbc->nr_to_write -= delalloc_to_write;
1338                 return 1;
1339         }
1340 
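        /*
         * If the delalloc range needs more pages than we were asked to write,
         * raise nr_to_write: ranges below 2 * 8192 pages get covered in full,
         * larger ones are capped at 8192 pages.
         */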
1341         if (wbc->nr_to_write < delalloc_to_write) {
1342                 int thresh = 8192;
1343 
1344                 if (delalloc_to_write < thresh * 2)
1345                         thresh = delalloc_to_write;
1346                 wbc->nr_to_write = min_t(u64, delalloc_to_write,
1347                                          thresh);
1348         }
1349 
1350         return 0;
1351 }
1352 
1353 /*
1354  * Find the first byte we need to write.
1355  *
1356  * For subpage, one page can contain several sectors, and
1357  * __extent_writepage_io() will just grab all extent maps in the page
1358  * range and try to submit all non-inline/non-compressed extents.
1359  *
1360  * This is a big problem for subpage, as we must not re-submit data that has
1361  * already been written.
1362  * This function looks up the subpage dirty bitmap to find the range we really
1363  * need to submit.
1364  *
1365  * Return the next dirty range in [@start, @end).
1366  * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
1367  */
1368 static void find_next_dirty_byte(const struct btrfs_fs_info *fs_info,
1369                                  struct page *page, u64 *start, u64 *end)
1370 {
1371         struct folio *folio = page_folio(page);
1372         struct btrfs_subpage *subpage = folio_get_private(folio);
1373         struct btrfs_subpage_info *spi = fs_info->subpage_info;
1374         u64 orig_start = *start;
1375         /* Declare as unsigned long so we can use bitmap ops */
1376         unsigned long flags;
1377         int range_start_bit;
1378         int range_end_bit;
1379 
1380         /*
1381          * For regular sector size == page size case, since one page only
1382          * contains one sector, we return the page offset directly.
1383          */
1384         if (!btrfs_is_subpage(fs_info, page->mapping)) {
1385                 *start = page_offset(page);
1386                 *end = page_offset(page) + PAGE_SIZE;
1387                 return;
1388         }
1389 
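        /*
         * Worked example with assumed values: for a 64K page, a 4K sectorsize
         * (sectorsize_bits == 12) and spi->dirty_offset == 16, a *start whose
         * offset inside the page is 20K gives
         *
         *     range_start_bit = 16 + (20480 >> 12) = 16 + 5 = 21
         *
         * bitmap_next_set_region() below then finds the next run of set dirty
         * bits at or after that bit, and subtracting dirty_offset converts the
         * bit indexes back into sector offsets inside the page.
         */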
1390         range_start_bit = spi->dirty_offset +
1391                           (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
1392 
1393         /* We should have the page locked, but just in case */
1394         spin_lock_irqsave(&subpage->lock, flags);
1395         bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
1396                                spi->dirty_offset + spi->bitmap_nr_bits);
1397         spin_unlock_irqrestore(&subpage->lock, flags);
1398 
1399         range_start_bit -= spi->dirty_offset;
1400         range_end_bit -= spi->dirty_offset;
1401 
1402         *start = page_offset(page) + range_start_bit * fs_info->sectorsize;
1403         *end = page_offset(page) + range_end_bit * fs_info->sectorsize;
1404 }
1405 
1406 /*
1407  * helper for __extent_writepage.  This calls the writepage start hooks,
1408  * and does the loop to map the page into extents and bios.
1409  *
1410  * We return 1 if the IO is started and the page is unlocked,
1411  * 0 if all went well (page still locked)
1412  * < 0 if there were errors (page still locked)
1413  */
1414 static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
1415                                  struct page *page, u64 start, u32 len,
1416                                  struct btrfs_bio_ctrl *bio_ctrl,
1417                                  loff_t i_size,
1418                                  int *nr_ret)
1419 {
1420         struct btrfs_fs_info *fs_info = inode->root->fs_info;
1421         u64 cur = start;
1422         u64 end = start + len - 1;
1423         u64 extent_offset;
1424         u64 block_start;
1425         struct extent_map *em;
1426         int ret = 0;
1427         int nr = 0;
1428 
1429         ASSERT(start >= page_offset(page) &&
1430                start + len <= page_offset(page) + PAGE_SIZE);
1431 
1432         ret = btrfs_writepage_cow_fixup(page);
1433         if (ret) {
1434                 /* Fixup worker will requeue */
1435                 redirty_page_for_writepage(bio_ctrl->wbc, page);
1436                 unlock_page(page);
1437                 return 1;
1438         }
1439 
1440         bio_ctrl->end_io_func = end_bbio_data_write;
1441         while (cur <= end) {
1442                 u32 len = end - cur + 1;
1443                 u64 disk_bytenr;
1444                 u64 em_end;
1445                 u64 dirty_range_start = cur;
1446                 u64 dirty_range_end;
1447                 u32 iosize;
1448 
1449                 if (cur >= i_size) {
1450                         btrfs_mark_ordered_io_finished(inode, page, cur, len,
1451                                                        true);
1452                         /*
1453                          * This range is beyond i_size, thus we don't need to
1454                          * bother writing back.
1455                          * But we still need to clear the dirty subpage bit, or
1456                          * the next time the page gets dirtied, we will try to
1457                          * write back the sectors that still have subpage dirty bits,
1458                          * causing writeback without an ordered extent.
1459                          */
1460                         btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, len);
1461                         break;
1462                 }
1463 
1464                 find_next_dirty_byte(fs_info, page, &dirty_range_start,
1465                                      &dirty_range_end);
1466                 if (cur < dirty_range_start) {
1467                         cur = dirty_range_start;
1468                         continue;
1469                 }
1470 
1471                 em = btrfs_get_extent(inode, NULL, cur, len);
1472                 if (IS_ERR(em)) {
1473                         ret = PTR_ERR_OR_ZERO(em);
1474                         goto out_error;
1475                 }
1476 
1477                 extent_offset = cur - em->start;
1478                 em_end = extent_map_end(em);
1479                 ASSERT(cur <= em_end);
1480                 ASSERT(cur < end);
1481                 ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
1482                 ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
1483 
1484                 block_start = extent_map_block_start(em);
1485                 disk_bytenr = extent_map_block_start(em) + extent_offset;
1486 
1487                 ASSERT(!extent_map_is_compressed(em));
1488                 ASSERT(block_start != EXTENT_MAP_HOLE);
1489                 ASSERT(block_start != EXTENT_MAP_INLINE);
1490 
1491                 /*
1492                  * Note that em_end from extent_map_end() and dirty_range_end from
1493                  * find_next_dirty_byte() are both exclusive.
1494                  */
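                /*
                 * For example, with assumed values cur == 0, em_end == 16K,
                 * end + 1 == 4K and dirty_range_end == 8K, this gives
                 * iosize = min(min(16K, 4K), 8K) - 0 = 4K, so we never submit
                 * past the requested range or the dirty range.
                 */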
1495                 iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
1496                 free_extent_map(em);
1497                 em = NULL;
1498 
1499                 btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
1500                 if (!PageWriteback(page)) {
1501                         btrfs_err(inode->root->fs_info,
1502                                   "page %lu not writeback, cur %llu end %llu",
1503                                   page->index, cur, end);
1504                 }
1505 
1506                 /*
1507                  * Although the PageDirty bit is cleared before entering this
1508                  * function, the subpage dirty bit is not.
1509                  * So clear the subpage dirty bit here, so that next time we won't
1510                  * submit the page for a range that was already written to disk.
1511                  */
1512                 btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize);
1513 
1514                 submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
1515                                    cur - page_offset(page));
1516                 cur += iosize;
1517                 nr++;
1518         }
1519 
1520         btrfs_folio_assert_not_dirty(fs_info, page_folio(page), start, len);
1521         *nr_ret = nr;
1522         return 0;
1523 
1524 out_error:
1525         /*
1526          * If we finish without problem, we should not only clear page dirty,
1527          * but also empty subpage dirty bits
1528          */
1529         *nr_ret = nr;
1530         return ret;
1531 }
1532 
1533 /*
1534  * The writepage semantics are similar to regular writepage.  Extent
1535  * records are inserted to lock ranges in the tree, and as dirty areas
1536  * are found, they are marked for writeback.  Then the lock bits are removed
1537  * and the end_io handler clears the writeback ranges.
1538  *
1539  * Return 0 if everything goes well.
1540  * Return <0 for error.
1541  */
1542 static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
1543 {
1544         struct folio *folio = page_folio(page);
1545         struct inode *inode = page->mapping->host;
1546         const u64 page_start = page_offset(page);
1547         int ret;
1548         int nr = 0;
1549         size_t pg_offset;
1550         loff_t i_size = i_size_read(inode);
1551         unsigned long end_index = i_size >> PAGE_SHIFT;
1552 
1553         trace___extent_writepage(page, inode, bio_ctrl->wbc);
1554 
1555         WARN_ON(!PageLocked(page));
1556 
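        /*
         * Example with assumed values: for 4K pages and i_size == 10K,
         * end_index == 2 and pg_offset == 2K.  Pages with index > 2 lie
         * entirely beyond EOF and are invalidated below, while the page at
         * index 2 is partially beyond EOF and gets its tail zeroed starting
         * at pg_offset.
         */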
1557         pg_offset = offset_in_page(i_size);
1558         if (page->index > end_index ||
1559            (page->index == end_index && !pg_offset)) {
1560                 folio_invalidate(folio, 0, folio_size(folio));
1561                 folio_unlock(folio);
1562                 return 0;
1563         }
1564 
1565         if (page->index == end_index)
1566                 memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
1567 
1568         ret = set_page_extent_mapped(page);
1569         if (ret < 0)
1570                 goto done;
1571 
1572         ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
1573         if (ret == 1)
1574                 return 0;
1575         if (ret)
1576                 goto done;
1577 
1578         ret = __extent_writepage_io(BTRFS_I(inode), page, page_offset(page),
1579                                     PAGE_SIZE, bio_ctrl, i_size, &nr);
1580         if (ret == 1)
1581                 return 0;
1582 
1583         bio_ctrl->wbc->nr_to_write--;
1584 
1585 done:
1586         if (nr == 0) {
1587                 /* Make sure the mapping tag for page dirty gets cleared. */
1588                 set_page_writeback(page);
1589                 end_page_writeback(page);
1590         }
1591         if (ret) {
1592                 btrfs_mark_ordered_io_finished(BTRFS_I(inode), page, page_start,
1593                                                PAGE_SIZE, !ret);
1594                 mapping_set_error(page->mapping, ret);
1595         }
1596 
1597         btrfs_folio_end_all_writers(inode_to_fs_info(inode), folio);
1598         ASSERT(ret <= 0);
1599         return ret;
1600 }
1601 
1602 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
1603 {
1604         wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
1605                        TASK_UNINTERRUPTIBLE);
1606 }
1607 
1608 /*
1609  * Lock extent buffer status and pages for writeback.
1610  *
1611  * Return %false if the extent buffer doesn't need to be submitted (e.g. the
1612  * extent buffer is not dirty)
1613  * Return %true if the extent buffer is submitted to the bio.
1614  */
1615 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
1616                           struct writeback_control *wbc)
1617 {
1618         struct btrfs_fs_info *fs_info = eb->fs_info;
1619         bool ret = false;
1620 
1621         btrfs_tree_lock(eb);
1622         while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
1623                 btrfs_tree_unlock(eb);
1624                 if (wbc->sync_mode != WB_SYNC_ALL)
1625                         return false;
1626                 wait_on_extent_buffer_writeback(eb);
1627                 btrfs_tree_lock(eb);
1628         }
1629 
1630         /*
1631          * We need to do this to prevent races with anyone checking if the eb
1632          * is under IO, since we can end up having no IO bits set for a short
1633          * period of time.
1634          */
1635         spin_lock(&eb->refs_lock);
1636         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
1637                 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1638                 spin_unlock(&eb->refs_lock);
1639                 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
1640                 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
1641                                          -eb->len,
1642                                          fs_info->dirty_metadata_batch);
1643                 ret = true;
1644         } else {
1645                 spin_unlock(&eb->refs_lock);
1646         }
1647         btrfs_tree_unlock(eb);
1648         return ret;
1649 }
1650 
1651 static void set_btree_ioerr(struct extent_buffer *eb)
1652 {
1653         struct btrfs_fs_info *fs_info = eb->fs_info;
1654 
1655         set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1656 
1657         /*
1658          * A read may stumble upon this buffer later, make sure that it gets an
1659          * error and knows there was an error.
1660          */
1661         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
1662 
1663         /*
1664          * We need to set the error on the mapping as well, because a write
1665          * error will flip the file system read-only, and then syncfs() would
1666          * return 0 (since we are read-only) if we didn't bump the error
1667          * sequence for the superblock.
1668          */
1669         mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
1670 
1671         /*
1672          * If writeback for a btree extent that doesn't belong to a log tree
1673          * failed, increment the counter transaction->eb_write_errors.
1674          * We do this because while the transaction is running and before it's
1675          * committing (when we call filemap_fdata[write|wait]_range against
1676          * the btree inode), we might have
1677          * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
1678          * returns an error or an error happens during writeback, when we're
1679          * committing the transaction we wouldn't know about it, since the pages
1680          * can be no longer dirty nor marked anymore for writeback (if a
1681          * subsequent modification to the extent buffer didn't happen before the
1682          * transaction commit), which makes filemap_fdata[write|wait]_range not
1683          * able to find the pages which contain errors at transaction
1684          * commit time. So if this happens we must abort the transaction,
1685          * otherwise we commit a super block with btree roots that point to
1686          * btree nodes/leafs whose content on disk is invalid - either garbage
1687          * or the content of some node/leaf from a past generation that got
1688          * cowed or deleted and is no longer valid.
1689          *
1690          * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
1691          * not be enough - we need to distinguish between log tree extents vs
1692          * non-log tree extents, and the next filemap_fdatawait_range() call
1693          * will catch and clear such errors in the mapping - and that call might
1694          * be from a log sync and not from a transaction commit. Also, checking
1695          * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
1696          * not done and would not be reliable - the eb might have been released
1697          * from memory and reading it back again means that flag would not be
1698          * set (since it's a runtime flag, not persisted on disk).
1699          *
1700          * Using the flags below in the btree inode also covers the case where
1701          * writepages() returned success after starting writeback for all dirty
1702          * pages, but by the time filemap_fdatawait_range() is called the
1703          * writeback for all those pages had already finished with errors -
1704          * because we were not using AS_EIO/AS_ENOSPC,
1705          * filemap_fdatawait_range() would return success, as it could not know
1706          * that writeback errors happened (the pages were no longer tagged for
1707          * writeback).
1708          */
1709         switch (eb->log_index) {
1710         case -1:
1711                 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
1712                 break;
1713         case 0:
1714                 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
1715                 break;
1716         case 1:
1717                 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
1718                 break;
1719         default:
1720                 BUG(); /* unexpected, logic error */
1721         }
1722 }
1723 
1724 /*
1725  * The endio specific version which won't touch any unsafe spinlock in endio
1726  * context.
1727  */
1728 static struct extent_buffer *find_extent_buffer_nolock(
1729                 const struct btrfs_fs_info *fs_info, u64 start)
1730 {
1731         struct extent_buffer *eb;
1732 
1733         rcu_read_lock();
1734         eb = radix_tree_lookup(&fs_info->buffer_radix,
1735                                start >> fs_info->sectorsize_bits);
1736         if (eb && atomic_inc_not_zero(&eb->refs)) {
1737                 rcu_read_unlock();
1738                 return eb;
1739         }
1740         rcu_read_unlock();
1741         return NULL;
1742 }
1743 
1744 static void end_bbio_meta_write(struct btrfs_bio *bbio)
1745 {
1746         struct extent_buffer *eb = bbio->private;
1747         struct btrfs_fs_info *fs_info = eb->fs_info;
1748         bool uptodate = !bbio->bio.bi_status;
1749         struct folio_iter fi;
1750         u32 bio_offset = 0;
1751 
1752         if (!uptodate)
1753                 set_btree_ioerr(eb);
1754 
1755         bio_for_each_folio_all(fi, &bbio->bio) {
1756                 u64 start = eb->start + bio_offset;
1757                 struct folio *folio = fi.folio;
1758                 u32 len = fi.length;
1759 
1760                 btrfs_folio_clear_writeback(fs_info, folio, start, len);
1761                 bio_offset += len;
1762         }
1763 
1764         clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
1765         smp_mb__after_atomic();
1766         wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
1767 
1768         bio_put(&bbio->bio);
1769 }
1770 
1771 static void prepare_eb_write(struct extent_buffer *eb)
1772 {
1773         u32 nritems;
1774         unsigned long start;
1775         unsigned long end;
1776 
1777         clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
1778 
1779         /* Zero the btree block beyond nritems to avoid stale content. */
1780         nritems = btrfs_header_nritems(eb);
1781         if (btrfs_header_level(eb) > 0) {
1782                 end = btrfs_node_key_ptr_offset(eb, nritems);
1783                 memzero_extent_buffer(eb, end, eb->len - end);
1784         } else {
1785                 /*
1786                  * Leaf:
1787                  * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
1788                  */
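                /*
                 * In other words, item headers grow from the front of the
                 * leaf while item data grows backwards from its end.  Here
                 * "start" is the first byte after the last used item header
                 * and "end" is the lowest used data offset, so the unused gap
                 * in between is what gets zeroed below.
                 */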
1789                 start = btrfs_item_nr_offset(eb, nritems);
1790                 end = btrfs_item_nr_offset(eb, 0);
1791                 if (nritems == 0)
1792                         end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
1793                 else
1794                         end += btrfs_item_offset(eb, nritems - 1);
1795                 memzero_extent_buffer(eb, start, end - start);
1796         }
1797 }
1798 
1799 static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
1800                                             struct writeback_control *wbc)
1801 {
1802         struct btrfs_fs_info *fs_info = eb->fs_info;
1803         struct btrfs_bio *bbio;
1804 
1805         prepare_eb_write(eb);
1806 
1807         bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
1808                                REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
1809                                eb->fs_info, end_bbio_meta_write, eb);
1810         bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
1811         bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
1812         wbc_init_bio(wbc, &bbio->bio);
1813         bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
1814         bbio->file_offset = eb->start;
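        /*
         * Two layouts are handled below: for subpage metadata (nodesize
         * smaller than the page size) the whole eb lives inside part of a
         * single folio, so only that sub-range is marked for writeback and
         * added to the bio; otherwise the eb spans one or more full folios
         * which are each added in full.
         */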
1815         if (fs_info->nodesize < PAGE_SIZE) {
1816                 struct folio *folio = eb->folios[0];
1817                 bool ret;
1818 
1819                 folio_lock(folio);
1820                 btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1821                 if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1822                                                        eb->len)) {
1823                         folio_clear_dirty_for_io(folio);
1824                         wbc->nr_to_write--;
1825                 }
1826                 ret = bio_add_folio(&bbio->bio, folio, eb->len,
1827                                     eb->start - folio_pos(folio));
1828                 ASSERT(ret);
1829                 wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
1830                 folio_unlock(folio);
1831         } else {
1832                 int num_folios = num_extent_folios(eb);
1833 
1834                 for (int i = 0; i < num_folios; i++) {
1835                         struct folio *folio = eb->folios[i];
1836                         bool ret;
1837 
1838                         folio_lock(folio);
1839                         folio_clear_dirty_for_io(folio);
1840                         folio_start_writeback(folio);
1841                         ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
1842                         ASSERT(ret);
1843                         wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
1844                                                  eb->folio_size);
1845                         wbc->nr_to_write -= folio_nr_pages(folio);
1846                         folio_unlock(folio);
1847                 }
1848         }
1849         btrfs_submit_bio(bbio, 0);
1850 }
1851 
1852 /*
1853  * Submit one subpage btree page.
1854  *
1855  * The main difference to submit_eb_page() is:
1856  * - Page locking
1857  *   For subpage, we don't rely on page locking at all.
1858  *
1859  * - Flush write bio
1860  *   We only flush the bio if we may be unable to fit the current extent
1861  *   buffers into the current bio.
1862  *
1863  * Return >=0 for the number of submitted extent buffers.
1864  * Return <0 for fatal error.
1865  */
1866 static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
1867 {
1868         struct btrfs_fs_info *fs_info = page_to_fs_info(page);
1869         struct folio *folio = page_folio(page);
1870         int submitted = 0;
1871         u64 page_start = page_offset(page);
1872         int bit_start = 0;
1873         int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
1874 
1875         /* Lock and write each dirty extent buffer in the range. */
1876         while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
1877                 struct btrfs_subpage *subpage = folio_get_private(folio);
1878                 struct extent_buffer *eb;
1879                 unsigned long flags;
1880                 u64 start;
1881 
1882                 /*
1883                  * Take private lock to ensure the subpage won't be detached
1884                  * in the meantime.
1885                  */
1886                 spin_lock(&page->mapping->i_private_lock);
1887                 if (!folio_test_private(folio)) {
1888                         spin_unlock(&page->mapping->i_private_lock);
1889                         break;
1890                 }
1891                 spin_lock_irqsave(&subpage->lock, flags);
1892                 if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
1893                               subpage->bitmaps)) {
1894                         spin_unlock_irqrestore(&subpage->lock, flags);
1895                         spin_unlock(&page->mapping->i_private_lock);
1896                         bit_start++;
1897                         continue;
1898                 }
1899 
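                /*
                 * A set dirty bit here indicates a dirty eb starting at this
                 * sector.  Translate the bit index into a byte offset and skip
                 * ahead by one nodesize worth of sectors, since the eb covers
                 * sectors_per_node consecutive bits.
                 */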
1900                 start = page_start + bit_start * fs_info->sectorsize;
1901                 bit_start += sectors_per_node;
1902 
1903                 /*
1904                  * Here we just want to grab the eb without touching extra
1905                  * spin locks, so call find_extent_buffer_nolock().
1906                  */
1907                 eb = find_extent_buffer_nolock(fs_info, start);
1908                 spin_unlock_irqrestore(&subpage->lock, flags);
1909                 spin_unlock(&page->mapping->i_private_lock);
1910 
1911                 /*
1912                  * The eb has already reached 0 refs thus find_extent_buffer()
1913                  * doesn't return it. We don't need to write back such eb
1914                  * anyway.
1915                  */
1916                 if (!eb)
1917                         continue;
1918 
1919                 if (lock_extent_buffer_for_io(eb, wbc)) {
1920                         write_one_eb(eb, wbc);
1921                         submitted++;
1922                 }
1923                 free_extent_buffer(eb);
1924         }
1925         return submitted;
1926 }
1927 
1928 /*
1929  * Submit all page(s) of one extent buffer.
1930  *
1931  * @page:       the page of one extent buffer
1932  * @eb_context: to determine if we need to submit this page, if current page
1933  *              belongs to this eb, we don't need to submit
1934  *
1935  * The caller should pass each page in bytenr order, and here we use
1936  * @eb_context to determine if we have submitted pages of one extent buffer.
1937  *
1938  * If we have, we just skip until we hit a new page that doesn't belong to
1939  * current @eb_context.
1940  *
1941  * If not, we submit all the page(s) of the extent buffer.
1942  *
1943  * Return >0 if we have submitted the extent buffer successfully.
1944  * Return 0 if we don't need to submit the page, as it's already submitted by
1945  * previous call.
1946  * Return <0 for fatal error.
1947  */
1948 static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
1949 {
1950         struct writeback_control *wbc = ctx->wbc;
1951         struct address_space *mapping = page->mapping;
1952         struct folio *folio = page_folio(page);
1953         struct extent_buffer *eb;
1954         int ret;
1955 
1956         if (!folio_test_private(folio))
1957                 return 0;
1958 
1959         if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
1960                 return submit_eb_subpage(page, wbc);
1961 
1962         spin_lock(&mapping->i_private_lock);
1963         if (!folio_test_private(folio)) {
1964                 spin_unlock(&mapping->i_private_lock);
1965                 return 0;
1966         }
1967 
1968         eb = folio_get_private(folio);
1969 
1970         /*
1971          * Shouldn't happen and normally this would be a BUG_ON but no point
1972          * crashing the machine for something we can survive anyway.
1973          */
1974         if (WARN_ON(!eb)) {
1975                 spin_unlock(&mapping->i_private_lock);
1976                 return 0;
1977         }
1978 
1979         if (eb == ctx->eb) {
1980                 spin_unlock(&mapping->i_private_lock);
1981                 return 0;
1982         }
1983         ret = atomic_inc_not_zero(&eb->refs);
1984         spin_unlock(&mapping->i_private_lock);
1985         if (!ret)
1986                 return 0;
1987 
1988         ctx->eb = eb;
1989 
1990         ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx);
1991         if (ret) {
1992                 if (ret == -EBUSY)
1993                         ret = 0;
1994                 free_extent_buffer(eb);
1995                 return ret;
1996         }
1997 
1998         if (!lock_extent_buffer_for_io(eb, wbc)) {
1999                 free_extent_buffer(eb);
2000                 return 0;
2001         }
2002         /* Implies write in zoned mode. */
2003         if (ctx->zoned_bg) {
2004                 /* Mark the last eb in the block group. */
2005                 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
2006                 ctx->zoned_bg->meta_write_pointer += eb->len;
2007         }
2008         write_one_eb(eb, wbc);
2009         free_extent_buffer(eb);
2010         return 1;
2011 }
2012 
2013 int btree_write_cache_pages(struct address_space *mapping,
2014                                    struct writeback_control *wbc)
2015 {
2016         struct btrfs_eb_write_context ctx = { .wbc = wbc };
2017         struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host);
2018         int ret = 0;
2019         int done = 0;
2020         int nr_to_write_done = 0;
2021         struct folio_batch fbatch;
2022         unsigned int nr_folios;
2023         pgoff_t index;
2024         pgoff_t end;            /* Inclusive */
2025         int scanned = 0;
2026         xa_mark_t tag;
2027 
2028         folio_batch_init(&fbatch);
2029         if (wbc->range_cyclic) {
2030                 index = mapping->writeback_index; /* Start from prev offset */
2031                 end = -1;
2032                 /*
2033                  * Starting from the beginning means we don't need to cycle over
2034                  * the range again, so mark it as scanned.
2035                  */
2036                 scanned = (index == 0);
2037         } else {
2038                 index = wbc->range_start >> PAGE_SHIFT;
2039                 end = wbc->range_end >> PAGE_SHIFT;
2040                 scanned = 1;
2041         }
2042         if (wbc->sync_mode == WB_SYNC_ALL)
2043                 tag = PAGECACHE_TAG_TOWRITE;
2044         else
2045                 tag = PAGECACHE_TAG_DIRTY;
2046         btrfs_zoned_meta_io_lock(fs_info);
2047 retry:
2048         if (wbc->sync_mode == WB_SYNC_ALL)
2049                 tag_pages_for_writeback(mapping, index, end);
2050         while (!done && !nr_to_write_done && (index <= end) &&
2051                (nr_folios = filemap_get_folios_tag(mapping, &index, end,
2052                                             tag, &fbatch))) {
2053                 unsigned i;
2054 
2055                 for (i = 0; i < nr_folios; i++) {
2056                         struct folio *folio = fbatch.folios[i];
2057 
2058                         ret = submit_eb_page(&folio->page, &ctx);
2059                         if (ret == 0)
2060                                 continue;
2061                         if (ret < 0) {
2062                                 done = 1;
2063                                 break;
2064                         }
2065 
2066                         /*
2067                          * The filesystem may choose to bump up nr_to_write.
2068                          * We have to make sure to honor the new nr_to_write
2069                          * at any time.
2070                          */
2071                         nr_to_write_done = wbc->nr_to_write <= 0;
2072                 }
2073                 folio_batch_release(&fbatch);
2074                 cond_resched();
2075         }
2076         if (!scanned && !done) {
2077                 /*
2078                  * We hit the last page and there is more work to be done: wrap
2079                  * back to the start of the file
2080                  */
2081                 scanned = 1;
2082                 index = 0;
2083                 goto retry;
2084         }
2085         /*
2086          * If something went wrong, don't allow any metadata write bio to be
2087          * submitted.
2088          *
2089          * This would prevent use-after-free if we had dirty pages that were
2090          * not cleaned up, which can still happen with fuzzed images.
2091          *
2092          * - Bad extent tree
2093          *   Allowing existing tree block to be allocated for other trees.
2094          *
2095          * - Log tree operations
2096          *   Existing tree blocks get allocated to the log tree, which bumps
2097          *   their generation, then get cleaned in tree re-balance.
2098          *   Such tree block will not be written back, since it's clean,
2099          *   thus no WRITTEN flag set.
2100          *   And after log writes back, this tree block is not traced by
2101          *   any dirty extent_io_tree.
2102          *
2103          * - Offending tree block gets re-dirtied from its original owner
2104          *   Since it has bumped generation, no WRITTEN flag, it can be
2105          *   reused without COWing. This tree block will not be traced
2106          *   by btrfs_transaction::dirty_pages.
2107          *
2108          *   Now such dirty tree block will not be cleaned by any dirty
2109          *   extent io tree. Thus we don't want to submit such wild eb
2110          *   if the fs already has error.
2111          *
2112          * We can get ret > 0 from submit_eb_page() indicating how many ebs
2113          * were submitted. Reset it to 0 to avoid false alerts for the caller.
2114          */
2115         if (ret > 0)
2116                 ret = 0;
2117         if (!ret && BTRFS_FS_ERROR(fs_info))
2118                 ret = -EROFS;
2119 
2120         if (ctx.zoned_bg)
2121                 btrfs_put_block_group(ctx.zoned_bg);
2122         btrfs_zoned_meta_io_unlock(fs_info);
2123         return ret;
2124 }
2125 
2126 /*
2127  * Walk the list of dirty pages of the given address space and write all of them.
2128  *
2129  * @mapping:   address space structure to write
2130  * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
2131  * @bio_ctrl:  holds context for the write, namely the bio
2132  *
2133  * If a page is already under I/O, write_cache_pages() skips it, even
2134  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
2135  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
2136  * and msync() need to guarantee that all the data which was dirty at the time
2137  * the call was made get new I/O started against them.  If wbc->sync_mode is
2138  * WB_SYNC_ALL then we were called for data integrity and we must wait for
2139  * existing IO to complete.
2140  */
2141 static int extent_write_cache_pages(struct address_space *mapping,
2142                              struct btrfs_bio_ctrl *bio_ctrl)
2143 {
2144         struct writeback_control *wbc = bio_ctrl->wbc;
2145         struct inode *inode = mapping->host;
2146         int ret = 0;
2147         int done = 0;
2148         int nr_to_write_done = 0;
2149         struct folio_batch fbatch;
2150         unsigned int nr_folios;
2151         pgoff_t index;
2152         pgoff_t end;            /* Inclusive */
2153         pgoff_t done_index;
2154         int range_whole = 0;
2155         int scanned = 0;
2156         xa_mark_t tag;
2157 
2158         /*
2159          * We have to hold onto the inode so that ordered extents can do their
2160          * work when the IO finishes.  The alternative to this is failing to add
2161          * an ordered extent if the igrab() fails there and that is a huge pain
2162          * to deal with, so instead just hold onto the inode throughout the
2163          * writepages operation.  If it fails here we are freeing up the inode
2164          * anyway and we'd rather not waste our time writing out stuff that is
2165          * going to be truncated anyway.
2166          */
2167         if (!igrab(inode))
2168                 return 0;
2169 
2170         folio_batch_init(&fbatch);
2171         if (wbc->range_cyclic) {
2172                 index = mapping->writeback_index; /* Start from prev offset */
2173                 end = -1;
2174                 /*
2175                  * Starting from the beginning means we don't need to cycle over
2176                  * the range again, so mark it as scanned.
2177                  */
2178                 scanned = (index == 0);
2179         } else {
2180                 index = wbc->range_start >> PAGE_SHIFT;
2181                 end = wbc->range_end >> PAGE_SHIFT;
2182                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2183                         range_whole = 1;
2184                 scanned = 1;
2185         }
2186 
2187         /*
2188          * We do the tagged writepage as long as the snapshot flush bit is set
2189          * and we are the first one to do the filemap_flush() on this inode.
2190          *
2191          * The nr_to_write == LONG_MAX is needed to make sure other flushers do
2192          * not race in and drop the bit.
2193          */
2194         if (range_whole && wbc->nr_to_write == LONG_MAX &&
2195             test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
2196                                &BTRFS_I(inode)->runtime_flags))
2197                 wbc->tagged_writepages = 1;
2198 
2199         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2200                 tag = PAGECACHE_TAG_TOWRITE;
2201         else
2202                 tag = PAGECACHE_TAG_DIRTY;
2203 retry:
2204         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2205                 tag_pages_for_writeback(mapping, index, end);
2206         done_index = index;
2207         while (!done && !nr_to_write_done && (index <= end) &&
2208                         (nr_folios = filemap_get_folios_tag(mapping, &index,
2209                                                         end, tag, &fbatch))) {
2210                 unsigned i;
2211 
2212                 for (i = 0; i < nr_folios; i++) {
2213                         struct folio *folio = fbatch.folios[i];
2214 
2215                         done_index = folio_next_index(folio);
2216                         /*
2217                          * At this point we hold neither the i_pages lock nor
2218                          * the page lock: the page may be truncated or
2219                          * invalidated (changing page->mapping to NULL),
2220                          * or even swizzled back from swapper_space to
2221                          * tmpfs file mapping
2222                          */
2223                         if (!folio_trylock(folio)) {
2224                                 submit_write_bio(bio_ctrl, 0);
2225                                 folio_lock(folio);
2226                         }
2227 
2228                         if (unlikely(folio->mapping != mapping)) {
2229                                 folio_unlock(folio);
2230                                 continue;
2231                         }
2232 
2233                         if (!folio_test_dirty(folio)) {
2234                                 /* Someone wrote it for us. */
2235                                 folio_unlock(folio);
2236                                 continue;
2237                         }
2238 
2239                         if (wbc->sync_mode != WB_SYNC_NONE) {
2240                                 if (folio_test_writeback(folio))
2241                                         submit_write_bio(bio_ctrl, 0);
2242                                 folio_wait_writeback(folio);
2243                         }
2244 
2245                         if (folio_test_writeback(folio) ||
2246                             !folio_clear_dirty_for_io(folio)) {
2247                                 folio_unlock(folio);
2248                                 continue;
2249                         }
2250 
2251                         ret = __extent_writepage(&folio->page, bio_ctrl);
2252                         if (ret < 0) {
2253                                 done = 1;
2254                                 break;
2255                         }
2256 
2257                         /*
2258                          * The filesystem may choose to bump up nr_to_write.
2259                          * We have to make sure to honor the new nr_to_write
2260                          * at any time.
2261                          */
2262                         nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE &&
2263                                             wbc->nr_to_write <= 0);
2264                 }
2265                 folio_batch_release(&fbatch);
2266                 cond_resched();
2267         }
2268         if (!scanned && !done) {
2269                 /*
2270                  * We hit the last page and there is more work to be done: wrap
2271                  * back to the start of the file
2272                  */
2273                 scanned = 1;
2274                 index = 0;
2275 
2276                 /*
2277                  * If we're looping we could run into a page that is locked by a
2278                  * writer and that writer could be waiting on writeback for a
2279                  * page in our current bio, and thus deadlock, so flush the
2280                  * write bio here.
2281                  */
2282                 submit_write_bio(bio_ctrl, 0);
2283                 goto retry;
2284         }
2285 
2286         if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
2287                 mapping->writeback_index = done_index;
2288 
2289         btrfs_add_delayed_iput(BTRFS_I(inode));
2290         return ret;
2291 }
2292 
2293 /*
2294  * Submit the pages in the range to the bio, for call sites whose delalloc
2295  * range has already been run (i.e. an ordered extent was inserted) and all
2296  * pages are still locked.
2297  */
2298 void extent_write_locked_range(struct inode *inode, const struct page *locked_page,
2299                                u64 start, u64 end, struct writeback_control *wbc,
2300                                bool pages_dirty)
2301 {
2302         bool found_error = false;
2303         int ret = 0;
2304         struct address_space *mapping = inode->i_mapping;
2305         struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
2306         const u32 sectorsize = fs_info->sectorsize;
2307         loff_t i_size = i_size_read(inode);
2308         u64 cur = start;
2309         struct btrfs_bio_ctrl bio_ctrl = {
2310                 .wbc = wbc,
2311                 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2312         };
2313 
2314         if (wbc->no_cgroup_owner)
2315                 bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
2316 
2317         ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
2318 
2319         while (cur <= end) {
2320                 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
2321                 u32 cur_len = cur_end + 1 - cur;
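                /*
                 * Worked example with assumed values: for 4K pages, cur == 5K
                 * and end == 11K - 1, cur_end becomes 8K - 1 and cur_len 3K,
                 * i.e. each iteration covers at most the rest of the current
                 * page.
                 */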
2322                 struct page *page;
2323                 int nr = 0;
2324 
2325                 page = find_get_page(mapping, cur >> PAGE_SHIFT);
2326                 ASSERT(PageLocked(page));
2327                 if (pages_dirty && page != locked_page)
2328                         ASSERT(PageDirty(page));
2329 
2330                 ret = __extent_writepage_io(BTRFS_I(inode), page, cur, cur_len,
2331                                             &bio_ctrl, i_size, &nr);
2332                 if (ret == 1)
2333                         goto next_page;
2334 
2335                 /* Make sure the mapping tag for page dirty gets cleared. */
2336                 if (nr == 0) {
2337                         struct folio *folio;
2338 
2339                         folio = page_folio(page);
2340                         btrfs_folio_set_writeback(fs_info, folio, cur, cur_len);
2341                         btrfs_folio_clear_writeback(fs_info, folio, cur, cur_len);
2342                 }
2343                 if (ret) {
2344                         btrfs_mark_ordered_io_finished(BTRFS_I(inode), page,
2345                                                        cur, cur_len, !ret);
2346                         mapping_set_error(page->mapping, ret);
2347                 }
2348                 btrfs_folio_unlock_writer(fs_info, page_folio(page), cur, cur_len);
2349                 if (ret < 0)
2350                         found_error = true;
2351 next_page:
2352                 put_page(page);
2353                 cur = cur_end + 1;
2354         }
2355 
2356         submit_write_bio(&bio_ctrl, found_error ? ret : 0);
2357 }
2358 
2359 int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
2360 {
2361         struct inode *inode = mapping->host;
2362         int ret = 0;
2363         struct btrfs_bio_ctrl bio_ctrl = {
2364                 .wbc = wbc,
2365                 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
2366         };
2367 
2368         /*
2369          * Allow only a single thread to do the reloc work in zoned mode to
2370          * protect the write pointer updates.
2371          */
2372         btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
2373         ret = extent_write_cache_pages(mapping, &bio_ctrl);
2374         submit_write_bio(&bio_ctrl, ret);
2375         btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
2376         return ret;
2377 }
2378 
2379 void btrfs_readahead(struct readahead_control *rac)
2380 {
2381         struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
2382         struct page *pagepool[16];
2383         struct extent_map *em_cached = NULL;
2384         u64 prev_em_start = (u64)-1;
2385         int nr;
2386 
2387         while ((nr = readahead_page_batch(rac, pagepool))) {
2388                 u64 contig_start = readahead_pos(rac);
2389                 u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
2390 
2391                 contiguous_readpages(pagepool, nr, contig_start, contig_end,
2392                                 &em_cached, &bio_ctrl, &prev_em_start);
2393         }
2394 
2395         if (em_cached)
2396                 free_extent_map(em_cached);
2397         submit_one_bio(&bio_ctrl);
2398 }
2399 
2400 /*
2401  * basic invalidate_folio code, this waits on any locked or writeback
2402  * ranges corresponding to the folio, and then deletes any extent state
2403  * records from the tree
2404  */
2405 int extent_invalidate_folio(struct extent_io_tree *tree,
2406                           struct folio *folio, size_t offset)
2407 {
2408         struct extent_state *cached_state = NULL;
2409         u64 start = folio_pos(folio);
2410         u64 end = start + folio_size(folio) - 1;
2411         size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2412 
2413         /* This function is only called for the btree inode */
2414         ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
2415 
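        /*
         * Round the starting offset up to the next block boundary, leaving a
         * partial block containing @offset untouched.  For example, with an
         * assumed 4K blocksize, an offset of 1000 rounds up to 4096.
         */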
2416         start += ALIGN(offset, blocksize);
2417         if (start > end)
2418                 return 0;
2419 
2420         lock_extent(tree, start, end, &cached_state);
2421         folio_wait_writeback(folio);
2422 
2423         /*
2424          * Currently for btree io tree, only EXTENT_LOCKED is utilized,
2425          * so here we only need to unlock the extent range to free any
2426          * existing extent state.
2427          */
2428         unlock_extent(tree, start, end, &cached_state);
2429         return 0;
2430 }
2431 
2432 /*
2433  * a helper for release_folio, this tests for areas of the page that
2434  * are locked or under IO and drops the related state bits if it is safe
2435  * to drop the page.
2436  */
2437 static bool try_release_extent_state(struct extent_io_tree *tree,
2438                                     struct page *page, gfp_t mask)
2439 {
2440         u64 start = page_offset(page);
2441         u64 end = start + PAGE_SIZE - 1;
2442         bool ret;
2443 
2444         if (test_range_bit_exists(tree, start, end, EXTENT_LOCKED)) {
2445                 ret = false;
2446         } else {
2447                 u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
2448                                    EXTENT_DELALLOC_NEW | EXTENT_CTLBITS |
2449                                    EXTENT_QGROUP_RESERVED);
2450                 int ret2;
2451 
2452                 /*
2453                  * At this point we can safely clear everything except the
2454                  * locked bit, the nodatasum bit and the delalloc new bit.
2455                  * The delalloc new bit will be cleared by ordered extent
2456                  * completion.
2457                  */
2458                 ret2 = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
2459 
2460                 /* If __clear_extent_bit() failed due to -ENOMEM,
2461                  * we can't allow the release to continue.
2462                  */
2463                 if (ret2 < 0)
2464                         ret = false;
2465                 else
2466                         ret = true;
2467         }
2468         return ret;
2469 }
2470 
2471 /*
2472  * a helper for release_folio.  As long as there are no locked extents
2473  * in the range corresponding to the page, both state records and extent
2474  * map records are removed
2475  */
2476 bool try_release_extent_mapping(struct page *page, gfp_t mask)
2477 {
2478         u64 start = page_offset(page);
2479         u64 end = start + PAGE_SIZE - 1;
2480         struct btrfs_inode *inode = page_to_inode(page);
2481         struct extent_io_tree *io_tree = &inode->io_tree;
2482 
2483         while (start <= end) {
2484                 const u64 cur_gen = btrfs_get_fs_generation(inode->root->fs_info);
2485                 const u64 len = end - start + 1;
2486                 struct extent_map_tree *extent_tree = &inode->extent_tree;
2487                 struct extent_map *em;
2488 
2489                 write_lock(&extent_tree->lock);
2490                 em = lookup_extent_mapping(extent_tree, start, len);
2491                 if (!em) {
2492                         write_unlock(&extent_tree->lock);
2493                         break;
2494                 }
2495                 if ((em->flags & EXTENT_FLAG_PINNED) || em->start != start) {
2496                         write_unlock(&extent_tree->lock);
2497                         free_extent_map(em);
2498                         break;
2499                 }
2500                 if (test_range_bit_exists(io_tree, em->start,
2501                                           extent_map_end(em) - 1, EXTENT_LOCKED))
2502                         goto next;
2503                 /*
2504                  * If it's not in the list of modified extents, used by a fast
2505                  * fsync, we can remove it. If it's being logged we can safely
2506                  * remove it since fsync took an extra reference on the em.
2507                  */
2508                 if (list_empty(&em->list) || (em->flags & EXTENT_FLAG_LOGGING))
2509                         goto remove_em;
2510                 /*
2511                  * If it's in the list of modified extents, remove it only if
2512          * its generation is older than the current one, in which case
2513                  * we don't need it for a fast fsync. Otherwise don't remove it,
2514                  * we could be racing with an ongoing fast fsync that could miss
2515                  * the new extent.
2516                  */
2517                 if (em->generation >= cur_gen)
2518                         goto next;
2519 remove_em:
2520                 /*
2521                  * We only remove extent maps that are not in the list of
2522                  * modified extents or that are in the list but with a
2523          * generation lower than the current generation, so there is no
2524                  * need to set the full fsync flag on the inode (it hurts the
2525                  * fsync performance for workloads with a data size that exceeds
2526                  * or is close to the system's memory).
2527                  */
2528                 remove_extent_mapping(inode, em);
2529                 /* Once for the inode's extent map tree. */
2530                 free_extent_map(em);
2531 next:
2532                 start = extent_map_end(em);
2533                 write_unlock(&extent_tree->lock);
2534 
2535                 /* Once for us, for the lookup_extent_mapping() reference. */
2536                 free_extent_map(em);
2537 
2538                 if (need_resched()) {
2539                         /*
2540                          * If we need to resched but we can't block just exit
2541                          * and leave any remaining extent maps.
2542                          */
2543                         if (!gfpflags_allow_blocking(mask))
2544                                 break;
2545 
2546                         cond_resched();
2547                 }
2548         }
2549         return try_release_extent_state(io_tree, page, mask);
2550 }
2551 
2552 static void __free_extent_buffer(struct extent_buffer *eb)
2553 {
2554         kmem_cache_free(extent_buffer_cache, eb);
2555 }
2556 
2557 static int extent_buffer_under_io(const struct extent_buffer *eb)
2558 {
2559         return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
2560                 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2561 }
2562 
2563 static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *folio)
2564 {
2565         struct btrfs_subpage *subpage;
2566 
2567         lockdep_assert_held(&folio->mapping->i_private_lock);
2568 
2569         if (folio_test_private(folio)) {
2570                 subpage = folio_get_private(folio);
2571                 if (atomic_read(&subpage->eb_refs))
2572                         return true;
2573                 /*
2574                  * Even if there are no eb refs here, we may still have an
2575                  * end_page_read() call relying on page::private.
2576                  */
2577                 if (atomic_read(&subpage->readers))
2578                         return true;
2579         }
2580         return false;
2581 }
2582 
2583 static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio)
2584 {
2585         struct btrfs_fs_info *fs_info = eb->fs_info;
2586         const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2587 
2588         /*
2589          * For mapped eb, we're going to change the folio private, which should
2590          * be done under the i_private_lock.
2591          */
2592         if (mapped)
2593                 spin_lock(&folio->mapping->i_private_lock);
2594 
2595         if (!folio_test_private(folio)) {
2596                 if (mapped)
2597                         spin_unlock(&folio->mapping->i_private_lock);
2598                 return;
2599         }
2600 
2601         if (fs_info->nodesize >= PAGE_SIZE) {
2602                 /*
2603                  * We do this since we'll remove the pages after we've
2604                  * removed the eb from the radix tree, so we could race
2605                  * and have this page now attached to the new eb.  So
2606                  * only clear the folio private if it's still connected to
2607                  * this eb.
2608                  */
2609                 if (folio_test_private(folio) && folio_get_private(folio) == eb) {
2610                         BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
2611                         BUG_ON(folio_test_dirty(folio));
2612                         BUG_ON(folio_test_writeback(folio));
2613                         /* We need to make sure we haven't been attached to a new eb. */
2614                         folio_detach_private(folio);
2615                 }
2616                 if (mapped)
2617                         spin_unlock(&folio->mapping->i_private_lock);
2618                 return;
2619         }
2620 
2621         /*
2622          * For subpage, we can have a dummy eb with folio private attached.  In
2623          * this case, we can directly detach the private, as such a folio is
2624          * only attached to one dummy eb, with no sharing.
2625          */
2626         if (!mapped) {
2627                 btrfs_detach_subpage(fs_info, folio);
2628                 return;
2629         }
2630 
2631         btrfs_folio_dec_eb_refs(fs_info, folio);
2632 
2633         /*
2634          * We can only detach the folio private if there are no other ebs in the
2635          * page range and no unfinished IO.
2636          */
2637         if (!folio_range_has_eb(fs_info, folio))
2638                 btrfs_detach_subpage(fs_info, folio);
2639 
2640         spin_unlock(&folio->mapping->i_private_lock);
2641 }
2642 
2643 /* Release all pages attached to the extent buffer */
2644 static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb)
2645 {
2646         ASSERT(!extent_buffer_under_io(eb));
2647 
2648         for (int i = 0; i < INLINE_EXTENT_BUFFER_PAGES; i++) {
2649                 struct folio *folio = eb->folios[i];
2650 
2651                 if (!folio)
2652                         continue;
2653 
2654                 detach_extent_buffer_folio(eb, folio);
2655 
2656                 /* One for when we allocated the folio. */
2657                 folio_put(folio);
2658         }
2659 }
2660 
2661 /*
2662  * Helper for releasing the extent buffer.
2663  */
2664 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
2665 {
2666         btrfs_release_extent_buffer_pages(eb);
2667         btrfs_leak_debug_del_eb(eb);
2668         __free_extent_buffer(eb);
2669 }
2670 
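     /*
      * Allocate and initialize the extent_buffer structure itself (refcount,
      * lock, leak-debug tracking).  The backing folios are allocated and
      * attached separately by the callers.  The slab allocation uses
      * __GFP_NOFAIL, so it does not fail in practice.
      */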
2671 static struct extent_buffer *
2672 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
2673                       unsigned long len)
2674 {
2675         struct extent_buffer *eb = NULL;
2676 
2677         eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
2678         eb->start = start;
2679         eb->len = len;
2680         eb->fs_info = fs_info;
2681         init_rwsem(&eb->lock);
2682 
2683         btrfs_leak_debug_add_eb(eb);
2684 
2685         spin_lock_init(&eb->refs_lock);
2686         atomic_set(&eb->refs, 1);
2687 
2688         ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
2689 
2690         return eb;
2691 }
2692 
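     /*
      * Create a private, UNMAPPED copy of @src: allocate a new eb plus its
      * folios, copy the contents and mark the clone uptodate.  Returns NULL
      * on allocation failure.
      */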
2693 struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
2694 {
2695         struct extent_buffer *new;
2696         int num_folios = num_extent_folios(src);
2697         int ret;
2698 
2699         new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
2700         if (new == NULL)
2701                 return NULL;
2702 
2703         /*
2704          * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
2705          * btrfs_release_extent_buffer() has different behavior for an
2706          * UNMAPPED subpage extent buffer.
2707          */
2708         set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
2709 
2710         ret = alloc_eb_folio_array(new, false);
2711         if (ret) {
2712                 btrfs_release_extent_buffer(new);
2713                 return NULL;
2714         }
2715 
2716         for (int i = 0; i < num_folios; i++) {
2717                 struct folio *folio = new->folios[i];
2718 
2719                 ret = attach_extent_buffer_folio(new, folio, NULL);
2720                 if (ret < 0) {
2721                         btrfs_release_extent_buffer(new);
2722                         return NULL;
2723                 }
2724                 WARN_ON(folio_test_dirty(folio));
2725         }
2726         copy_extent_buffer_full(new, src);
2727         set_extent_buffer_uptodate(new);
2728 
2729         return new;
2730 }
2731 
2732 struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2733                                                   u64 start, unsigned long len)
2734 {
2735         struct extent_buffer *eb;
2736         int num_folios = 0;
2737         int ret;
2738 
2739         eb = __alloc_extent_buffer(fs_info, start, len);
2740         if (!eb)
2741                 return NULL;
2742 
2743         ret = alloc_eb_folio_array(eb, false);
2744         if (ret)
2745                 goto err;
2746 
2747         num_folios = num_extent_folios(eb);
2748         for (int i = 0; i < num_folios; i++) {
2749                 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL);
2750                 if (ret < 0)
2751                         goto err;
2752         }
2753 
2754         set_extent_buffer_uptodate(eb);
2755         btrfs_set_header_nritems(eb, 0);
2756         set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
2757 
2758         return eb;
2759 err:
2760         for (int i = 0; i < num_folios; i++) {
2761                 if (eb->folios[i]) {
2762                         detach_extent_buffer_folio(eb, eb->folios[i]);
2763                         folio_put(eb->folios[i]);
2764                 }
2765         }
2766         __free_extent_buffer(eb);
2767         return NULL;
2768 }
2769 
2770 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
2771                                                 u64 start)
2772 {
2773         return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
2774 }
2775 
2776 static void check_buffer_tree_ref(struct extent_buffer *eb)
2777 {
2778         int refs;
2779         /*
2780          * The TREE_REF bit is first set when the extent_buffer is added
2781          * to the radix tree. It is also set again, if it was cleared, when a
2782          * new reference is created by find_extent_buffer.
2783          *
2784          * It is only cleared in two cases: freeing the last non-tree
2785          * reference to the extent_buffer when its STALE bit is set or
2786          * calling release_folio when the tree reference is the only reference.
2787          *
2788          * In both cases, care is taken to ensure that the extent_buffer's
2789          * pages are not under io. However, release_folio can be concurrently
2790          * called with creating new references, which is prone to race
2791          * conditions between the calls to check_buffer_tree_ref in those
2792          * codepaths and clearing TREE_REF in try_release_extent_buffer.
2793          *
2794          * The actual lifetime of the extent_buffer in the radix tree is
2795          * adequately protected by the refcount, but the TREE_REF bit and
2796          * its corresponding reference are not. To protect against this
2797          * class of races, we call check_buffer_tree_ref from the codepaths
2798          * which trigger io. Note that once io is initiated, TREE_REF can no
2799          * longer be cleared, so that is the moment at which any such race is
2800          * best fixed.
2801          */
2802         refs = atomic_read(&eb->refs);
2803         if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2804                 return;
2805 
2806         spin_lock(&eb->refs_lock);
2807         if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
2808                 atomic_inc(&eb->refs);
2809         spin_unlock(&eb->refs_lock);
2810 }
2811 
2812 static void mark_extent_buffer_accessed(struct extent_buffer *eb)
2813 {
2814         int num_folios = num_extent_folios(eb);
2815 
2816         check_buffer_tree_ref(eb);
2817 
2818         for (int i = 0; i < num_folios; i++)
2819                 folio_mark_accessed(eb->folios[i]);
2820 }
2821 
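     /*
      * Look up an extent buffer in the radix tree and return it with an extra
      * reference, or NULL if it is not present.  The refs_lock dance below
      * only synchronizes against a concurrent free_extent_buffer() when the
      * STALE bit is set (see the comment inside).
      */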
2822 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
2823                                          u64 start)
2824 {
2825         struct extent_buffer *eb;
2826 
2827         eb = find_extent_buffer_nolock(fs_info, start);
2828         if (!eb)
2829                 return NULL;
2830         /*
2831          * Lock our eb's refs_lock to avoid races with free_extent_buffer().
2832          * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
2833          * another task running free_extent_buffer() might have seen that flag
2834          * set, eb->refs == 2, that the buffer isn't under IO (dirty and
2835          * writeback flags not set) and it's still in the tree (flag
2836          * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
2837          * decrementing the extent buffer's reference count twice.  So here we
2838          * could race and increment the eb's reference count, clear its stale
2839          * flag, mark it as dirty and drop our reference before the other task
2840          * finishes executing free_extent_buffer, which would later result in
2841          * an attempt to free an extent buffer that is dirty.
2842          */
2843         if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
2844                 spin_lock(&eb->refs_lock);
2845                 spin_unlock(&eb->refs_lock);
2846         }
2847         mark_extent_buffer_accessed(eb);
2848         return eb;
2849 }
2850 
2851 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
2852 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
2853                                         u64 start)
2854 {
2855         struct extent_buffer *eb, *exists = NULL;
2856         int ret;
2857 
2858         eb = find_extent_buffer(fs_info, start);
2859         if (eb)
2860                 return eb;
2861         eb = alloc_dummy_extent_buffer(fs_info, start);
2862         if (!eb)
2863                 return ERR_PTR(-ENOMEM);
2864         eb->fs_info = fs_info;
2865 again:
2866         ret = radix_tree_preload(GFP_NOFS);
2867         if (ret) {
2868                 exists = ERR_PTR(ret);
2869                 goto free_eb;
2870         }
2871         spin_lock(&fs_info->buffer_lock);
2872         ret = radix_tree_insert(&fs_info->buffer_radix,
2873                                 start >> fs_info->sectorsize_bits, eb);
2874         spin_unlock(&fs_info->buffer_lock);
2875         radix_tree_preload_end();
2876         if (ret == -EEXIST) {
2877                 exists = find_extent_buffer(fs_info, start);
2878                 if (exists)
2879                         goto free_eb;
2880                 else
2881                         goto again;
2882         }
2883         check_buffer_tree_ref(eb);
2884         set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
2885 
2886         return eb;
2887 free_eb:
2888         btrfs_release_extent_buffer(eb);
2889         return exists;
2890 }
2891 #endif
2892 
2893 static struct extent_buffer *grab_extent_buffer(
2894                 struct btrfs_fs_info *fs_info, struct page *page)
2895 {
2896         struct folio *folio = page_folio(page);
2897         struct extent_buffer *exists;
2898 
2899         lockdep_assert_held(&page->mapping->i_private_lock);
2900 
2901         /*
2902          * For the subpage case, we completely rely on the radix tree to ensure
2903          * we don't try to insert two ebs for the same bytenr.  So here we always
2904          * return NULL and just continue.
2905          */
2906         if (fs_info->nodesize < PAGE_SIZE)
2907                 return NULL;
2908 
2909         /* Page not yet attached to an extent buffer */
2910         if (!folio_test_private(folio))
2911                 return NULL;
2912 
2913         /*
2914          * We could have already allocated an eb for this page and attached one,
2915          * so let's see if we can get a ref on the existing eb.  If we can, we
2916          * know it's good and we can just return that one; otherwise we know we
2917          * can just overwrite the folio private.
2918          */
2919         exists = folio_get_private(folio);
2920         if (atomic_inc_not_zero(&exists->refs))
2921                 return exists;
2922 
2923         WARN_ON(PageDirty(page));
2924         folio_detach_private(folio);
2925         return NULL;
2926 }
2927 
2928 static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
2929 {
2930         if (!IS_ALIGNED(start, fs_info->sectorsize)) {
2931                 btrfs_err(fs_info, "bad tree block start %llu", start);
2932                 return -EINVAL;
2933         }
2934 
2935         if (fs_info->nodesize < PAGE_SIZE &&
2936             offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
2937                 btrfs_err(fs_info,
2938                 "tree block crosses page boundary, start %llu nodesize %u",
2939                           start, fs_info->nodesize);
2940                 return -EINVAL;
2941         }
2942         if (fs_info->nodesize >= PAGE_SIZE &&
2943             !PAGE_ALIGNED(start)) {
2944                 btrfs_err(fs_info,
2945                 "tree block is not page aligned, start %llu nodesize %u",
2946                           start, fs_info->nodesize);
2947                 return -EINVAL;
2948         }
2949         if (!IS_ALIGNED(start, fs_info->nodesize) &&
2950             !test_and_set_bit(BTRFS_FS_UNALIGNED_TREE_BLOCK, &fs_info->flags)) {
2951                 btrfs_warn(fs_info,
2952 "tree block not nodesize aligned, start %llu nodesize %u, can be resolved by a full metadata balance",
2953                               start, fs_info->nodesize);
2954         }
2955         return 0;
2956 }
2957 
2958 
2959 /*
2960  * Return 0 if eb->folios[i] is attached to the btree inode successfully.
2961  * Return >0 if there is already another extent buffer for the range,
2962  * and @found_eb_ret will be updated.
2963  * Return -EAGAIN if the filemap has an existing folio but with a different
2964  * size than @eb.
2965  * The caller needs to free the existing folios and retry using the same order.
2966  */
2967 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
2968                                       struct btrfs_subpage *prealloc,
2969                                       struct extent_buffer **found_eb_ret)
2970 {
2971 
2972         struct btrfs_fs_info *fs_info = eb->fs_info;
2973         struct address_space *mapping = fs_info->btree_inode->i_mapping;
2974         const unsigned long index = eb->start >> PAGE_SHIFT;
2975         struct folio *existing_folio = NULL;
2976         int ret;
2977 
2978         ASSERT(found_eb_ret);
2979 
2980         /* Caller should ensure the folio exists. */
2981         ASSERT(eb->folios[i]);
2982 
2983 retry:
2984         ret = filemap_add_folio(mapping, eb->folios[i], index + i,
2985                                 GFP_NOFS | __GFP_NOFAIL);
2986         if (!ret)
2987                 goto finish;
2988 
2989         existing_folio = filemap_lock_folio(mapping, index + i);
2990         /* The conflicting page cache entry only exists for a very short time, just retry. */
2991         if (IS_ERR(existing_folio)) {
2992                 existing_folio = NULL;
2993                 goto retry;
2994         }
2995 
2996         /* For now, we should only have single-page folios for btree inode. */
2997         ASSERT(folio_nr_pages(existing_folio) == 1);
2998 
2999         if (folio_size(existing_folio) != eb->folio_size) {
3000                 folio_unlock(existing_folio);
3001                 folio_put(existing_folio);
3002                 return -EAGAIN;
3003         }
3004 
3005 finish:
3006         spin_lock(&mapping->i_private_lock);
3007         if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
3008                 /* We're going to reuse the existing page, can drop our folio now. */
3009                 __free_page(folio_page(eb->folios[i], 0));
3010                 eb->folios[i] = existing_folio;
3011         } else if (existing_folio) {
3012                 struct extent_buffer *existing_eb;
3013 
3014                 existing_eb = grab_extent_buffer(fs_info,
3015                                                  folio_page(existing_folio, 0));
3016                 if (existing_eb) {
3017                         /* The extent buffer still exists, we can use it directly. */
3018                         *found_eb_ret = existing_eb;
3019                         spin_unlock(&mapping->i_private_lock);
3020                         folio_unlock(existing_folio);
3021                         folio_put(existing_folio);
3022                         return 1;
3023                 }
3024                 /* The extent buffer no longer exists, we can reuse the folio. */
3025                 __free_page(folio_page(eb->folios[i], 0));
3026                 eb->folios[i] = existing_folio;
3027         }
3028         eb->folio_size = folio_size(eb->folios[i]);
3029         eb->folio_shift = folio_shift(eb->folios[i]);
3030         /* Should not fail, as we have preallocated the memory. */
3031         ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
3032         ASSERT(!ret);
3033         /*
3034          * This informs that we have an extra eb under allocation, so that
3035          * detach_extent_buffer_page() won't release the folio private when the
3036          * eb hasn't been inserted into the radix tree yet.
3037          *
3038          * The ref will be decreased when the eb releases the page, in
3039          * detach_extent_buffer_page().  Thus it needs no special handling in
3040          * the error path.
3041          */
3042         btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
3043         spin_unlock(&mapping->i_private_lock);
3044         return 0;
3045 }
3046 
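     /*
      * Find or create the extent buffer for the tree block at @start.
      *
      * This allocates a new eb, attaches its folios to the btree inode's
      * mapping and inserts the eb into the buffer radix tree.  If another
      * task wins the race, the already inserted eb is returned instead and
      * the new allocation is released.  Returns an ERR_PTR() on failure.
      */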
3047 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
3048                                           u64 start, u64 owner_root, int level)
3049 {
3050         unsigned long len = fs_info->nodesize;
3051         int num_folios;
3052         int attached = 0;
3053         struct extent_buffer *eb;
3054         struct extent_buffer *existing_eb = NULL;
3055         struct btrfs_subpage *prealloc = NULL;
3056         u64 lockdep_owner = owner_root;
3057         bool page_contig = true;
3058         int uptodate = 1;
3059         int ret;
3060 
3061         if (check_eb_alignment(fs_info, start))
3062                 return ERR_PTR(-EINVAL);
3063 
3064 #if BITS_PER_LONG == 32
3065         if (start >= MAX_LFS_FILESIZE) {
3066                 btrfs_err_rl(fs_info,
3067                 "extent buffer %llu is beyond 32bit page cache limit", start);
3068                 btrfs_err_32bit_limit(fs_info);
3069                 return ERR_PTR(-EOVERFLOW);
3070         }
3071         if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
3072                 btrfs_warn_32bit_limit(fs_info);
3073 #endif
3074 
3075         eb = find_extent_buffer(fs_info, start);
3076         if (eb)
3077                 return eb;
3078 
3079         eb = __alloc_extent_buffer(fs_info, start, len);
3080         if (!eb)
3081                 return ERR_PTR(-ENOMEM);
3082 
3083         /*
3084          * The reloc trees are just snapshots, so we need them to appear to be
3085          * just like any other fs tree WRT lockdep.
3086          */
3087         if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
3088                 lockdep_owner = BTRFS_FS_TREE_OBJECTID;
3089 
3090         btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
3091 
3092         /*
3093          * Preallocate folio private for the subpage case, so that we won't
3094          * allocate memory with the i_private_lock nor the page lock held.
3095          *
3096          * The memory will be freed by attach_extent_buffer_page() or freed
3097          * manually if we exit earlier.
3098          */
3099         if (fs_info->nodesize < PAGE_SIZE) {
3100                 prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
3101                 if (IS_ERR(prealloc)) {
3102                         ret = PTR_ERR(prealloc);
3103                         goto out;
3104                 }
3105         }
3106 
3107 reallocate:
3108         /* Allocate all pages first. */
3109         ret = alloc_eb_folio_array(eb, true);
3110         if (ret < 0) {
3111                 btrfs_free_subpage(prealloc);
3112                 goto out;
3113         }
3114 
3115         num_folios = num_extent_folios(eb);
3116         /* Attach all pages to the filemap. */
3117         for (int i = 0; i < num_folios; i++) {
3118                 struct folio *folio;
3119 
3120                 ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
3121                 if (ret > 0) {
3122                         ASSERT(existing_eb);
3123                         goto out;
3124                 }
3125 
3126                 /*
3127                  * TODO: Special handling for a corner case where the order of
3128                  * folios mismatch between the new eb and filemap.
3129                  *
3130                  * This happens when:
3131                  *
3132                  * - the new eb is using higher order folio
3133                  *
3134                  * - the filemap is still using 0-order folios for the range
3135                  *   This can happen when, at the previous eb allocation, we
3136                  *   didn't have a higher order folio for the call.
3137                  *
3138                  * - the existing eb has already been freed
3139                  *
3140                  * In this case, we have to free the existing folios first, and
3141                  * re-allocate using the same order.
3142                  * Thankfully this is not going to happen yet, as we're still
3143                  * using 0-order folios.
3144                  */
3145                 if (unlikely(ret == -EAGAIN)) {
3146                         ASSERT(0);
3147                         goto reallocate;
3148                 }
3149                 attached++;
3150 
3151                 /*
3152                  * Only after attach_eb_folio_to_filemap() is eb->folios[]
3153                  * reliable, as we may choose to reuse the existing page cache
3154                  * and free the allocated page.
3155                  */
3156                 folio = eb->folios[i];
3157                 WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3158 
3159                 /*
3160                  * Check if the current page is physically contiguous with the
3161                  * previous eb page.
3162                  * At this stage, either we allocated a large folio, in which
3163                  * case @i would only be 0, or we fell back to per-page allocation.
3164                  */
3165                 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3166                         page_contig = false;
3167 
3168                 if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
3169                         uptodate = 0;
3170 
3171                 /*
3172                  * We can't unlock the pages just yet since the extent buffer
3173                  * hasn't been properly inserted into the radix tree; this
3174                  * opens a race with btree_release_folio, which can free a page
3175                  * while we are still filling in all pages for the buffer, and
3176                  * we could crash.
3177                  */
3178         }
3179         if (uptodate)
3180                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3181         /* All pages are physically contiguous, can skip cross page handling. */
3182         if (page_contig)
3183                 eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start);
3184 again:
3185         ret = radix_tree_preload(GFP_NOFS);
3186         if (ret)
3187                 goto out;
3188 
3189         spin_lock(&fs_info->buffer_lock);
3190         ret = radix_tree_insert(&fs_info->buffer_radix,
3191                                 start >> fs_info->sectorsize_bits, eb);
3192         spin_unlock(&fs_info->buffer_lock);
3193         radix_tree_preload_end();
3194         if (ret == -EEXIST) {
3195                 ret = 0;
3196                 existing_eb = find_extent_buffer(fs_info, start);
3197                 if (existing_eb)
3198                         goto out;
3199                 else
3200                         goto again;
3201         }
3202         /* add one reference for the tree */
3203         check_buffer_tree_ref(eb);
3204         set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
3205 
3206         /*
3207          * Now it's safe to unlock the pages because any calls to
3208          * btree_release_folio will correctly detect that a page belongs to a
3209          * live buffer and won't free them prematurely.
3210          */
3211         for (int i = 0; i < num_folios; i++)
3212                 unlock_page(folio_page(eb->folios[i], 0));
3213         return eb;
3214 
3215 out:
3216         WARN_ON(!atomic_dec_and_test(&eb->refs));
3217 
3218         /*
3219          * Any attached folios need to be detached before we unlock them.  This
3220          * is because when we insert our new folios into the mapping we then
3221          * attach our eb to each folio.  If we fail to insert a folio, the lookup
3222          * of the folio at that index will find it and grab its eb.  We do not
3223          * want that lookup to grab this eb, as we're getting ready to free it.
3224          * So we have to detach it first and then unlock it.
3225          *
3226          * We have to drop our reference and NULL it out here because in the
3227          * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb.
3228          * Below when we call btrfs_release_extent_buffer() we will call
3229          * detach_extent_buffer_folio() on our remaining pages in the !subpage
3230          * case.  If we left eb->folios[i] populated in the subpage case we'd
3231          * double put our reference and be super sad.
3232          */
3233         for (int i = 0; i < attached; i++) {
3234                 ASSERT(eb->folios[i]);
3235                 detach_extent_buffer_folio(eb, eb->folios[i]);
3236                 unlock_page(folio_page(eb->folios[i], 0));
3237                 folio_put(eb->folios[i]);
3238                 eb->folios[i] = NULL;
3239         }
3240         /*
3241          * Now all pages of that extent buffer are unmapped, set the UNMAPPED
3242          * flag, so it can be cleaned up without utilizing page->mapping.
3243          */
3244         set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3245 
3246         btrfs_release_extent_buffer(eb);
3247         if (ret < 0)
3248                 return ERR_PTR(ret);
3249         ASSERT(existing_eb);
3250         return existing_eb;
3251 }
3252 
3253 static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
3254 {
3255         struct extent_buffer *eb =
3256                         container_of(head, struct extent_buffer, rcu_head);
3257 
3258         __free_extent_buffer(eb);
3259 }
3260 
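     /*
      * Drop one reference; if it was the last one, remove the eb from the
      * radix tree (when still present) and free its folios, deferring the
      * final struct free to RCU.  Returns 1 if the eb was released, 0
      * otherwise.  Called with eb->refs_lock held, which it releases.
      */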
3261 static int release_extent_buffer(struct extent_buffer *eb)
3262         __releases(&eb->refs_lock)
3263 {
3264         lockdep_assert_held(&eb->refs_lock);
3265 
3266         WARN_ON(atomic_read(&eb->refs) == 0);
3267         if (atomic_dec_and_test(&eb->refs)) {
3268                 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
3269                         struct btrfs_fs_info *fs_info = eb->fs_info;
3270 
3271                         spin_unlock(&eb->refs_lock);
3272 
3273                         spin_lock(&fs_info->buffer_lock);
3274                         radix_tree_delete(&fs_info->buffer_radix,
3275                                           eb->start >> fs_info->sectorsize_bits);
3276                         spin_unlock(&fs_info->buffer_lock);
3277                 } else {
3278                         spin_unlock(&eb->refs_lock);
3279                 }
3280 
3281                 btrfs_leak_debug_del_eb(eb);
3282                 /* Should be safe to release our pages at this point */
3283                 btrfs_release_extent_buffer_pages(eb);
3284 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
3285                 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
3286                         __free_extent_buffer(eb);
3287                         return 1;
3288                 }
3289 #endif
3290                 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
3291                 return 1;
3292         }
3293         spin_unlock(&eb->refs_lock);
3294 
3295         return 0;
3296 }
3297 
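     /*
      * Drop a reference on an extent buffer.  The lockless cmpxchg loop below
      * handles the case where the refcount is high enough that this drop
      * cannot be the final one; otherwise we take refs_lock so the STALE /
      * TREE_REF handling and release_extent_buffer() are serialized.
      */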
3298 void free_extent_buffer(struct extent_buffer *eb)
3299 {
3300         int refs;
3301         if (!eb)
3302                 return;
3303 
3304         refs = atomic_read(&eb->refs);
3305         while (1) {
3306                 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
3307                     || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
3308                         refs == 1))
3309                         break;
3310                 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
3311                         return;
3312         }
3313 
3314         spin_lock(&eb->refs_lock);
3315         if (atomic_read(&eb->refs) == 2 &&
3316             test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
3317             !extent_buffer_under_io(eb) &&
3318             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3319                 atomic_dec(&eb->refs);
3320 
3321         /*
3322          * I know this is terrible, but it's temporary until we stop tracking
3323          * the uptodate bits and such for the extent buffers.
3324          */
3325         release_extent_buffer(eb);
3326 }
3327 
3328 void free_extent_buffer_stale(struct extent_buffer *eb)
3329 {
3330         if (!eb)
3331                 return;
3332 
3333         spin_lock(&eb->refs_lock);
3334         set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
3335 
3336         if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
3337             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
3338                 atomic_dec(&eb->refs);
3339         release_extent_buffer(eb);
3340 }
3341 
3342 static void btree_clear_folio_dirty(struct folio *folio)
3343 {
3344         ASSERT(folio_test_dirty(folio));
3345         ASSERT(folio_test_locked(folio));
3346         folio_clear_dirty_for_io(folio);
3347         xa_lock_irq(&folio->mapping->i_pages);
3348         if (!folio_test_dirty(folio))
3349                 __xa_clear_mark(&folio->mapping->i_pages,
3350                                 folio_index(folio), PAGECACHE_TAG_DIRTY);
3351         xa_unlock_irq(&folio->mapping->i_pages);
3352 }
3353 
3354 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
3355 {
3356         struct btrfs_fs_info *fs_info = eb->fs_info;
3357         struct folio *folio = eb->folios[0];
3358         bool last;
3359 
3360         /* btree_clear_folio_dirty() needs page locked. */
3361         folio_lock(folio);
3362         last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
3363         if (last)
3364                 btree_clear_folio_dirty(folio);
3365         folio_unlock(folio);
3366         WARN_ON(atomic_read(&eb->refs) == 0);
3367 }
3368 
3369 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
3370                               struct extent_buffer *eb)
3371 {
3372         struct btrfs_fs_info *fs_info = eb->fs_info;
3373         int num_folios;
3374 
3375         btrfs_assert_tree_write_locked(eb);
3376 
3377         if (trans && btrfs_header_generation(eb) != trans->transid)
3378                 return;
3379 
3380         /*
3381          * Instead of clearing the dirty flag off of the buffer, mark it as
3382          * EXTENT_BUFFER_ZONED_ZEROOUT. This allows us to preserve
3383          * write-ordering in zoned mode, without the need to later re-dirty
3384          * the extent_buffer.
3385          *
3386          * The actual zeroout of the buffer will happen later in
3387          * btree_csum_one_bio.
3388          */
3389         if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3390                 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
3391                 return;
3392         }
3393 
3394         if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
3395                 return;
3396 
3397         percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
3398                                  fs_info->dirty_metadata_batch);
3399 
3400         if (eb->fs_info->nodesize < PAGE_SIZE)
3401                 return clear_subpage_extent_buffer_dirty(eb);
3402 
3403         num_folios = num_extent_folios(eb);
3404         for (int i = 0; i < num_folios; i++) {
3405                 struct folio *folio = eb->folios[i];
3406 
3407                 if (!folio_test_dirty(folio))
3408                         continue;
3409                 folio_lock(folio);
3410                 btree_clear_folio_dirty(folio);
3411                 folio_unlock(folio);
3412         }
3413         WARN_ON(atomic_read(&eb->refs) == 0);
3414 }
3415 
3416 void set_extent_buffer_dirty(struct extent_buffer *eb)
3417 {
3418         int num_folios;
3419         bool was_dirty;
3420 
3421         check_buffer_tree_ref(eb);
3422 
3423         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3424 
3425         num_folios = num_extent_folios(eb);
3426         WARN_ON(atomic_read(&eb->refs) == 0);
3427         WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
3428         WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
3429 
3430         if (!was_dirty) {
3431                 bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
3432 
3433                 /*
3434                  * For the subpage case, we can have other extent buffers in
3435                  * the same page, and in clear_subpage_extent_buffer_dirty() we
3436                  * have to clear the page dirty flag without the subpage lock held.
3437                  * This can cause a race where our page gets its dirty flag
3438                  * cleared right after we set it.
3439                  *
3440                  * Thankfully, clear_subpage_extent_buffer_dirty() has locked
3441                  * its page for other reasons, so we can use the page lock to
3442                  * prevent the above race.
3443                  */
3444                 if (subpage)
3445                         lock_page(folio_page(eb->folios[0], 0));
3446                 for (int i = 0; i < num_folios; i++)
3447                         btrfs_folio_set_dirty(eb->fs_info, eb->folios[i],
3448                                               eb->start, eb->len);
3449                 if (subpage)
3450                         unlock_page(folio_page(eb->folios[0], 0));
3451                 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
3452                                          eb->len,
3453                                          eb->fs_info->dirty_metadata_batch);
3454         }
3455 #ifdef CONFIG_BTRFS_DEBUG
3456         for (int i = 0; i < num_folios; i++)
3457                 ASSERT(folio_test_dirty(eb->folios[i]));
3458 #endif
3459 }
3460 
3461 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
3462 {
3463         struct btrfs_fs_info *fs_info = eb->fs_info;
3464         int num_folios = num_extent_folios(eb);
3465 
3466         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3467         for (int i = 0; i < num_folios; i++) {
3468                 struct folio *folio = eb->folios[i];
3469 
3470                 if (!folio)
3471                         continue;
3472 
3473                 /*
3474                  * This is special handling for metadata subpage, as regular
3475                  * btrfs_is_subpage() can not handle cloned/dummy metadata.
3476                  */
3477                 if (fs_info->nodesize >= PAGE_SIZE)
3478                         folio_clear_uptodate(folio);
3479                 else
3480                         btrfs_subpage_clear_uptodate(fs_info, folio,
3481                                                      eb->start, eb->len);
3482         }
3483 }
3484 
3485 void set_extent_buffer_uptodate(struct extent_buffer *eb)
3486 {
3487         struct btrfs_fs_info *fs_info = eb->fs_info;
3488         int num_folios = num_extent_folios(eb);
3489 
3490         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
3491         for (int i = 0; i < num_folios; i++) {
3492                 struct folio *folio = eb->folios[i];
3493 
3494                 /*
3495                  * This is special handling for metadata subpage, as regular
3496                  * btrfs_is_subpage() can not handle cloned/dummy metadata.
3497                  */
3498                 if (fs_info->nodesize >= PAGE_SIZE)
3499                         folio_mark_uptodate(folio);
3500                 else
3501                         btrfs_subpage_set_uptodate(fs_info, folio,
3502                                                    eb->start, eb->len);
3503         }
3504 }
3505 
3506 static void clear_extent_buffer_reading(struct extent_buffer *eb)
3507 {
3508         clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
3509         smp_mb__after_atomic();
3510         wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
3511 }
3512 
3513 static void end_bbio_meta_read(struct btrfs_bio *bbio)
3514 {
3515         struct extent_buffer *eb = bbio->private;
3516         struct btrfs_fs_info *fs_info = eb->fs_info;
3517         bool uptodate = !bbio->bio.bi_status;
3518         struct folio_iter fi;
3519         u32 bio_offset = 0;
3520 
3521         /*
3522          * If the extent buffer is marked UPTODATE before the read operation
3523          * completes, other calls to read_extent_buffer_pages() will return
3524          * early without waiting for the read to finish, causing data races.
3525          */
3526         WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags));
3527 
3528         eb->read_mirror = bbio->mirror_num;
3529 
3530         if (uptodate &&
3531             btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
3532                 uptodate = false;
3533 
3534         if (uptodate) {
3535                 set_extent_buffer_uptodate(eb);
3536         } else {
3537                 clear_extent_buffer_uptodate(eb);
3538                 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3539         }
3540 
3541         bio_for_each_folio_all(fi, &bbio->bio) {
3542                 struct folio *folio = fi.folio;
3543                 u64 start = eb->start + bio_offset;
3544                 u32 len = fi.length;
3545 
3546                 if (uptodate)
3547                         btrfs_folio_set_uptodate(fs_info, folio, start, len);
3548                 else
3549                         btrfs_folio_clear_uptodate(fs_info, folio, start, len);
3550 
3551                 bio_offset += len;
3552         }
3553 
3554         clear_extent_buffer_reading(eb);
3555         free_extent_buffer(eb);
3556 
3557         bio_put(&bbio->bio);
3558 }
3559 
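     /*
      * Read the extent buffer from disk if it is not already uptodate.
      *
      * Only one reader does the actual I/O (guarded by EXTENT_BUFFER_READING);
      * everyone else either returns early or, with WAIT_COMPLETE, waits for
      * the bit to clear and then checks EXTENT_BUFFER_UPTODATE.
      */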
3560 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
3561                              const struct btrfs_tree_parent_check *check)
3562 {
3563         struct btrfs_bio *bbio;
3564         bool ret;
3565 
3566         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3567                 return 0;
3568 
3569         /*
3570          * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
3571          * operation, which could potentially still be in flight.  In this case
3572          * we simply want to return an error.
3573          */
3574         if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
3575                 return -EIO;
3576 
3577         /* Someone else is already reading the buffer, just wait for it. */
3578         if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
3579                 goto done;
3580 
3581         /*
3582          * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
3583          * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
3584          * started and finished reading the same eb.  In this case, UPTODATE
3585          * will now be set, and we shouldn't read it in again.
3586          */
3587         if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
3588                 clear_extent_buffer_reading(eb);
3589                 return 0;
3590         }
3591 
3592         clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
3593         eb->read_mirror = 0;
3594         check_buffer_tree_ref(eb);
3595         atomic_inc(&eb->refs);
3596 
3597         bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
3598                                REQ_OP_READ | REQ_META, eb->fs_info,
3599                                end_bbio_meta_read, eb);
3600         bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
3601         bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
3602         bbio->file_offset = eb->start;
3603         memcpy(&bbio->parent_check, check, sizeof(*check));
3604         if (eb->fs_info->nodesize < PAGE_SIZE) {
3605                 ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len,
3606                                     eb->start - folio_pos(eb->folios[0]));
3607                 ASSERT(ret);
3608         } else {
3609                 int num_folios = num_extent_folios(eb);
3610 
3611                 for (int i = 0; i < num_folios; i++) {
3612                         struct folio *folio = eb->folios[i];
3613 
3614                         ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
3615                         ASSERT(ret);
3616                 }
3617         }
3618         btrfs_submit_bio(bbio, mirror_num);
3619 
3620 done:
3621         if (wait == WAIT_COMPLETE) {
3622                 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
3623                 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
3624                         return -EIO;
3625         }
3626 
3627         return 0;
3628 }
3629 
3630 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
3631                             unsigned long len)
3632 {
3633         btrfs_warn(eb->fs_info,
3634                 "access to eb bytenr %llu len %u out of range start %lu len %lu",
3635                 eb->start, eb->len, start, len);
3636         WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
3637 
3638         return true;
3639 }
3640 
3641 /*
3642  * Check if the [start, start + len) range is valid before reading/writing
3643  * the eb.
3644  * NOTE: @start and @len are offsets inside the eb, not logical addresses.
3645  *
3646  * Caller should not touch the dst/src memory if this function returns error.
3647  */
3648 static inline int check_eb_range(const struct extent_buffer *eb,
3649                                  unsigned long start, unsigned long len)
3650 {
3651         unsigned long offset;
3652 
3653         /* start, start + len should not go beyond eb->len nor overflow */
3654         if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
3655                 return report_eb_range(eb, start, len);
3656 
3657         return false;
3658 }
3659 
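     /*
      * Copy @len bytes starting at eb offset @start into @dstv.  Takes the
      * fast path via eb->addr when the backing pages are contiguous,
      * otherwise copies folio by folio.  On an invalid range the destination
      * is zeroed instead.
      */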
3660 void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
3661                         unsigned long start, unsigned long len)
3662 {
3663         const int unit_size = eb->folio_size;
3664         size_t cur;
3665         size_t offset;
3666         char *dst = (char *)dstv;
3667         unsigned long i = get_eb_folio_index(eb, start);
3668 
3669         if (check_eb_range(eb, start, len)) {
3670                 /*
3671                  * Invalid range hit, reset the memory, so callers won't get
3672                  * some random garbage for their uninitialized memory.
3673                  */
3674                 memset(dstv, 0, len);
3675                 return;
3676         }
3677 
3678         if (eb->addr) {
3679                 memcpy(dstv, eb->addr + start, len);
3680                 return;
3681         }
3682 
3683         offset = get_eb_offset_in_folio(eb, start);
3684 
3685         while (len > 0) {
3686                 char *kaddr;
3687 
3688                 cur = min(len, unit_size - offset);
3689                 kaddr = folio_address(eb->folios[i]);
3690                 memcpy(dst, kaddr + offset, cur);
3691 
3692                 dst += cur;
3693                 len -= cur;
3694                 offset = 0;
3695                 i++;
3696         }
3697 }
3698 
3699 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
3700                                        void __user *dstv,
3701                                        unsigned long start, unsigned long len)
3702 {
3703         const int unit_size = eb->folio_size;
3704         size_t cur;
3705         size_t offset;
3706         char __user *dst = (char __user *)dstv;
3707         unsigned long i = get_eb_folio_index(eb, start);
3708         int ret = 0;
3709 
3710         WARN_ON(start > eb->len);
3711         WARN_ON(start + len > eb->len);
3712 
3713         if (eb->addr) {
3714                 if (copy_to_user_nofault(dstv, eb->addr + start, len))
3715                         ret = -EFAULT;
3716                 return ret;
3717         }
3718 
3719         offset = get_eb_offset_in_folio(eb, start);
3720 
3721         while (len > 0) {
3722                 char *kaddr;
3723 
3724                 cur = min(len, unit_size - offset);
3725                 kaddr = folio_address(eb->folios[i]);
3726                 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
3727                         ret = -EFAULT;
3728                         break;
3729                 }
3730 
3731                 dst += cur;
3732                 len -= cur;
3733                 offset = 0;
3734                 i++;
3735         }
3736 
3737         return ret;
3738 }
3739 
3740 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
3741                          unsigned long start, unsigned long len)
3742 {
3743         const int unit_size = eb->folio_size;
3744         size_t cur;
3745         size_t offset;
3746         char *kaddr;
3747         char *ptr = (char *)ptrv;
3748         unsigned long i = get_eb_folio_index(eb, start);
3749         int ret = 0;
3750 
3751         if (check_eb_range(eb, start, len))
3752                 return -EINVAL;
3753 
3754         if (eb->addr)
3755                 return memcmp(ptrv, eb->addr + start, len);
3756 
3757         offset = get_eb_offset_in_folio(eb, start);
3758 
3759         while (len > 0) {
3760                 cur = min(len, unit_size - offset);
3761                 kaddr = folio_address(eb->folios[i]);
3762                 ret = memcmp(ptr, kaddr + offset, cur);
3763                 if (ret)
3764                         break;
3765 
3766                 ptr += cur;
3767                 len -= cur;
3768                 offset = 0;
3769                 i++;
3770         }
3771         return ret;
3772 }
3773 
3774 /*
3775  * Check that the extent buffer is uptodate.
3776  *
3777  * For the regular sector size == PAGE_SIZE case, check if the folio is uptodate.
3778  * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
3779  */
3780 static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i)
3781 {
3782         struct btrfs_fs_info *fs_info = eb->fs_info;
3783         struct folio *folio = eb->folios[i];
3784 
3785         ASSERT(folio);
3786 
3787         /*
3788          * If we are using the commit root we could potentially clear a page
3789          * Uptodate while we're using the extent buffer that we've previously
3790          * looked up.  We don't want to complain in this case, as the page was
3791          * valid before; we just didn't write it out.  Instead we want to catch
3792          * the case where we didn't actually read the block properly, which
3793          * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
3794          */
3795         if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
3796                 return;
3797 
3798         if (fs_info->nodesize < PAGE_SIZE) {
3799                 folio = eb->folios[0];
3800                 ASSERT(i == 0);
3801                 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
3802                                                          eb->start, eb->len)))
3803                         btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
3804         } else {
3805                 WARN_ON(!folio_test_uptodate(folio));
3806         }
3807 }
3808 
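     /*
      * Common helper for writing into an extent buffer: copies @len bytes
      * from @srcv to eb offset @start, using memmove() when @use_memmove is
      * set so overlapping ranges within the eb are handled correctly.
      */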
3809 static void __write_extent_buffer(const struct extent_buffer *eb,
3810                                   const void *srcv, unsigned long start,
3811                                   unsigned long len, bool use_memmove)
3812 {
3813         const int unit_size = eb->folio_size;
3814         size_t cur;
3815         size_t offset;
3816         char *kaddr;
3817         const char *src = (const char *)srcv;
3818         unsigned long i = get_eb_folio_index(eb, start);
3819         /* For unmapped (dummy) ebs, no need to check their uptodate status. */
3820         const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
3821 
3822         if (check_eb_range(eb, start, len))
3823                 return;
3824 
3825         if (eb->addr) {
3826                 if (use_memmove)
3827                         memmove(eb->addr + start, srcv, len);
3828                 else
3829                         memcpy(eb->addr + start, srcv, len);
3830                 return;
3831         }
3832 
3833         offset = get_eb_offset_in_folio(eb, start);
3834 
3835         while (len > 0) {
3836                 if (check_uptodate)
3837                         assert_eb_folio_uptodate(eb, i);
3838 
3839                 cur = min(len, unit_size - offset);
3840                 kaddr = folio_address(eb->folios[i]);
3841                 if (use_memmove)
3842                         memmove(kaddr + offset, src, cur);
3843                 else
3844                         memcpy(kaddr + offset, src, cur);
3845 
3846                 src += cur;
3847                 len -= cur;
3848                 offset = 0;
3849                 i++;
3850         }
3851 }
3852 
3853 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
3854                          unsigned long start, unsigned long len)
3855 {
3856         return __write_extent_buffer(eb, srcv, start, len, false);
3857 }
3858 
3859 static void memset_extent_buffer(const struct extent_buffer *eb, int c,
3860                                  unsigned long start, unsigned long len)
3861 {
3862         const int unit_size = eb->folio_size;
3863         unsigned long cur = start;
3864 
3865         if (eb->addr) {
3866                 memset(eb->addr + start, c, len);
3867                 return;
3868         }
3869 
3870         while (cur < start + len) {
3871                 unsigned long index = get_eb_folio_index(eb, cur);
3872                 unsigned int offset = get_eb_offset_in_folio(eb, cur);
3873                 unsigned int cur_len = min(start + len - cur, unit_size - offset);
3874 
3875                 assert_eb_folio_uptodate(eb, index);
3876                 memset(folio_address(eb->folios[index]) + offset, c, cur_len);
3877 
3878                 cur += cur_len;
3879         }
3880 }
3881 
3882 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
3883                            unsigned long len)
3884 {
3885         if (check_eb_range(eb, start, len))
3886                 return;
3887         return memset_extent_buffer(eb, 0, start, len);
3888 }
3889 
3890 void copy_extent_buffer_full(const struct extent_buffer *dst,
3891                              const struct extent_buffer *src)
3892 {
3893         const int unit_size = src->folio_size;
3894         unsigned long cur = 0;
3895 
3896         ASSERT(dst->len == src->len);
3897 
3898         while (cur < src->len) {
3899                 unsigned long index = get_eb_folio_index(src, cur);
3900                 unsigned long offset = get_eb_offset_in_folio(src, cur);
3901                 unsigned long cur_len = min(src->len, unit_size - offset);
3902                 void *addr = folio_address(src->folios[index]) + offset;
3903 
3904                 write_extent_buffer(dst, addr, cur, cur_len);
3905 
3906                 cur += cur_len;
3907         }
3908 }
3909 
3910 void copy_extent_buffer(const struct extent_buffer *dst,
3911                         const struct extent_buffer *src,
3912                         unsigned long dst_offset, unsigned long src_offset,
3913                         unsigned long len)
3914 {
3915         const int unit_size = dst->folio_size;
3916         u64 dst_len = dst->len;
3917         size_t cur;
3918         size_t offset;
3919         char *kaddr;
3920         unsigned long i = get_eb_folio_index(dst, dst_offset);
3921 
3922         if (check_eb_range(dst, dst_offset, len) ||
3923             check_eb_range(src, src_offset, len))
3924                 return;
3925 
3926         WARN_ON(src->len != dst_len);
3927 
3928         offset = get_eb_offset_in_folio(dst, dst_offset);
3929 
3930         while (len > 0) {
3931                 assert_eb_folio_uptodate(dst, i);
3932 
3933                 cur = min(len, (unsigned long)(unit_size - offset));
3934 
3935                 kaddr = folio_address(dst->folios[i]);
3936                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3937 
3938                 src_offset += cur;
3939                 len -= cur;
3940                 offset = 0;
3941                 i++;
3942         }
3943 }
3944 
3945 /*
3946  * Calculate the folio and offset of the byte containing the given bit number.
3947  *
3948  * @eb:           the extent buffer
3949  * @start:        offset of the bitmap item in the extent buffer
3950  * @nr:           bit number
3951  * @folio_index:  return index of the folio in the extent buffer that contains
3952  *                the given bit number
3953  * @folio_offset: return offset into the folio given by folio_index
3954  *
3955  * This helper hides the ugliness of finding the byte in an extent buffer which
3956  * contains a given bit.
3957  */
3958 static inline void eb_bitmap_offset(const struct extent_buffer *eb,
3959                                     unsigned long start, unsigned long nr,
3960                                     unsigned long *folio_index,
3961                                     size_t *folio_offset)
3962 {
3963         size_t byte_offset = BIT_BYTE(nr);
3964         size_t offset;
3965 
3966         /*
3967          * The byte we want is the offset of the extent buffer + the offset of
3968          * the bitmap item in the extent buffer + the offset of the byte in the
3969          * bitmap item.
3970          */
3971         offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset;
3972 
3973         *folio_index = offset >> eb->folio_shift;
3974         *folio_offset = offset_in_eb_folio(eb, offset);
3975 }
3976 
3977 /*
3978  * Determine whether a bit in a bitmap item is set.
3979  *
3980  * @eb:     the extent buffer
3981  * @start:  offset of the bitmap item in the extent buffer
3982  * @nr:     bit number to test
3983  */
3984 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
3985                            unsigned long nr)
3986 {
3987         unsigned long i;
3988         size_t offset;
3989         u8 *kaddr;
3990 
3991         eb_bitmap_offset(eb, start, nr, &i, &offset);
3992         assert_eb_folio_uptodate(eb, i);
3993         kaddr = folio_address(eb->folios[i]);
3994         return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
3995 }
3996 
3997 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr)
3998 {
3999         unsigned long index = get_eb_folio_index(eb, bytenr);
4000 
4001         if (check_eb_range(eb, bytenr, 1))
4002                 return NULL;
4003         return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr);
4004 }
4005 
4006 /*
4007  * Set an area of a bitmap to 1.
4008  *
4009  * @eb:     the extent buffer
4010  * @start:  offset of the bitmap item in the extent buffer
4011  * @pos:    bit number of the first bit
4012  * @len:    number of bits to set
4013  */
4014 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
4015                               unsigned long pos, unsigned long len)
4016 {
4017         unsigned int first_byte = start + BIT_BYTE(pos);
4018         unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4019         const bool same_byte = (first_byte == last_byte);
4020         u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4021         u8 *kaddr;
4022 
4023         if (same_byte)
4024                 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4025 
4026         /* Handle the first byte. */
4027         kaddr = extent_buffer_get_byte(eb, first_byte);
4028         *kaddr |= mask;
4029         if (same_byte)
4030                 return;
4031 
4032         /* Handle the byte aligned part. */
4033         ASSERT(first_byte + 1 <= last_byte);
4034         memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1);
4035 
4036         /* Handle the last byte. */
4037         kaddr = extent_buffer_get_byte(eb, last_byte);
4038         *kaddr |= BITMAP_LAST_BYTE_MASK(pos + len);
4039 }
4040 
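/*
 * Editor's illustrative sketch (not part of extent_io.c): the same
 * first-byte / aligned-middle / last-byte pattern applied to a plain byte
 * array.  FIRST_BYTE_MASK()/LAST_BYTE_MASK() are hypothetical macros that
 * mirror the semantics of BITMAP_FIRST_BYTE_MASK()/BITMAP_LAST_BYTE_MASK()
 * as used above.  E.g. set_bits(map, 3, 7) touches bits 3..9: byte 0 gets
 * 0xf8 or-ed in, byte 1 gets 0x03 or-ed in.
 */
#include <string.h>

#define FIRST_BYTE_MASK(pos)    ((unsigned char)(0xff << ((pos) & 7)))
#define LAST_BYTE_MASK(nbits)   ((unsigned char)(0xff >> (-(unsigned long)(nbits) & 7)))

static void set_bits(unsigned char *map, unsigned long pos, unsigned long len)
{
        unsigned long first_byte = pos / 8;
        unsigned long last_byte = (pos + len - 1) / 8;
        unsigned char mask = FIRST_BYTE_MASK(pos);

        if (first_byte == last_byte) {
                map[first_byte] |= mask & LAST_BYTE_MASK(pos + len);
                return;
        }
        map[first_byte] |= mask;
        /* Whole bytes strictly between the first and last byte. */
        memset(map + first_byte + 1, 0xff, last_byte - first_byte - 1);
        map[last_byte] |= LAST_BYTE_MASK(pos + len);
}
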
4041 
4042 /*
4043  * Clear an area of a bitmap.
4044  *
4045  * @eb:     the extent buffer
4046  * @start:  offset of the bitmap item in the extent buffer
4047  * @pos:    bit number of the first bit
4048  * @len:    number of bits to clear
4049  */
4050 void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
4051                                 unsigned long start, unsigned long pos,
4052                                 unsigned long len)
4053 {
4054         unsigned int first_byte = start + BIT_BYTE(pos);
4055         unsigned int last_byte = start + BIT_BYTE(pos + len - 1);
4056         const bool same_byte = (first_byte == last_byte);
4057         u8 mask = BITMAP_FIRST_BYTE_MASK(pos);
4058         u8 *kaddr;
4059 
4060         if (same_byte)
4061                 mask &= BITMAP_LAST_BYTE_MASK(pos + len);
4062 
4063         /* Handle the first byte. */
4064         kaddr = extent_buffer_get_byte(eb, first_byte);
4065         *kaddr &= ~mask;
4066         if (same_byte)
4067                 return;
4068 
4069         /* Handle the byte aligned part. */
4070         ASSERT(first_byte + 1 <= last_byte);
4071         memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1);
4072 
4073         /* Handle the last byte. */
4074         kaddr = extent_buffer_get_byte(eb, last_byte);
4075         *kaddr &= ~BITMAP_LAST_BYTE_MASK(pos + len);
4076 }
4077 
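/*
 * Editor's illustrative sketch (not part of extent_io.c): the clearing
 * variant of the sketch after extent_buffer_bitmap_set() above; identical
 * structure, with the masks inverted and the middle bytes zeroed.  Reuses the
 * hypothetical FIRST_BYTE_MASK()/LAST_BYTE_MASK() macros and <string.h> from
 * that sketch.
 */
static void clear_bits(unsigned char *map, unsigned long pos, unsigned long len)
{
        unsigned long first_byte = pos / 8;
        unsigned long last_byte = (pos + len - 1) / 8;
        unsigned char mask = FIRST_BYTE_MASK(pos);

        if (first_byte == last_byte) {
                map[first_byte] &= ~(mask & LAST_BYTE_MASK(pos + len));
                return;
        }
        map[first_byte] &= ~mask;
        memset(map + first_byte + 1, 0, last_byte - first_byte - 1);
        map[last_byte] &= ~LAST_BYTE_MASK(pos + len);
}
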
4078 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
4079 {
4080         unsigned long distance = (src > dst) ? src - dst : dst - src;
4081         return distance < len;
4082 }
4083 
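/*
 * Editor's illustrative sketch (not part of extent_io.c): two equal-length
 * ranges [src, src + len) and [dst, dst + len) overlap exactly when the
 * distance between their starts is less than len, which is what
 * areas_overlap() above computes.  A standalone predicate with a few worked
 * cases (function names here are hypothetical):
 */
#include <assert.h>

static int ranges_overlap(unsigned long src, unsigned long dst, unsigned long len)
{
        unsigned long distance = src > dst ? src - dst : dst - src;

        return distance < len;
}

static void ranges_overlap_examples(void)
{
        assert(ranges_overlap(100, 110, 16));   /* distance 10 < 16: overlap, memmove needed */
        assert(!ranges_overlap(100, 110, 8));   /* distance 10 >= 8: disjoint, memcpy is fine */
        assert(!ranges_overlap(100, 110, 10));  /* ranges that merely touch do not overlap */
}
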
4084 void memcpy_extent_buffer(const struct extent_buffer *dst,
4085                           unsigned long dst_offset, unsigned long src_offset,
4086                           unsigned long len)
4087 {
4088         const int unit_size = dst->folio_size;
4089         unsigned long cur_off = 0;
4090 
4091         if (check_eb_range(dst, dst_offset, len) ||
4092             check_eb_range(dst, src_offset, len))
4093                 return;
4094 
4095         if (dst->addr) {
4096                 const bool use_memmove = areas_overlap(src_offset, dst_offset, len);
4097 
4098                 if (use_memmove)
4099                         memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4100                 else
4101                         memcpy(dst->addr + dst_offset, dst->addr + src_offset, len);
4102                 return;
4103         }
4104 
4105         while (cur_off < len) {
4106                 unsigned long cur_src = cur_off + src_offset;
4107                 unsigned long folio_index = get_eb_folio_index(dst, cur_src);
4108                 unsigned long folio_off = get_eb_offset_in_folio(dst, cur_src);
4109                 unsigned long cur_len = min(src_offset + len - cur_src,
4110                                             unit_size - folio_off);
4111                 void *src_addr = folio_address(dst->folios[folio_index]) + folio_off;
4112                 const bool use_memmove = areas_overlap(src_offset + cur_off,
4113                                                        dst_offset + cur_off, cur_len);
4114 
4115                 __write_extent_buffer(dst, src_addr, dst_offset + cur_off, cur_len,
4116                                       use_memmove);
4117                 cur_off += cur_len;
4118         }
4119 }
4120 
4121 void memmove_extent_buffer(const struct extent_buffer *dst,
4122                            unsigned long dst_offset, unsigned long src_offset,
4123                            unsigned long len)
4124 {
4125         unsigned long dst_end = dst_offset + len - 1;
4126         unsigned long src_end = src_offset + len - 1;
4127 
4128         if (check_eb_range(dst, dst_offset, len) ||
4129             check_eb_range(dst, src_offset, len))
4130                 return;
4131 
4132         if (dst_offset < src_offset) {
4133                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
4134                 return;
4135         }
4136 
4137         if (dst->addr) {
4138                 memmove(dst->addr + dst_offset, dst->addr + src_offset, len);
4139                 return;
4140         }
4141 
4142         while (len > 0) {
4143                 unsigned long src_i;
4144                 size_t cur;
4145                 size_t dst_off_in_folio;
4146                 size_t src_off_in_folio;
4147                 void *src_addr;
4148                 bool use_memmove;
4149 
4150                 src_i = get_eb_folio_index(dst, src_end);
4151 
4152                 dst_off_in_folio = get_eb_offset_in_folio(dst, dst_end);
4153                 src_off_in_folio = get_eb_offset_in_folio(dst, src_end);
4154 
4155                 cur = min_t(unsigned long, len, src_off_in_folio + 1);
4156                 cur = min(cur, dst_off_in_folio + 1);
4157 
4158                 src_addr = folio_address(dst->folios[src_i]) + src_off_in_folio -
4159                                          cur + 1;
4160                 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1,
4161                                             cur);
4162 
4163                 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur,
4164                                       use_memmove);
4165 
4166                 dst_end -= cur;
4167                 src_end -= cur;
4168                 len -= cur;
4169         }
4170 }
4171 
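/*
 * Editor's illustrative sketch (not part of extent_io.c): when the
 * destination starts after the source, the loop above copies backwards from
 * the last byte so that no byte is overwritten before it has been read.  A
 * simplified single-buffer analogue of that backward walk, reusing the
 * hypothetical UNIT_SIZE and <string.h> from the chunked-copy sketch further
 * up; memmove() per chunk stands in for the conditional memmove/memcpy choice
 * made above.
 */
static void chunked_move_backwards(unsigned char *buf, unsigned long dst_offset,
                                   unsigned long src_offset, unsigned long len)
{
        unsigned long dst_end = dst_offset + len - 1;
        unsigned long src_end = src_offset + len - 1;

        while (len > 0) {
                /* Stay within the current unit of both the source and destination ends. */
                unsigned long cur = len;

                if (cur > src_end % UNIT_SIZE + 1)
                        cur = src_end % UNIT_SIZE + 1;
                if (cur > dst_end % UNIT_SIZE + 1)
                        cur = dst_end % UNIT_SIZE + 1;

                memmove(buf + dst_end - cur + 1, buf + src_end - cur + 1, cur);

                dst_end -= cur;
                src_end -= cur;
                len -= cur;
        }
}
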
4172 #define GANG_LOOKUP_SIZE        16
4173 static struct extent_buffer *get_next_extent_buffer(
4174                 const struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
4175 {
4176         struct extent_buffer *gang[GANG_LOOKUP_SIZE];
4177         struct extent_buffer *found = NULL;
4178         u64 page_start = page_offset(page);
4179         u64 cur = page_start;
4180 
4181         ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
4182         lockdep_assert_held(&fs_info->buffer_lock);
4183 
4184         while (cur < page_start + PAGE_SIZE) {
4185                 int ret;
4186                 int i;
4187 
4188                 ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
4189                                 (void **)gang, cur >> fs_info->sectorsize_bits,
4190                                 min_t(unsigned int, GANG_LOOKUP_SIZE,
4191                                       PAGE_SIZE / fs_info->nodesize));
4192                 if (ret == 0)
4193                         goto out;
4194                 for (i = 0; i < ret; i++) {
4195                         /* Already beyond page end */
4196                         if (gang[i]->start >= page_start + PAGE_SIZE)
4197                                 goto out;
4198                         /* Found one */
4199                         if (gang[i]->start >= bytenr) {
4200                                 found = gang[i];
4201                                 goto out;
4202                         }
4203                 }
4204                 cur = gang[ret - 1]->start + gang[ret - 1]->len;
4205         }
4206 out:
4207         return found;
4208 }
4209 
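/*
 * Editor's illustrative sketch (not part of extent_io.c): the gang lookup
 * above effectively answers "which is the first extent buffer that starts at
 * or after @bytenr and still begins inside this page?".  A minimal analogue
 * over an array of start offsets sorted in ascending order (the radix tree
 * returns entries in index order), with a hypothetical PAGE_SIZE_EX standing
 * in for PAGE_SIZE:
 */
#define PAGE_SIZE_EX 4096

static long find_next_start(const unsigned long *starts, int nr,
                            unsigned long page_start, unsigned long bytenr)
{
        for (int i = 0; i < nr; i++) {
                if (starts[i] >= page_start + PAGE_SIZE_EX)
                        return -1;              /* already beyond the page end */
                if (starts[i] >= bytenr)
                        return i;               /* found one */
        }
        return -1;                              /* no more entries in range */
}
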
4210 static int try_release_subpage_extent_buffer(struct page *page)
4211 {
4212         struct btrfs_fs_info *fs_info = page_to_fs_info(page);
4213         u64 cur = page_offset(page);
4214         const u64 end = page_offset(page) + PAGE_SIZE;
4215         int ret;
4216 
4217         while (cur < end) {
4218                 struct extent_buffer *eb = NULL;
4219 
4220                 /*
4221                  * Unlike try_release_extent_buffer(), which uses folio private
4222                  * to grab the buffer, in the subpage case we rely on the radix tree,
4223                  * thus we need to ensure radix tree consistency.
4224                  *
4225                  * We also want an atomic snapshot of the radix tree, thus go
4226                  * with spinlock rather than RCU.
4227                  */
4228                 spin_lock(&fs_info->buffer_lock);
4229                 eb = get_next_extent_buffer(fs_info, page, cur);
4230                 if (!eb) {
4231                         /* No more eb in the page range after or at cur */
4232                         spin_unlock(&fs_info->buffer_lock);
4233                         break;
4234                 }
4235                 cur = eb->start + eb->len;
4236 
4237                 /*
4238                  * As in try_release_extent_buffer(), make sure the eb
4239                  * won't disappear out from under us.
4240                  */
4241                 spin_lock(&eb->refs_lock);
4242                 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4243                         spin_unlock(&eb->refs_lock);
4244                         spin_unlock(&fs_info->buffer_lock);
4245                         break;
4246                 }
4247                 spin_unlock(&fs_info->buffer_lock);
4248 
4249                 /*
4250                  * If tree ref isn't set then we know the ref on this eb is a
4251                  * real ref, so just return; this eb will likely be freed soon
4252                  * anyway.
4253                  */
4254                 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4255                         spin_unlock(&eb->refs_lock);
4256                         break;
4257                 }
4258 
4259                 /*
4260                  * Here we don't care about the return value; we will always
4261                  * check the folio private at the end, and
4262                  * release_extent_buffer() will release the refs_lock.
4263                  */
4264                 release_extent_buffer(eb);
4265         }
4266         /*
4267          * Finally, check whether we have cleared folio private: if we have
4268          * released all ebs in the page, folio private should be cleared by now.
4269          */
4270         spin_lock(&page->mapping->i_private_lock);
4271         if (!folio_test_private(page_folio(page)))
4272                 ret = 1;
4273         else
4274                 ret = 0;
4275         spin_unlock(&page->mapping->i_private_lock);
4276         return ret;
4278 }
4279 
4280 int try_release_extent_buffer(struct page *page)
4281 {
4282         struct folio *folio = page_folio(page);
4283         struct extent_buffer *eb;
4284 
4285         if (page_to_fs_info(page)->nodesize < PAGE_SIZE)
4286                 return try_release_subpage_extent_buffer(page);
4287 
4288         /*
4289          * We need to make sure nobody is changing folio private, as we rely on
4290          * folio private as the pointer to the extent buffer.
4291          */
4292         spin_lock(&page->mapping->i_private_lock);
4293         if (!folio_test_private(folio)) {
4294                 spin_unlock(&page->mapping->i_private_lock);
4295                 return 1;
4296         }
4297 
4298         eb = folio_get_private(folio);
4299         BUG_ON(!eb);
4300 
4301         /*
4302          * This is a little awful but should be ok; we need to make sure that
4303          * the eb doesn't disappear out from under us while we're looking at
4304          * this page.
4305          */
4306         spin_lock(&eb->refs_lock);
4307         if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
4308                 spin_unlock(&eb->refs_lock);
4309                 spin_unlock(&page->mapping->i_private_lock);
4310                 return 0;
4311         }
4312         spin_unlock(&page->mapping->i_private_lock);
4313 
4314         /*
4315          * If tree ref isn't set then we know the ref on this eb is a real ref,
4316          * so just return; this page will likely be freed soon anyway.
4317          */
4318         if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
4319                 spin_unlock(&eb->refs_lock);
4320                 return 0;
4321         }
4322 
4323         return release_extent_buffer(eb);
4324 }
4325 
4326 /*
4327  * Attempt to readahead a child block.
4328  *
4329  * @fs_info:    the fs_info
4330  * @bytenr:     bytenr to read
4331  * @owner_root: objectid of the root that owns this eb
4332  * @gen:        generation for the uptodate check, can be 0
4333  * @level:      level for the eb
4334  *
4335  * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
4336  * normal uptodate check of the eb, without checking the generation.  If we have
4337  * to read the block we will not block on anything.
4338  */
4339 void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
4340                                 u64 bytenr, u64 owner_root, u64 gen, int level)
4341 {
4342         struct btrfs_tree_parent_check check = {
4343                 .has_first_key = 0,
4344                 .level = level,
4345                 .transid = gen
4346         };
4347         struct extent_buffer *eb;
4348         int ret;
4349 
4350         eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
4351         if (IS_ERR(eb))
4352                 return;
4353 
4354         if (btrfs_buffer_uptodate(eb, gen, 1)) {
4355                 free_extent_buffer(eb);
4356                 return;
4357         }
4358 
4359         ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
4360         if (ret < 0)
4361                 free_extent_buffer_stale(eb);
4362         else
4363                 free_extent_buffer(eb);
4364 }
4365 
4366 /*
4367  * Readahead a node's child block.
4368  *
4369  * @node:       parent node we're reading from
4370  * @slot:       slot in the parent node for the child we want to read
4371  *
4372  * A helper for btrfs_readahead_tree_block(); we simply read the bytenr pointed
4373  * to by the given slot in the node provided.
4374  */
4375 void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
4376 {
4377         btrfs_readahead_tree_block(node->fs_info,
4378                                    btrfs_node_blockptr(node, slot),
4379                                    btrfs_header_owner(node),
4380                                    btrfs_node_ptr_generation(node, slot),
4381                                    btrfs_header_level(node) - 1);
4382 }
4383 
