
TOMOYO Linux Cross Reference
Linux/fs/iomap/buffered-io.c


  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * Copyright (C) 2010 Red Hat, Inc.
  4  * Copyright (C) 2016-2023 Christoph Hellwig.
  5  */
  6 #include <linux/module.h>
  7 #include <linux/compiler.h>
  8 #include <linux/fs.h>
  9 #include <linux/iomap.h>
 10 #include <linux/pagemap.h>
 11 #include <linux/uio.h>
 12 #include <linux/buffer_head.h>
 13 #include <linux/dax.h>
 14 #include <linux/writeback.h>
 15 #include <linux/list_sort.h>
 16 #include <linux/swap.h>
 17 #include <linux/bio.h>
 18 #include <linux/sched/signal.h>
 19 #include <linux/migrate.h>
 20 #include "trace.h"
 21 
 22 #include "../internal.h"
 23 
 24 #define IOEND_BATCH_SIZE        4096
 25 
 26 typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length);
 27 /*
 28  * Structure allocated for each folio to track per-block uptodate, dirty state
 29  * and I/O completions.
 30  */
 31 struct iomap_folio_state {
 32         spinlock_t              state_lock;
 33         unsigned int            read_bytes_pending;
 34         atomic_t                write_bytes_pending;
 35 
 36         /*
 37          * Each block has two bits in this bitmap:
 38          * Bits [0..blocks_per_folio) have the uptodate status.
 39          * Bits [blocks_per_folio..2*blocks_per_folio) have the dirty status.
 40          */
 41         unsigned long           state[];
 42 };
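
/*
 * Worked example of the state[] layout above (illustrative): with a 16K
 * folio and 4K filesystem blocks, i_blocks_per_folio() == 4, so the
 * bitmap carries eight significant bits:
 *
 *	uptodate state of block n  ->  test_bit(n, ifs->state)
 *	dirty state of block n     ->  test_bit(n + 4, ifs->state)
 *
 * which is exactly how ifs_block_is_uptodate() and ifs_block_is_dirty()
 * below index into it.
 */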
 43 
 44 static struct bio_set iomap_ioend_bioset;
 45 
 46 static inline bool ifs_is_fully_uptodate(struct folio *folio,
 47                 struct iomap_folio_state *ifs)
 48 {
 49         struct inode *inode = folio->mapping->host;
 50 
 51         return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
 52 }
 53 
 54 static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
 55                 unsigned int block)
 56 {
 57         return test_bit(block, ifs->state);
 58 }
 59 
 60 static bool ifs_set_range_uptodate(struct folio *folio,
 61                 struct iomap_folio_state *ifs, size_t off, size_t len)
 62 {
 63         struct inode *inode = folio->mapping->host;
 64         unsigned int first_blk = off >> inode->i_blkbits;
 65         unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
 66         unsigned int nr_blks = last_blk - first_blk + 1;
 67 
 68         bitmap_set(ifs->state, first_blk, nr_blks);
 69         return ifs_is_fully_uptodate(folio, ifs);
 70 }
 71 
 72 static void iomap_set_range_uptodate(struct folio *folio, size_t off,
 73                 size_t len)
 74 {
 75         struct iomap_folio_state *ifs = folio->private;
 76         unsigned long flags;
 77         bool uptodate = true;
 78 
 79         if (ifs) {
 80                 spin_lock_irqsave(&ifs->state_lock, flags);
 81                 uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
 82                 spin_unlock_irqrestore(&ifs->state_lock, flags);
 83         }
 84 
 85         if (uptodate)
 86                 folio_mark_uptodate(folio);
 87 }
 88 
 89 static inline bool ifs_block_is_dirty(struct folio *folio,
 90                 struct iomap_folio_state *ifs, int block)
 91 {
 92         struct inode *inode = folio->mapping->host;
 93         unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
 94 
 95         return test_bit(block + blks_per_folio, ifs->state);
 96 }
 97 
 98 static unsigned ifs_find_dirty_range(struct folio *folio,
 99                 struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
100 {
101         struct inode *inode = folio->mapping->host;
102         unsigned start_blk =
103                 offset_in_folio(folio, *range_start) >> inode->i_blkbits;
104         unsigned end_blk = min_not_zero(
105                 offset_in_folio(folio, range_end) >> inode->i_blkbits,
106                 i_blocks_per_folio(inode, folio));
107         unsigned nblks = 1;
108 
109         while (!ifs_block_is_dirty(folio, ifs, start_blk))
110                 if (++start_blk == end_blk)
111                         return 0;
112 
113         while (start_blk + nblks < end_blk) {
114                 if (!ifs_block_is_dirty(folio, ifs, start_blk + nblks))
115                         break;
116                 nblks++;
117         }
118 
119         *range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
120         return nblks << inode->i_blkbits;
121 }
122 
123 static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
124                 u64 range_end)
125 {
126         struct iomap_folio_state *ifs = folio->private;
127 
128         if (*range_start >= range_end)
129                 return 0;
130 
131         if (ifs)
132                 return ifs_find_dirty_range(folio, ifs, range_start, range_end);
133         return range_end - *range_start;
134 }
135 
136 static void ifs_clear_range_dirty(struct folio *folio,
137                 struct iomap_folio_state *ifs, size_t off, size_t len)
138 {
139         struct inode *inode = folio->mapping->host;
140         unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
141         unsigned int first_blk = (off >> inode->i_blkbits);
142         unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
143         unsigned int nr_blks = last_blk - first_blk + 1;
144         unsigned long flags;
145 
146         spin_lock_irqsave(&ifs->state_lock, flags);
147         bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
148         spin_unlock_irqrestore(&ifs->state_lock, flags);
149 }
150 
151 static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
152 {
153         struct iomap_folio_state *ifs = folio->private;
154 
155         if (ifs)
156                 ifs_clear_range_dirty(folio, ifs, off, len);
157 }
158 
159 static void ifs_set_range_dirty(struct folio *folio,
160                 struct iomap_folio_state *ifs, size_t off, size_t len)
161 {
162         struct inode *inode = folio->mapping->host;
163         unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
164         unsigned int first_blk = (off >> inode->i_blkbits);
165         unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
166         unsigned int nr_blks = last_blk - first_blk + 1;
167         unsigned long flags;
168 
169         spin_lock_irqsave(&ifs->state_lock, flags);
170         bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
171         spin_unlock_irqrestore(&ifs->state_lock, flags);
172 }
173 
174 static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
175 {
176         struct iomap_folio_state *ifs = folio->private;
177 
178         if (ifs)
179                 ifs_set_range_dirty(folio, ifs, off, len);
180 }
181 
182 static struct iomap_folio_state *ifs_alloc(struct inode *inode,
183                 struct folio *folio, unsigned int flags)
184 {
185         struct iomap_folio_state *ifs = folio->private;
186         unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
187         gfp_t gfp;
188 
189         if (ifs || nr_blocks <= 1)
190                 return ifs;
191 
192         if (flags & IOMAP_NOWAIT)
193                 gfp = GFP_NOWAIT;
194         else
195                 gfp = GFP_NOFS | __GFP_NOFAIL;
196 
197         /*
198          * ifs->state tracks two sets of state flags when the
199          * filesystem block size is smaller than the folio size.
200          * The first state tracks per-block uptodate and the
201          * second tracks per-block dirty state.
202          */
203         ifs = kzalloc(struct_size(ifs, state,
204                       BITS_TO_LONGS(2 * nr_blocks)), gfp);
205         if (!ifs)
206                 return ifs;
207 
208         spin_lock_init(&ifs->state_lock);
209         if (folio_test_uptodate(folio))
210                 bitmap_set(ifs->state, 0, nr_blocks);
211         if (folio_test_dirty(folio))
212                 bitmap_set(ifs->state, nr_blocks, nr_blocks);
213         folio_attach_private(folio, ifs);
214 
215         return ifs;
216 }
217 
218 static void ifs_free(struct folio *folio)
219 {
220         struct iomap_folio_state *ifs = folio_detach_private(folio);
221 
222         if (!ifs)
223                 return;
224         WARN_ON_ONCE(ifs->read_bytes_pending != 0);
225         WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
226         WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
227                         folio_test_uptodate(folio));
228         kfree(ifs);
229 }
230 
231 /*
232  * Calculate the range inside the folio that we actually need to read.
233  */
234 static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
235                 loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
236 {
237         struct iomap_folio_state *ifs = folio->private;
238         loff_t orig_pos = *pos;
239         loff_t isize = i_size_read(inode);
240         unsigned block_bits = inode->i_blkbits;
241         unsigned block_size = (1 << block_bits);
242         size_t poff = offset_in_folio(folio, *pos);
243         size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
244         size_t orig_plen = plen;
245         unsigned first = poff >> block_bits;
246         unsigned last = (poff + plen - 1) >> block_bits;
247 
248         /*
249          * If the block size is smaller than the page size, we need to check the
250          * per-block uptodate status and adjust the offset and length if needed
251          * to avoid reading in already uptodate ranges.
252          */
253         if (ifs) {
254                 unsigned int i;
255 
256                 /* move forward for each leading block marked uptodate */
257                 for (i = first; i <= last; i++) {
258                         if (!ifs_block_is_uptodate(ifs, i))
259                                 break;
260                         *pos += block_size;
261                         poff += block_size;
262                         plen -= block_size;
263                         first++;
264                 }
265 
266                 /* truncate len if we find any trailing uptodate block(s) */
267                 for ( ; i <= last; i++) {
268                         if (ifs_block_is_uptodate(ifs, i)) {
269                                 plen -= (last - i + 1) * block_size;
270                                 last = i - 1;
271                                 break;
272                         }
273                 }
274         }
275 
276         /*
277          * If the extent spans the block that contains the i_size, we need to
278          * handle both halves separately so that we properly zero data in the
279          * page cache for blocks that are entirely outside of i_size.
280          */
281         if (orig_pos <= isize && orig_pos + orig_plen > isize) {
282                 unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
283 
284                 if (first <= end && last > end)
285                         plen -= (last - end) * block_size;
286         }
287 
288         *offp = poff;
289         *lenp = plen;
290 }
291 
292 static void iomap_finish_folio_read(struct folio *folio, size_t off,
293                 size_t len, int error)
294 {
295         struct iomap_folio_state *ifs = folio->private;
296         bool uptodate = !error;
297         bool finished = true;
298 
299         if (ifs) {
300                 unsigned long flags;
301 
302                 spin_lock_irqsave(&ifs->state_lock, flags);
303                 if (!error)
304                         uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
305                 ifs->read_bytes_pending -= len;
306                 finished = !ifs->read_bytes_pending;
307                 spin_unlock_irqrestore(&ifs->state_lock, flags);
308         }
309 
310         if (finished)
311                 folio_end_read(folio, uptodate);
312 }
313 
314 static void iomap_read_end_io(struct bio *bio)
315 {
316         int error = blk_status_to_errno(bio->bi_status);
317         struct folio_iter fi;
318 
319         bio_for_each_folio_all(fi, bio)
320                 iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
321         bio_put(bio);
322 }
323 
324 struct iomap_readpage_ctx {
325         struct folio            *cur_folio;
326         bool                    cur_folio_in_bio;
327         struct bio              *bio;
328         struct readahead_control *rac;
329 };
330 
331 /**
332  * iomap_read_inline_data - copy inline data into the page cache
333  * @iter: iteration structure
334  * @folio: folio to copy to
335  *
336  * Copy the inline data in @iter into @folio and zero out the rest of the folio.
337  * Only a single IOMAP_INLINE extent is allowed at the end of each file.
338  * Returns zero for success to complete the read, or the usual negative errno.
339  */
340 static int iomap_read_inline_data(const struct iomap_iter *iter,
341                 struct folio *folio)
342 {
343         const struct iomap *iomap = iomap_iter_srcmap(iter);
344         size_t size = i_size_read(iter->inode) - iomap->offset;
345         size_t offset = offset_in_folio(folio, iomap->offset);
346 
347         if (folio_test_uptodate(folio))
348                 return 0;
349 
350         if (WARN_ON_ONCE(size > iomap->length))
351                 return -EIO;
352         if (offset > 0)
353                 ifs_alloc(iter->inode, folio, iter->flags);
354 
355         folio_fill_tail(folio, offset, iomap->inline_data, size);
356         iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
357         return 0;
358 }
359 
360 static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
361                 loff_t pos)
362 {
363         const struct iomap *srcmap = iomap_iter_srcmap(iter);
364 
365         return srcmap->type != IOMAP_MAPPED ||
366                 (srcmap->flags & IOMAP_F_NEW) ||
367                 pos >= i_size_read(iter->inode);
368 }
369 
370 static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
371                 struct iomap_readpage_ctx *ctx, loff_t offset)
372 {
373         const struct iomap *iomap = &iter->iomap;
374         loff_t pos = iter->pos + offset;
375         loff_t length = iomap_length(iter) - offset;
376         struct folio *folio = ctx->cur_folio;
377         struct iomap_folio_state *ifs;
378         loff_t orig_pos = pos;
379         size_t poff, plen;
380         sector_t sector;
381 
382         if (iomap->type == IOMAP_INLINE)
383                 return iomap_read_inline_data(iter, folio);
384 
385         /* zero post-eof blocks as the page may be mapped */
386         ifs = ifs_alloc(iter->inode, folio, iter->flags);
387         iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
388         if (plen == 0)
389                 goto done;
390 
391         if (iomap_block_needs_zeroing(iter, pos)) {
392                 folio_zero_range(folio, poff, plen);
393                 iomap_set_range_uptodate(folio, poff, plen);
394                 goto done;
395         }
396 
397         ctx->cur_folio_in_bio = true;
398         if (ifs) {
399                 spin_lock_irq(&ifs->state_lock);
400                 ifs->read_bytes_pending += plen;
401                 spin_unlock_irq(&ifs->state_lock);
402         }
403 
404         sector = iomap_sector(iomap, pos);
405         if (!ctx->bio ||
406             bio_end_sector(ctx->bio) != sector ||
407             !bio_add_folio(ctx->bio, folio, plen, poff)) {
408                 gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
409                 gfp_t orig_gfp = gfp;
410                 unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
411 
412                 if (ctx->bio)
413                         submit_bio(ctx->bio);
414 
415                 if (ctx->rac) /* same as readahead_gfp_mask */
416                         gfp |= __GFP_NORETRY | __GFP_NOWARN;
417                 ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
418                                      REQ_OP_READ, gfp);
419                 /*
420                  * If the bio_alloc fails, try it again for a single page to
421                  * avoid having to deal with partial page reads.  This emulates
422                  * what do_mpage_read_folio does.
423                  */
424                 if (!ctx->bio) {
425                         ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
426                                              orig_gfp);
427                 }
428                 if (ctx->rac)
429                         ctx->bio->bi_opf |= REQ_RAHEAD;
430                 ctx->bio->bi_iter.bi_sector = sector;
431                 ctx->bio->bi_end_io = iomap_read_end_io;
432                 bio_add_folio_nofail(ctx->bio, folio, plen, poff);
433         }
434 
435 done:
436         /*
437          * Move the caller beyond our range so that it keeps making progress.
438          * For that, we have to include any leading non-uptodate ranges, but
439          * we can skip trailing ones as they will be handled in the next
440          * iteration.
441          */
442         return pos - orig_pos + plen;
443 }
444 
445 static loff_t iomap_read_folio_iter(const struct iomap_iter *iter,
446                 struct iomap_readpage_ctx *ctx)
447 {
448         struct folio *folio = ctx->cur_folio;
449         size_t offset = offset_in_folio(folio, iter->pos);
450         loff_t length = min_t(loff_t, folio_size(folio) - offset,
451                               iomap_length(iter));
452         loff_t done, ret;
453 
454         for (done = 0; done < length; done += ret) {
455                 ret = iomap_readpage_iter(iter, ctx, done);
456                 if (ret <= 0)
457                         return ret;
458         }
459 
460         return done;
461 }
462 
463 int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
464 {
465         struct iomap_iter iter = {
466                 .inode          = folio->mapping->host,
467                 .pos            = folio_pos(folio),
468                 .len            = folio_size(folio),
469         };
470         struct iomap_readpage_ctx ctx = {
471                 .cur_folio      = folio,
472         };
473         int ret;
474 
475         trace_iomap_readpage(iter.inode, 1);
476 
477         while ((ret = iomap_iter(&iter, ops)) > 0)
478                 iter.processed = iomap_read_folio_iter(&iter, &ctx);
479 
480         if (ctx.bio) {
481                 submit_bio(ctx.bio);
482                 WARN_ON_ONCE(!ctx.cur_folio_in_bio);
483         } else {
484                 WARN_ON_ONCE(ctx.cur_folio_in_bio);
485                 folio_unlock(folio);
486         }
487 
488         /*
489          * Just like mpage_readahead and block_read_full_folio, we always
490          * return 0 and just set the folio error flag on errors.  This
491          * should be cleaned up throughout the stack eventually.
492          */
493         return 0;
494 }
495 EXPORT_SYMBOL_GPL(iomap_read_folio);
496 
497 static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
498                 struct iomap_readpage_ctx *ctx)
499 {
500         loff_t length = iomap_length(iter);
501         loff_t done, ret;
502 
503         for (done = 0; done < length; done += ret) {
504                 if (ctx->cur_folio &&
505                     offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
506                         if (!ctx->cur_folio_in_bio)
507                                 folio_unlock(ctx->cur_folio);
508                         ctx->cur_folio = NULL;
509                 }
510                 if (!ctx->cur_folio) {
511                         ctx->cur_folio = readahead_folio(ctx->rac);
512                         ctx->cur_folio_in_bio = false;
513                 }
514                 ret = iomap_readpage_iter(iter, ctx, done);
515                 if (ret <= 0)
516                         return ret;
517         }
518 
519         return done;
520 }
521 
522 /**
523  * iomap_readahead - Attempt to read pages from a file.
524  * @rac: Describes the pages to be read.
525  * @ops: The operations vector for the filesystem.
526  *
527  * This function is for filesystems to call to implement their readahead
528  * address_space operation.
529  *
530  * Context: The @ops callbacks may submit I/O (eg to read the addresses of
531  * blocks from disc), and may wait for it.  The caller may be trying to
532  * access a different page, and so sleeping excessively should be avoided.
533  * It may allocate memory, but should avoid costly allocations.  This
534  * function is called with memalloc_nofs set, so allocations will not cause
535  * the filesystem to be reentered.
536  */
537 void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
538 {
539         struct iomap_iter iter = {
540                 .inode  = rac->mapping->host,
541                 .pos    = readahead_pos(rac),
542                 .len    = readahead_length(rac),
543         };
544         struct iomap_readpage_ctx ctx = {
545                 .rac    = rac,
546         };
547 
548         trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
549 
550         while (iomap_iter(&iter, ops) > 0)
551                 iter.processed = iomap_readahead_iter(&iter, &ctx);
552 
553         if (ctx.bio)
554                 submit_bio(ctx.bio);
555         if (ctx.cur_folio) {
556                 if (!ctx.cur_folio_in_bio)
557                         folio_unlock(ctx.cur_folio);
558         }
559 }
560 EXPORT_SYMBOL_GPL(iomap_readahead);
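
/*
 * Sketch of how a filesystem wires the buffered read helpers above (and
 * the folio management helpers below) into its address_space_operations.
 * The "myfs_*" names are hypothetical placeholders; myfs_read_iomap_ops
 * would be the filesystem's own iomap_ops that map file offsets to
 * extents.
 */
static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &myfs_read_iomap_ops);
}

static void myfs_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &myfs_read_iomap_ops);
}

static const struct address_space_operations myfs_aops = {
	.read_folio		= myfs_read_folio,
	.readahead		= myfs_readahead,
	.dirty_folio		= iomap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
};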
561 
562 /*
563  * iomap_is_partially_uptodate checks whether blocks within a folio are
564  * uptodate or not.
565  *
566  * Returns true if all blocks which correspond to the specified part
567  * of the folio are uptodate.
568  */
569 bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
570 {
571         struct iomap_folio_state *ifs = folio->private;
572         struct inode *inode = folio->mapping->host;
573         unsigned first, last, i;
574 
575         if (!ifs)
576                 return false;
577 
578         /* Caller's range may extend past the end of this folio */
579         count = min(folio_size(folio) - from, count);
580 
581         /* First and last blocks in range within folio */
582         first = from >> inode->i_blkbits;
583         last = (from + count - 1) >> inode->i_blkbits;
584 
585         for (i = first; i <= last; i++)
586                 if (!ifs_block_is_uptodate(ifs, i))
587                         return false;
588         return true;
589 }
590 EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
591 
592 /**
593  * iomap_get_folio - get a folio reference for writing
594  * @iter: iteration structure
595  * @pos: start offset of write
596  * @len: Suggested size of folio to create.
597  *
598  * Returns a locked reference to the folio at @pos, or an error pointer if the
599  * folio could not be obtained.
600  */
601 struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
602 {
603         fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;
604 
605         if (iter->flags & IOMAP_NOWAIT)
606                 fgp |= FGP_NOWAIT;
607         fgp |= fgf_set_order(len);
608 
609         return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
610                         fgp, mapping_gfp_mask(iter->inode->i_mapping));
611 }
612 EXPORT_SYMBOL_GPL(iomap_get_folio);
613 
614 bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
615 {
616         trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
617                         folio_size(folio));
618 
619         /*
620          * If the folio is dirty, we refuse to release our metadata because
621          * it may be partially dirty.  Once we track per-block dirty state,
622          * we can release the metadata if every block is dirty.
623          */
624         if (folio_test_dirty(folio))
625                 return false;
626         ifs_free(folio);
627         return true;
628 }
629 EXPORT_SYMBOL_GPL(iomap_release_folio);
630 
631 void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
632 {
633         trace_iomap_invalidate_folio(folio->mapping->host,
634                                         folio_pos(folio) + offset, len);
635 
636         /*
637          * If we're invalidating the entire folio, clear the dirty state
638          * from it and release it to avoid unnecessary buildup of the LRU.
639          */
640         if (offset == 0 && len == folio_size(folio)) {
641                 WARN_ON_ONCE(folio_test_writeback(folio));
642                 folio_cancel_dirty(folio);
643                 ifs_free(folio);
644         }
645 }
646 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
647 
648 bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
649 {
650         struct inode *inode = mapping->host;
651         size_t len = folio_size(folio);
652 
653         ifs_alloc(inode, folio, 0);
654         iomap_set_range_dirty(folio, 0, len);
655         return filemap_dirty_folio(mapping, folio);
656 }
657 EXPORT_SYMBOL_GPL(iomap_dirty_folio);
658 
659 static void
660 iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
661 {
662         loff_t i_size = i_size_read(inode);
663 
664         /*
665          * Only truncate newly allocated pages beyond EOF, even if the
666          * write started inside the existing inode size.
667          */
668         if (pos + len > i_size)
669                 truncate_pagecache_range(inode, max(pos, i_size),
670                                          pos + len - 1);
671 }
672 
673 static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
674                 size_t poff, size_t plen, const struct iomap *iomap)
675 {
676         struct bio_vec bvec;
677         struct bio bio;
678 
679         bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
680         bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
681         bio_add_folio_nofail(&bio, folio, plen, poff);
682         return submit_bio_wait(&bio);
683 }
684 
685 static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
686                 size_t len, struct folio *folio)
687 {
688         const struct iomap *srcmap = iomap_iter_srcmap(iter);
689         struct iomap_folio_state *ifs;
690         loff_t block_size = i_blocksize(iter->inode);
691         loff_t block_start = round_down(pos, block_size);
692         loff_t block_end = round_up(pos + len, block_size);
693         unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
694         size_t from = offset_in_folio(folio, pos), to = from + len;
695         size_t poff, plen;
696 
697         /*
698          * If the write or zeroing completely overlaps the current folio, then the
699          * entire folio will be dirtied so there is no need for
700          * per-block state tracking structures to be attached to this folio.
701          * For the unshare case, we must read in the ondisk contents because we
702          * are not changing pagecache contents.
703          */
704         if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
705             pos + len >= folio_pos(folio) + folio_size(folio))
706                 return 0;
707 
708         ifs = ifs_alloc(iter->inode, folio, iter->flags);
709         if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
710                 return -EAGAIN;
711 
712         if (folio_test_uptodate(folio))
713                 return 0;
714 
715         do {
716                 iomap_adjust_read_range(iter->inode, folio, &block_start,
717                                 block_end - block_start, &poff, &plen);
718                 if (plen == 0)
719                         break;
720 
721                 if (!(iter->flags & IOMAP_UNSHARE) &&
722                     (from <= poff || from >= poff + plen) &&
723                     (to <= poff || to >= poff + plen))
724                         continue;
725 
726                 if (iomap_block_needs_zeroing(iter, block_start)) {
727                         if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
728                                 return -EIO;
729                         folio_zero_segments(folio, poff, from, to, poff + plen);
730                 } else {
731                         int status;
732 
733                         if (iter->flags & IOMAP_NOWAIT)
734                                 return -EAGAIN;
735 
736                         status = iomap_read_folio_sync(block_start, folio,
737                                         poff, plen, srcmap);
738                         if (status)
739                                 return status;
740                 }
741                 iomap_set_range_uptodate(folio, poff, plen);
742         } while ((block_start += plen) < block_end);
743 
744         return 0;
745 }
746 
747 static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
748                 size_t len)
749 {
750         const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
751 
752         if (folio_ops && folio_ops->get_folio)
753                 return folio_ops->get_folio(iter, pos, len);
754         else
755                 return iomap_get_folio(iter, pos, len);
756 }
757 
758 static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
759                 struct folio *folio)
760 {
761         const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
762 
763         if (folio_ops && folio_ops->put_folio) {
764                 folio_ops->put_folio(iter->inode, pos, ret, folio);
765         } else {
766                 folio_unlock(folio);
767                 folio_put(folio);
768         }
769 }
770 
771 static int iomap_write_begin_inline(const struct iomap_iter *iter,
772                 struct folio *folio)
773 {
774         /* needs more work for the tailpacking case; disable for now */
775         if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
776                 return -EIO;
777         return iomap_read_inline_data(iter, folio);
778 }
779 
780 static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
781                 size_t len, struct folio **foliop)
782 {
783         const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
784         const struct iomap *srcmap = iomap_iter_srcmap(iter);
785         struct folio *folio;
786         int status = 0;
787 
788         BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
789         if (srcmap != &iter->iomap)
790                 BUG_ON(pos + len > srcmap->offset + srcmap->length);
791 
792         if (fatal_signal_pending(current))
793                 return -EINTR;
794 
795         if (!mapping_large_folio_support(iter->inode->i_mapping))
796                 len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
797 
798         folio = __iomap_get_folio(iter, pos, len);
799         if (IS_ERR(folio))
800                 return PTR_ERR(folio);
801 
802         /*
803          * Now we have a locked folio, before we do anything with it we need to
804          * check that the iomap we have cached is not stale. The inode extent
805          * mapping can change due to concurrent IO in flight (e.g.
806          * IOMAP_UNWRITTEN state can change and memory reclaim could have
807          * reclaimed a previously partially written page at this index after IO
808          * completion before this write reaches this file offset) and hence we
809          * could do the wrong thing here (zero a page range incorrectly or fail
810          * to zero) and corrupt data.
811          */
812         if (folio_ops && folio_ops->iomap_valid) {
813                 bool iomap_valid = folio_ops->iomap_valid(iter->inode,
814                                                          &iter->iomap);
815                 if (!iomap_valid) {
816                         iter->iomap.flags |= IOMAP_F_STALE;
817                         status = 0;
818                         goto out_unlock;
819                 }
820         }
821 
822         if (pos + len > folio_pos(folio) + folio_size(folio))
823                 len = folio_pos(folio) + folio_size(folio) - pos;
824 
825         if (srcmap->type == IOMAP_INLINE)
826                 status = iomap_write_begin_inline(iter, folio);
827         else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
828                 status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
829         else
830                 status = __iomap_write_begin(iter, pos, len, folio);
831 
832         if (unlikely(status))
833                 goto out_unlock;
834 
835         *foliop = folio;
836         return 0;
837 
838 out_unlock:
839         __iomap_put_folio(iter, pos, 0, folio);
840 
841         return status;
842 }
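
/*
 * Sketch of the ->iomap_valid hook that iomap_write_begin() checks above:
 * a filesystem can stamp each mapping with a per-inode sequence number in
 * its ->iomap_begin (via iomap->validity_cookie) and fail the check once
 * the extent map has changed under us.  The "myfs_*" names and the
 * myfs_map_seq() helper are hypothetical.
 */
static bool myfs_iomap_valid(struct inode *inode, const struct iomap *iomap)
{
	return iomap->validity_cookie == myfs_map_seq(inode);
}

static const struct iomap_folio_ops myfs_iomap_folio_ops = {
	.iomap_valid	= myfs_iomap_valid,
};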
843 
844 static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
845                 size_t copied, struct folio *folio)
846 {
847         flush_dcache_folio(folio);
848 
849         /*
850          * The blocks that were entirely written will now be uptodate, so we
851          * don't have to worry about a read_folio reading them and overwriting a
852          * partial write.  However, if we've encountered a short write and only
853          * partially written into a block, it will not be marked uptodate, so a
854          * read_folio might come in and destroy our partial write.
855          *
856          * Do the simplest thing and just treat any short write to a
857          * non-uptodate page as a zero-length write, and force the caller to
858          * redo the whole thing.
859          */
860         if (unlikely(copied < len && !folio_test_uptodate(folio)))
861                 return false;
862         iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
863         iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
864         filemap_dirty_folio(inode->i_mapping, folio);
865         return true;
866 }
867 
868 static void iomap_write_end_inline(const struct iomap_iter *iter,
869                 struct folio *folio, loff_t pos, size_t copied)
870 {
871         const struct iomap *iomap = &iter->iomap;
872         void *addr;
873 
874         WARN_ON_ONCE(!folio_test_uptodate(folio));
875         BUG_ON(!iomap_inline_data_valid(iomap));
876 
877         flush_dcache_folio(folio);
878         addr = kmap_local_folio(folio, pos);
879         memcpy(iomap_inline_data(iomap, pos), addr, copied);
880         kunmap_local(addr);
881 
882         mark_inode_dirty(iter->inode);
883 }
884 
885 /*
886  * Returns true if all copied bytes have been written to the pagecache,
 887  * otherwise returns false.
888  */
889 static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
890                 size_t copied, struct folio *folio)
891 {
892         const struct iomap *srcmap = iomap_iter_srcmap(iter);
893 
894         if (srcmap->type == IOMAP_INLINE) {
895                 iomap_write_end_inline(iter, folio, pos, copied);
896                 return true;
897         }
898 
899         if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
900                 size_t bh_written;
901 
902                 bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
903                                         len, copied, &folio->page, NULL);
904                 WARN_ON_ONCE(bh_written != copied && bh_written != 0);
905                 return bh_written == copied;
906         }
907 
908         return __iomap_write_end(iter->inode, pos, len, copied, folio);
909 }
910 
911 static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
912 {
913         loff_t length = iomap_length(iter);
914         loff_t pos = iter->pos;
915         ssize_t total_written = 0;
916         long status = 0;
917         struct address_space *mapping = iter->inode->i_mapping;
918         size_t chunk = mapping_max_folio_size(mapping);
919         unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
920 
921         do {
922                 struct folio *folio;
923                 loff_t old_size;
924                 size_t offset;          /* Offset into folio */
925                 size_t bytes;           /* Bytes to write to folio */
926                 size_t copied;          /* Bytes copied from user */
927                 size_t written;         /* Bytes have been written */
928 
929                 bytes = iov_iter_count(i);
930 retry:
931                 offset = pos & (chunk - 1);
932                 bytes = min(chunk - offset, bytes);
933                 status = balance_dirty_pages_ratelimited_flags(mapping,
934                                                                bdp_flags);
935                 if (unlikely(status))
936                         break;
937 
938                 if (bytes > length)
939                         bytes = length;
940 
941                 /*
942                  * Bring in the user page that we'll copy from _first_.
943                  * Otherwise there's a nasty deadlock on copying from the
944                  * same page as we're writing to, without it being marked
945                  * up-to-date.
946                  *
947                  * For async buffered writes the assumption is that the user
948                  * page has already been faulted in. This can be optimized by
949                  * faulting the user page.
950                  */
951                 if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
952                         status = -EFAULT;
953                         break;
954                 }
955 
956                 status = iomap_write_begin(iter, pos, bytes, &folio);
957                 if (unlikely(status)) {
958                         iomap_write_failed(iter->inode, pos, bytes);
959                         break;
960                 }
961                 if (iter->iomap.flags & IOMAP_F_STALE)
962                         break;
963 
964                 offset = offset_in_folio(folio, pos);
965                 if (bytes > folio_size(folio) - offset)
966                         bytes = folio_size(folio) - offset;
967 
968                 if (mapping_writably_mapped(mapping))
969                         flush_dcache_folio(folio);
970 
971                 copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
972                 written = iomap_write_end(iter, pos, bytes, copied, folio) ?
973                           copied : 0;
974 
975                 /*
976                  * Update the in-memory inode size after copying the data into
977                  * the page cache.  It's up to the file system to write the
978                  * updated size to disk, preferably after I/O completion so that
979                  * no stale data is exposed.  Only once that's done can we
980                  * unlock and release the folio.
981                  */
982                 old_size = iter->inode->i_size;
983                 if (pos + written > old_size) {
984                         i_size_write(iter->inode, pos + written);
985                         iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
986                 }
987                 __iomap_put_folio(iter, pos, written, folio);
988 
989                 if (old_size < pos)
990                         pagecache_isize_extended(iter->inode, old_size, pos);
991 
992                 cond_resched();
993                 if (unlikely(written == 0)) {
994                         /*
995                          * A short copy made iomap_write_end() reject the
996                          * thing entirely.  Might be memory poisoning
997                          * halfway through, might be a race with munmap,
998                          * might be severe memory pressure.
999                          */
1000                         iomap_write_failed(iter->inode, pos, bytes);
1001                         iov_iter_revert(i, copied);
1002 
1003                         if (chunk > PAGE_SIZE)
1004                                 chunk /= 2;
1005                         if (copied) {
1006                                 bytes = copied;
1007                                 goto retry;
1008                         }
1009                 } else {
1010                         pos += written;
1011                         total_written += written;
1012                         length -= written;
1013                 }
1014         } while (iov_iter_count(i) && length);
1015 
1016         if (status == -EAGAIN) {
1017                 iov_iter_revert(i, total_written);
1018                 return -EAGAIN;
1019         }
1020         return total_written ? total_written : status;
1021 }
1022 
1023 ssize_t
1024 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
1025                 const struct iomap_ops *ops)
1026 {
1027         struct iomap_iter iter = {
1028                 .inode          = iocb->ki_filp->f_mapping->host,
1029                 .pos            = iocb->ki_pos,
1030                 .len            = iov_iter_count(i),
1031                 .flags          = IOMAP_WRITE,
1032         };
1033         ssize_t ret;
1034 
1035         if (iocb->ki_flags & IOCB_NOWAIT)
1036                 iter.flags |= IOMAP_NOWAIT;
1037 
1038         while ((ret = iomap_iter(&iter, ops)) > 0)
1039                 iter.processed = iomap_write_iter(&iter, i);
1040 
1041         if (unlikely(iter.pos == iocb->ki_pos))
1042                 return ret;
1043         ret = iter.pos - iocb->ki_pos;
1044         iocb->ki_pos = iter.pos;
1045         return ret;
1046 }
1047 EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
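
/*
 * Sketch of a caller: a filesystem's ->write_iter typically takes the
 * inode lock, runs the generic write checks and then hands the iov_iter
 * to iomap_file_buffered_write() with its own iomap_ops.  The "myfs_*"
 * names are hypothetical; myfs_buffered_write_iomap_ops stands in for
 * the filesystem's write mapping operations.
 */
static ssize_t myfs_buffered_write_iter(struct kiocb *iocb,
		struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from,
				&myfs_buffered_write_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}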
1048 
1049 static int iomap_write_delalloc_ifs_punch(struct inode *inode,
1050                 struct folio *folio, loff_t start_byte, loff_t end_byte,
1051                 iomap_punch_t punch)
1052 {
1053         unsigned int first_blk, last_blk, i;
1054         loff_t last_byte;
1055         u8 blkbits = inode->i_blkbits;
1056         struct iomap_folio_state *ifs;
1057         int ret = 0;
1058 
1059         /*
1060          * When we have per-block dirty tracking, there can be
1061          * blocks within a folio which are marked uptodate
1062          * but not dirty. In that case it is necessary to punch
1063          * out such blocks to avoid leaking any delalloc blocks.
1064          */
1065         ifs = folio->private;
1066         if (!ifs)
1067                 return ret;
1068 
1069         last_byte = min_t(loff_t, end_byte - 1,
1070                         folio_pos(folio) + folio_size(folio) - 1);
1071         first_blk = offset_in_folio(folio, start_byte) >> blkbits;
1072         last_blk = offset_in_folio(folio, last_byte) >> blkbits;
1073         for (i = first_blk; i <= last_blk; i++) {
1074                 if (!ifs_block_is_dirty(folio, ifs, i)) {
1075                         ret = punch(inode, folio_pos(folio) + (i << blkbits),
1076                                     1 << blkbits);
1077                         if (ret)
1078                                 return ret;
1079                 }
1080         }
1081 
1082         return ret;
1083 }
1084 
1085 
1086 static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
1087                 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
1088                 iomap_punch_t punch)
1089 {
1090         int ret = 0;
1091 
1092         if (!folio_test_dirty(folio))
1093                 return ret;
1094 
1095         /* if dirty, punch up to offset */
1096         if (start_byte > *punch_start_byte) {
1097                 ret = punch(inode, *punch_start_byte,
1098                                 start_byte - *punch_start_byte);
1099                 if (ret)
1100                         return ret;
1101         }
1102 
1103         /* Punch non-dirty blocks within folio */
1104         ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte,
1105                         end_byte, punch);
1106         if (ret)
1107                 return ret;
1108 
1109         /*
1110          * Make sure the next punch start is correctly bound to
1111          * the end of this data range, not the end of the folio.
1112          */
1113         *punch_start_byte = min_t(loff_t, end_byte,
1114                                 folio_pos(folio) + folio_size(folio));
1115 
1116         return ret;
1117 }
1118 
1119 /*
1120  * Scan the data range passed to us for dirty page cache folios. If we find a
1121  * dirty folio, punch out the preceding range and update the offset from which
1122  * the next punch will start.
1123  *
1124  * We can punch out storage reservations under clean pages because they either
1125  * contain data that has been written back - in which case the delalloc punch
1126  * over that range is a no-op - or they were instantiated by read faults, in
1127  * which case they contain zeroes and we can remove the delalloc backing range;
1128  * any new writes to those pages will do the normal hole filling operation...
1129  *
1130  * This makes the logic simple: we only need to keep the delalloc extents
1131  * over the dirty ranges of the page cache.
1132  *
1133  * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
1134  * simplify range iterations.
1135  */
1136 static int iomap_write_delalloc_scan(struct inode *inode,
1137                 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
1138                 iomap_punch_t punch)
1139 {
1140         while (start_byte < end_byte) {
1141                 struct folio    *folio;
1142                 int ret;
1143 
1144                 /* grab locked page */
1145                 folio = filemap_lock_folio(inode->i_mapping,
1146                                 start_byte >> PAGE_SHIFT);
1147                 if (IS_ERR(folio)) {
1148                         start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
1149                                         PAGE_SIZE;
1150                         continue;
1151                 }
1152 
1153                 ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
1154                                                  start_byte, end_byte, punch);
1155                 if (ret) {
1156                         folio_unlock(folio);
1157                         folio_put(folio);
1158                         return ret;
1159                 }
1160 
1161                 /* move offset to start of next folio in range */
1162                 start_byte = folio_next_index(folio) << PAGE_SHIFT;
1163                 folio_unlock(folio);
1164                 folio_put(folio);
1165         }
1166         return 0;
1167 }
1168 
1169 /*
1170  * Punch out all the delalloc blocks in the range given except for those that
1171  * have dirty data still pending in the page cache - those are going to be
1172  * written and so must still retain the delalloc backing for writeback.
1173  *
1174  * As we are scanning the page cache for data, we don't need to reimplement the
1175  * wheel - mapping_seek_hole_data() does exactly what we need to identify the
1176  * start and end of data ranges correctly even for sub-folio block sizes. This
1177  * byte range based iteration is especially convenient because it means we
1178  * don't have to care about variable size folios, nor where the start or end of
1179  * the data range lies within a folio, whether they lie within the same folio or even
1180  * if there are multiple discontiguous data ranges within the folio.
1181  *
1182  * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
1183  * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
1184  * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
1185  * date. A write page fault can then mark it dirty. If we then fail a write()
1186  * beyond EOF into that up to date cached range, we allocate a delalloc block
1187  * beyond EOF and then have to punch it out. Because the range is up to date,
1188  * mapping_seek_hole_data() will return it, and we will skip the punch because
1189  * the folio is dirty. This is incorrect - we always need to punch out delalloc
1190  * beyond EOF in this case as writeback will never write back and convert that
1191  * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
1192  * resulting in always punching out the range from the EOF to the end of the
1193  * range the iomap spans.
1194  *
1195  * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
1196  * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
1197  * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
1198  * returns the end of the data range (data_end). Using closed intervals would
1199  * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
1200  * the code to subtle off-by-one bugs....
1201  */
1202 static int iomap_write_delalloc_release(struct inode *inode,
1203                 loff_t start_byte, loff_t end_byte, iomap_punch_t punch)
1204 {
1205         loff_t punch_start_byte = start_byte;
1206         loff_t scan_end_byte = min(i_size_read(inode), end_byte);
1207         int error = 0;
1208 
1209         /*
1210          * Lock the mapping to avoid races with page faults re-instantiating
1211          * folios and dirtying them via ->page_mkwrite whilst we walk the
1212          * cache and perform delalloc extent removal. Failing to do this can
1213          * leave dirty pages with no space reservation in the cache.
1214          */
1215         filemap_invalidate_lock(inode->i_mapping);
1216         while (start_byte < scan_end_byte) {
1217                 loff_t          data_end;
1218 
1219                 start_byte = mapping_seek_hole_data(inode->i_mapping,
1220                                 start_byte, scan_end_byte, SEEK_DATA);
1221                 /*
1222                  * If there is no more data to scan, all that is left is to
1223                  * punch out the remaining range.
1224                  */
1225                 if (start_byte == -ENXIO || start_byte == scan_end_byte)
1226                         break;
1227                 if (start_byte < 0) {
1228                         error = start_byte;
1229                         goto out_unlock;
1230                 }
1231                 WARN_ON_ONCE(start_byte < punch_start_byte);
1232                 WARN_ON_ONCE(start_byte > scan_end_byte);
1233 
1234                 /*
1235                  * We find the end of this contiguous cached data range by
1236                  * seeking from start_byte to the beginning of the next hole.
1237                  */
1238                 data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
1239                                 scan_end_byte, SEEK_HOLE);
1240                 if (data_end < 0) {
1241                         error = data_end;
1242                         goto out_unlock;
1243                 }
1244 
1245                 /*
1246                  * If we race with post-direct I/O invalidation of the page cache,
1247                  * there might be no data left at start_byte.
1248                  */
1249                 if (data_end == start_byte)
1250                         continue;
1251 
1252                 WARN_ON_ONCE(data_end < start_byte);
1253                 WARN_ON_ONCE(data_end > scan_end_byte);
1254 
1255                 error = iomap_write_delalloc_scan(inode, &punch_start_byte,
1256                                 start_byte, data_end, punch);
1257                 if (error)
1258                         goto out_unlock;
1259 
1260                 /* The next data search starts at the end of this one. */
1261                 start_byte = data_end;
1262         }
1263 
1264         if (punch_start_byte < end_byte)
1265                 error = punch(inode, punch_start_byte,
1266                                 end_byte - punch_start_byte);
1267 out_unlock:
1268         filemap_invalidate_unlock(inode->i_mapping);
1269         return error;
1270 }
1271 
1272 /*
1273  * When a short write occurs, the filesystem may need to use its ->iomap_end
1274  * method to remove reserved space that was allocated in ->iomap_begin. For
1275  * filesystems that use delayed allocation, we need to punch out delalloc
1276  * extents from the range that are not dirty in the page cache. As the write can
1277  * race with page faults, there can be dirty pages over the delalloc extent
1278  * outside the range of a short write but still within the delalloc extent
1279  * allocated for this iomap.
1280  *
1281  * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
1282  * simplify range iterations.
1283  *
1284  * The punch() callback *must* only punch delalloc extents in the range passed
1285  * to it. It must skip over all other types of extents in the range and leave
1286  * them completely unchanged. It must do this punch atomically with respect to
1287  * other extent modifications.
1288  *
1289  * The punch() callback may be called with a folio locked to prevent writeback
1290  * extent allocation racing at the edge of the range we are currently punching.
1291  * The locked folio may or may not cover the range being punched, so it is not
1292  * safe for the punch() callback to lock folios itself.
1293  *
1294  * Lock order is:
1295  *
1296  * inode->i_rwsem (shared or exclusive)
1297  *   inode->i_mapping->invalidate_lock (exclusive)
1298  *     folio_lock()
1299  *       ->punch
1300  *         internal filesystem allocation lock
1301  */
1302 int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
1303                 struct iomap *iomap, loff_t pos, loff_t length,
1304                 ssize_t written, iomap_punch_t punch)
1305 {
1306         loff_t                  start_byte;
1307         loff_t                  end_byte;
1308         unsigned int            blocksize = i_blocksize(inode);
1309 
1310         if (iomap->type != IOMAP_DELALLOC)
1311                 return 0;
1312 
1313         /* If we didn't reserve the blocks, we're not allowed to punch them. */
1314         if (!(iomap->flags & IOMAP_F_NEW))
1315                 return 0;
1316 
1317         /*
1318          * start_byte refers to the first unused block after a short write. If
1319          * nothing was written, round offset down to point at the first block in
1320          * the range.
1321          */
1322         if (unlikely(!written))
1323                 start_byte = round_down(pos, blocksize);
1324         else
1325                 start_byte = round_up(pos + written, blocksize);
1326         end_byte = round_up(pos + length, blocksize);
1327 
1328         /* Nothing to do if we've written the entire delalloc extent */
1329         if (start_byte >= end_byte)
1330                 return 0;
1331 
1332         return iomap_write_delalloc_release(inode, start_byte, end_byte,
1333                                         punch);
1334 }
1335 EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
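
/*
 * Sketch of how a delalloc filesystem might use the helper above from its
 * ->iomap_end method: after a short or failed buffered write it punches
 * out the delalloc reservation created in ->iomap_begin, except where
 * dirty page cache still needs it.  The "myfs_*" names are hypothetical;
 * myfs_punch_delalloc_range() stands in for the filesystem's own delalloc
 * extent removal.
 */
static int myfs_buffered_write_delalloc_punch(struct inode *inode,
		loff_t offset, loff_t length)
{
	return myfs_punch_delalloc_range(inode, offset, length);
}

static int myfs_buffered_write_iomap_end(struct inode *inode, loff_t pos,
		loff_t length, ssize_t written, unsigned flags,
		struct iomap *iomap)
{
	return iomap_file_buffered_write_punch_delalloc(inode, iomap, pos,
			length, written, myfs_buffered_write_delalloc_punch);
}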
1336 
1337 static loff_t iomap_unshare_iter(struct iomap_iter *iter)
1338 {
1339         struct iomap *iomap = &iter->iomap;
1340         const struct iomap *srcmap = iomap_iter_srcmap(iter);
1341         loff_t pos = iter->pos;
1342         loff_t length = iomap_length(iter);
1343         loff_t written = 0;
1344 
1345         /* don't bother with blocks that are not shared to start with */
1346         if (!(iomap->flags & IOMAP_F_SHARED))
1347                 return length;
1348         /* don't bother with holes or unwritten extents */
1349         if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1350                 return length;
1351 
1352         do {
1353                 struct folio *folio;
1354                 int status;
1355                 size_t offset;
1356                 size_t bytes = min_t(u64, SIZE_MAX, length);
1357                 bool ret;
1358 
1359                 status = iomap_write_begin(iter, pos, bytes, &folio);
1360                 if (unlikely(status))
1361                         return status;
1362                 if (iomap->flags & IOMAP_F_STALE)
1363                         break;
1364 
1365                 offset = offset_in_folio(folio, pos);
1366                 if (bytes > folio_size(folio) - offset)
1367                         bytes = folio_size(folio) - offset;
1368 
1369                 ret = iomap_write_end(iter, pos, bytes, bytes, folio);
1370                 __iomap_put_folio(iter, pos, bytes, folio);
1371                 if (WARN_ON_ONCE(!ret))
1372                         return -EIO;
1373 
1374                 cond_resched();
1375 
1376                 pos += bytes;
1377                 written += bytes;
1378                 length -= bytes;
1379 
1380                 balance_dirty_pages_ratelimited(iter->inode->i_mapping);
1381         } while (length > 0);
1382 
1383         return written;
1384 }
1385 
1386 int
1387 iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1388                 const struct iomap_ops *ops)
1389 {
1390         struct iomap_iter iter = {
1391                 .inode          = inode,
1392                 .pos            = pos,
1393                 .flags          = IOMAP_WRITE | IOMAP_UNSHARE,
1394         };
1395         loff_t size = i_size_read(inode);
1396         int ret;
1397 
1398         if (pos < 0 || pos >= size)
1399                 return 0;
1400 
1401         iter.len = min(len, size - pos);
1402         while ((ret = iomap_iter(&iter, ops)) > 0)
1403                 iter.processed = iomap_unshare_iter(&iter);
1404         return ret;
1405 }
1406 EXPORT_SYMBOL_GPL(iomap_file_unshare);
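/*
 * A minimal usage sketch for iomap_file_unshare(); the caller and the
 * write_iomap_ops argument are hypothetical.  It mirrors the common pattern
 * for an FALLOC_FL_UNSHARE_RANGE-style operation: break any shared
 * (reflinked) extents in the range, then flush the freshly written copies.
 */
static int example_unshare_range(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *write_iomap_ops)
{
	int error;

	error = iomap_file_unshare(inode, pos, len, write_iomap_ops);
	if (error)
		return error;
	return filemap_write_and_wait_range(inode->i_mapping, pos,
			pos + len - 1);
}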
1407 
1408 static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
1409 {
1410         const struct iomap *srcmap = iomap_iter_srcmap(iter);
1411         loff_t pos = iter->pos;
1412         loff_t length = iomap_length(iter);
1413         loff_t written = 0;
1414 
1415         /* already zeroed?  we're done. */
1416         if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1417                 return length;
1418 
1419         do {
1420                 struct folio *folio;
1421                 int status;
1422                 size_t offset;
1423                 size_t bytes = min_t(u64, SIZE_MAX, length);
1424                 bool ret;
1425 
1426                 status = iomap_write_begin(iter, pos, bytes, &folio);
1427                 if (status)
1428                         return status;
1429                 if (iter->iomap.flags & IOMAP_F_STALE)
1430                         break;
1431 
1432                 offset = offset_in_folio(folio, pos);
1433                 if (bytes > folio_size(folio) - offset)
1434                         bytes = folio_size(folio) - offset;
1435 
1436                 folio_zero_range(folio, offset, bytes);
1437                 folio_mark_accessed(folio);
1438 
1439                 ret = iomap_write_end(iter, pos, bytes, bytes, folio);
1440                 __iomap_put_folio(iter, pos, bytes, folio);
1441                 if (WARN_ON_ONCE(!ret))
1442                         return -EIO;
1443 
1444                 pos += bytes;
1445                 length -= bytes;
1446                 written += bytes;
1447         } while (length > 0);
1448 
1449         if (did_zero)
1450                 *did_zero = true;
1451         return written;
1452 }
1453 
1454 int
1455 iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1456                 const struct iomap_ops *ops)
1457 {
1458         struct iomap_iter iter = {
1459                 .inode          = inode,
1460                 .pos            = pos,
1461                 .len            = len,
1462                 .flags          = IOMAP_ZERO,
1463         };
1464         int ret;
1465 
1466         while ((ret = iomap_iter(&iter, ops)) > 0)
1467                 iter.processed = iomap_zero_iter(&iter, did_zero);
1468         return ret;
1469 }
1470 EXPORT_SYMBOL_GPL(iomap_zero_range);
1471 
1472 int
1473 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1474                 const struct iomap_ops *ops)
1475 {
1476         unsigned int blocksize = i_blocksize(inode);
1477         unsigned int off = pos & (blocksize - 1);
1478 
1479         /* Block boundary? Nothing to do */
1480         if (!off)
1481                 return 0;
1482         return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
1483 }
1484 EXPORT_SYMBOL_GPL(iomap_truncate_page);
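/*
 * Worked example for the partial-block zeroing above (numbers illustrative):
 * with a 4096 byte block size and a new EOF at pos = 10000,
 *
 *	off = 10000 & (4096 - 1) = 1808
 *
 * so the remaining 4096 - 1808 = 2288 bytes of the final block, i.e. the
 * byte range [10000, 12288), are zeroed via iomap_zero_range().  If pos is
 * already block aligned, off is 0 and the function returns without work.
 */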
1485 
1486 static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
1487                 struct folio *folio)
1488 {
1489         loff_t length = iomap_length(iter);
1490         int ret;
1491 
1492         if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
1493                 ret = __block_write_begin_int(folio, iter->pos, length, NULL,
1494                                               &iter->iomap);
1495                 if (ret)
1496                         return ret;
1497                 block_commit_write(&folio->page, 0, length);
1498         } else {
1499                 WARN_ON_ONCE(!folio_test_uptodate(folio));
1500                 folio_mark_dirty(folio);
1501         }
1502 
1503         return length;
1504 }
1505 
1506 vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1507 {
1508         struct iomap_iter iter = {
1509                 .inode          = file_inode(vmf->vma->vm_file),
1510                 .flags          = IOMAP_WRITE | IOMAP_FAULT,
1511         };
1512         struct folio *folio = page_folio(vmf->page);
1513         ssize_t ret;
1514 
1515         folio_lock(folio);
1516         ret = folio_mkwrite_check_truncate(folio, iter.inode);
1517         if (ret < 0)
1518                 goto out_unlock;
1519         iter.pos = folio_pos(folio);
1520         iter.len = ret;
1521         while ((ret = iomap_iter(&iter, ops)) > 0)
1522                 iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
1523 
1524         if (ret < 0)
1525                 goto out_unlock;
1526         folio_wait_stable(folio);
1527         return VM_FAULT_LOCKED;
1528 out_unlock:
1529         folio_unlock(folio);
1530         return vmf_fs_error(ret);
1531 }
1532 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
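/*
 * A minimal sketch of wiring iomap_page_mkwrite() up as a filesystem's
 * ->page_mkwrite handler.  example_iomap_ops stands in for the filesystem's
 * iomap_ops; the pagefault freeze protection shown is the usual caller-side
 * pattern and is an assumption, not something this file requires.
 */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	ret = iomap_page_mkwrite(vmf, &example_iomap_ops);
	sb_end_pagefault(inode->i_sb);
	return ret;
}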
1533 
1534 static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1535                 size_t len)
1536 {
1537         struct iomap_folio_state *ifs = folio->private;
1538 
1539         WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1540         WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
1541 
1542         if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
1543                 folio_end_writeback(folio);
1544 }
1545 
1546 /*
1547  * We're now finished for good with this ioend structure.  Update the page
1548  * state, release holds on bios, and finally free up memory.  Do not use the
1549  * ioend after this.
1550  */
1551 static u32
1552 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
1553 {
1554         struct inode *inode = ioend->io_inode;
1555         struct bio *bio = &ioend->io_bio;
1556         struct folio_iter fi;
1557         u32 folio_count = 0;
1558 
1559         if (error) {
1560                 mapping_set_error(inode->i_mapping, error);
1561                 if (!bio_flagged(bio, BIO_QUIET)) {
1562                         pr_err_ratelimited(
1563 "%s: writeback error on inode %lu, offset %lld, sector %llu",
1564                                 inode->i_sb->s_id, inode->i_ino,
1565                                 ioend->io_offset, ioend->io_sector);
1566                 }
1567         }
1568 
1569         /* walk all folios in bio, ending page IO on them */
1570         bio_for_each_folio_all(fi, bio) {
1571                 iomap_finish_folio_write(inode, fi.folio, fi.length);
1572                 folio_count++;
1573         }
1574 
1575         bio_put(bio);   /* frees the ioend */
1576         return folio_count;
1577 }
1578 
1579 /*
1580  * Ioend completion routine for merged bios. This can only be called from task
1581  * contexts as merged ioends can be of unbounded length. Hence we have to break up
1582  * the writeback completions into manageable chunks to avoid long scheduler
1583  * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
1584  * good batch processing throughput without creating adverse scheduler latency
1585  * conditions.
1586  */
1587 void
1588 iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1589 {
1590         struct list_head tmp;
1591         u32 completions;
1592 
1593         might_sleep();
1594 
1595         list_replace_init(&ioend->io_list, &tmp);
1596         completions = iomap_finish_ioend(ioend, error);
1597 
1598         while (!list_empty(&tmp)) {
1599                 if (completions > IOEND_BATCH_SIZE * 8) {
1600                         cond_resched();
1601                         completions = 0;
1602                 }
1603                 ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1604                 list_del_init(&ioend->io_list);
1605                 completions += iomap_finish_ioend(ioend, error);
1606         }
1607 }
1608 EXPORT_SYMBOL_GPL(iomap_finish_ioends);
1609 
1610 /*
1611  * We can merge two adjacent ioends if they have the same set of work to do.
1612  */
1613 static bool
1614 iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1615 {
1616         if (ioend->io_bio.bi_status != next->io_bio.bi_status)
1617                 return false;
1618         if ((ioend->io_flags & IOMAP_F_SHARED) ^
1619             (next->io_flags & IOMAP_F_SHARED))
1620                 return false;
1621         if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1622             (next->io_type == IOMAP_UNWRITTEN))
1623                 return false;
1624         if (ioend->io_offset + ioend->io_size != next->io_offset)
1625                 return false;
1626         /*
1627          * Do not merge physically discontiguous ioends. The filesystem
1628          * completion functions will have to iterate the physical
1629          * discontiguities even if we merge the ioends at a logical level, so
1630          * we don't gain anything by merging physical discontiguities here.
1631          *
1632          * We cannot use bio->bi_iter.bi_sector here as it is modified during
1633          * submission so does not point to the start sector of the bio at
1634          * completion.
1635          */
1636         if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
1637                 return false;
1638         return true;
1639 }
1640 
1641 void
1642 iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
1643 {
1644         struct iomap_ioend *next;
1645 
1646         INIT_LIST_HEAD(&ioend->io_list);
1647 
1648         while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
1649                         io_list))) {
1650                 if (!iomap_ioend_can_merge(ioend, next))
1651                         break;
1652                 list_move_tail(&next->io_list, &ioend->io_list);
1653                 ioend->io_size += next->io_size;
1654         }
1655 }
1656 EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
1657 
1658 static int
1659 iomap_ioend_compare(void *priv, const struct list_head *a,
1660                 const struct list_head *b)
1661 {
1662         struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1663         struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
1664 
1665         if (ia->io_offset < ib->io_offset)
1666                 return -1;
1667         if (ia->io_offset > ib->io_offset)
1668                 return 1;
1669         return 0;
1670 }
1671 
1672 void
1673 iomap_sort_ioends(struct list_head *ioend_list)
1674 {
1675         list_sort(NULL, ioend_list, iomap_ioend_compare);
1676 }
1677 EXPORT_SYMBOL_GPL(iomap_sort_ioends);
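/*
 * A minimal sketch (not lifted from any in-tree filesystem) of how a
 * completion worker could combine iomap_sort_ioends(),
 * iomap_ioend_try_merge() and iomap_finish_ioends() to process a list of
 * completed ioends in offset order:
 */
static void example_finish_ioend_list(struct list_head *ioend_list, int error)
{
	struct iomap_ioend *ioend;

	iomap_sort_ioends(ioend_list);
	while ((ioend = list_first_entry_or_null(ioend_list,
			struct iomap_ioend, io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, ioend_list);
		iomap_finish_ioends(ioend, error);
	}
}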
1678 
1679 static void iomap_writepage_end_bio(struct bio *bio)
1680 {
1681         iomap_finish_ioend(iomap_ioend_from_bio(bio),
1682                         blk_status_to_errno(bio->bi_status));
1683 }
1684 
1685 /*
1686  * Submit the final bio for an ioend.
1687  *
1688  * If @error is non-zero, it means that we have a situation where some part of
1689  * the submission process has failed after we've marked pages for writeback.
1690  * We cannot cancel ioend directly in that case, so call the bio end I/O handler
1691  * with the error status here to run the normal I/O completion handler to clear
1692  * the writeback bit and let the file system process the errors.
1693  */
1694 static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error)
1695 {
1696         if (!wpc->ioend)
1697                 return error;
1698 
1699         /*
1700          * Let the file systems prepare the I/O submission and hook in an I/O
1701          * completion handler.  This also needs to happen after a failure,
1702          * so that the file system end I/O handler still gets called to
1703          * clean up.
1704          */
1705         if (wpc->ops->prepare_ioend)
1706                 error = wpc->ops->prepare_ioend(wpc->ioend, error);
1707 
1708         if (error) {
1709                 wpc->ioend->io_bio.bi_status = errno_to_blk_status(error);
1710                 bio_endio(&wpc->ioend->io_bio);
1711         } else {
1712                 submit_bio(&wpc->ioend->io_bio);
1713         }
1714 
1715         wpc->ioend = NULL;
1716         return error;
1717 }
1718 
1719 static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
1720                 struct writeback_control *wbc, struct inode *inode, loff_t pos)
1721 {
1722         struct iomap_ioend *ioend;
1723         struct bio *bio;
1724 
1725         bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
1726                                REQ_OP_WRITE | wbc_to_write_flags(wbc),
1727                                GFP_NOFS, &iomap_ioend_bioset);
1728         bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
1729         bio->bi_end_io = iomap_writepage_end_bio;
1730         wbc_init_bio(wbc, bio);
1731         bio->bi_write_hint = inode->i_write_hint;
1732 
1733         ioend = iomap_ioend_from_bio(bio);
1734         INIT_LIST_HEAD(&ioend->io_list);
1735         ioend->io_type = wpc->iomap.type;
1736         ioend->io_flags = wpc->iomap.flags;
1737         ioend->io_inode = inode;
1738         ioend->io_size = 0;
1739         ioend->io_offset = pos;
1740         ioend->io_sector = bio->bi_iter.bi_sector;
1741 
1742         wpc->nr_folios = 0;
1743         return ioend;
1744 }
1745 
1746 static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos)
1747 {
1748         if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1749             (wpc->ioend->io_flags & IOMAP_F_SHARED))
1750                 return false;
1751         if (wpc->iomap.type != wpc->ioend->io_type)
1752                 return false;
1753         if (pos != wpc->ioend->io_offset + wpc->ioend->io_size)
1754                 return false;
1755         if (iomap_sector(&wpc->iomap, pos) !=
1756             bio_end_sector(&wpc->ioend->io_bio))
1757                 return false;
1758         /*
1759          * Limit ioend bio chain lengths to minimise IO completion latency. This
1760          * also prevents long tight loops ending page writeback on all the
1761          * folios in the ioend.
1762          */
1763         if (wpc->nr_folios >= IOEND_BATCH_SIZE)
1764                 return false;
1765         return true;
1766 }
1767 
1768 /*
1769  * Test to see if we have an existing ioend structure that we could append to
1770  * first; otherwise finish off the current ioend and start another.
1771  *
1772  * If a new ioend is created and cached, the old ioend is submitted to the block
1773  * layer instantly.  Batching optimisations are provided by higher level block
1774  * plugging.
1775  *
1776  * At the end of a writeback pass, there will be a cached ioend remaining on the
1777  * writepage context that the caller will need to submit.
1778  */
1779 static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
1780                 struct writeback_control *wbc, struct folio *folio,
1781                 struct inode *inode, loff_t pos, unsigned len)
1782 {
1783         struct iomap_folio_state *ifs = folio->private;
1784         size_t poff = offset_in_folio(folio, pos);
1785         int error;
1786 
1787         if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos)) {
1788 new_ioend:
1789                 error = iomap_submit_ioend(wpc, 0);
1790                 if (error)
1791                         return error;
1792                 wpc->ioend = iomap_alloc_ioend(wpc, wbc, inode, pos);
1793         }
1794 
1795         if (!bio_add_folio(&wpc->ioend->io_bio, folio, len, poff))
1796                 goto new_ioend;
1797 
1798         if (ifs)
1799                 atomic_add(len, &ifs->write_bytes_pending);
1800         wpc->ioend->io_size += len;
1801         wbc_account_cgroup_owner(wbc, &folio->page, len);
1802         return 0;
1803 }
1804 
1805 static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
1806                 struct writeback_control *wbc, struct folio *folio,
1807                 struct inode *inode, u64 pos, unsigned dirty_len,
1808                 unsigned *count)
1809 {
1810         int error;
1811 
1812         do {
1813                 unsigned map_len;
1814 
1815                 error = wpc->ops->map_blocks(wpc, inode, pos, dirty_len);
1816                 if (error)
1817                         break;
1818                 trace_iomap_writepage_map(inode, pos, dirty_len, &wpc->iomap);
1819 
1820                 map_len = min_t(u64, dirty_len,
1821                         wpc->iomap.offset + wpc->iomap.length - pos);
1822                 WARN_ON_ONCE(!folio->private && map_len < dirty_len);
1823 
1824                 switch (wpc->iomap.type) {
1825                 case IOMAP_INLINE:
1826                         WARN_ON_ONCE(1);
1827                         error = -EIO;
1828                         break;
1829                 case IOMAP_HOLE:
1830                         break;
1831                 default:
1832                         error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
1833                                         map_len);
1834                         if (!error)
1835                                 (*count)++;
1836                         break;
1837                 }
1838                 dirty_len -= map_len;
1839                 pos += map_len;
1840         } while (dirty_len && !error);
1841 
1842         /*
1843          * We cannot cancel the ioend directly here on error.  We may have
1844          * already set other pages under writeback and hence we have to run I/O
1845          * completion to mark the error state of the pages under writeback
1846          * appropriately.
1847          *
1848          * Just let the file system know what portion of the folio failed to
1849          * map.
1850          */
1851         if (error && wpc->ops->discard_folio)
1852                 wpc->ops->discard_folio(folio, pos);
1853         return error;
1854 }
1855 
1856 /*
1857  * Check interaction of the folio with the file end.
1858  *
1859  * If the folio is entirely beyond i_size, return false.  If it straddles
1860  * i_size, adjust end_pos and zero all data beyond i_size.
1861  */
1862 static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
1863                 u64 *end_pos)
1864 {
1865         u64 isize = i_size_read(inode);
1866 
1867         if (*end_pos > isize) {
1868                 size_t poff = offset_in_folio(folio, isize);
1869                 pgoff_t end_index = isize >> PAGE_SHIFT;
1870 
1871                 /*
1872                  * If the folio is entirely outside of i_size, skip it.
1873                  *
1874                  * This can happen due to a truncate operation that is in
1875                  * progress and in that case truncate will finish it off once
1876                  * we've dropped the folio lock.
1877                  *
1878                  * Note that the pgoff_t used for end_index is an unsigned long.
1879                  * If the given offset is greater than 16TB on a 32-bit system,
1880                  * then if we checked if the folio is fully outside i_size with
1881                  * "if (folio->index >= end_index + 1)", "end_index + 1" would
1882                  * overflow and evaluate to 0.  Hence this folio would be
1883                  * redirtied and written out repeatedly, which would result in
1884                  * an infinite loop; the user program performing this operation
1885                  * would hang.  Instead, we can detect this situation by
1886                  * checking if the folio is totally beyond i_size or if its
1887                  * offset is just equal to the EOF.
1888                  */
1889                 if (folio->index > end_index ||
1890                     (folio->index == end_index && poff == 0))
1891                         return false;
1892 
1893                 /*
1894                  * The folio straddles i_size.
1895                  *
1896                  * It must be zeroed out on each and every writepage invocation
1897                  * because it may be mmapped:
1898                  *
1899                  *    A file is mapped in multiples of the page size.  For a
1900                  *    file that is not a multiple of the page size, the
1901                  *    remaining memory is zeroed when mapped, and writes to that
1902                  *    region are not written out to the file.
1903                  *
1904                  * Also adjust the writeback range to skip all blocks entirely
1905                  * beyond i_size.
1906                  */
1907                 folio_zero_segment(folio, poff, folio_size(folio));
1908                 *end_pos = round_up(isize, i_blocksize(inode));
1909         }
1910 
1911         return true;
1912 }
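/*
 * Worked example for the 32-bit overflow discussed above (illustrative):
 * with 4 KiB pages and a 32-bit pgoff_t, an i_size just below 16 TiB gives
 *
 *	end_index = i_size >> PAGE_SHIFT = 0xffffffff
 *
 * so "end_index + 1" wraps to 0 and a check written as
 * "folio->index >= end_index + 1" would treat every folio as beyond EOF,
 * causing the endless redirty loop described in the comment.
 */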
1913 
1914 static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1915                 struct writeback_control *wbc, struct folio *folio)
1916 {
1917         struct iomap_folio_state *ifs = folio->private;
1918         struct inode *inode = folio->mapping->host;
1919         u64 pos = folio_pos(folio);
1920         u64 end_pos = pos + folio_size(folio);
1921         unsigned count = 0;
1922         int error = 0;
1923         u32 rlen;
1924 
1925         WARN_ON_ONCE(!folio_test_locked(folio));
1926         WARN_ON_ONCE(folio_test_dirty(folio));
1927         WARN_ON_ONCE(folio_test_writeback(folio));
1928 
1929         trace_iomap_writepage(inode, pos, folio_size(folio));
1930 
1931         if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
1932                 folio_unlock(folio);
1933                 return 0;
1934         }
1935         WARN_ON_ONCE(end_pos <= pos);
1936 
1937         if (i_blocks_per_folio(inode, folio) > 1) {
1938                 if (!ifs) {
1939                         ifs = ifs_alloc(inode, folio, 0);
1940                         iomap_set_range_dirty(folio, 0, end_pos - pos);
1941                 }
1942 
1943                 /*
1944                  * Keep the I/O completion handler from clearing the writeback
1945                  * bit until we have submitted all blocks by adding a bias to
1946                  * ifs->write_bytes_pending, which is dropped after submitting
1947                  * all blocks.
1948                  */
1949                 WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
1950                 atomic_inc(&ifs->write_bytes_pending);
1951         }
1952 
1953         /*
1954          * Set the writeback bit ASAP, as the I/O completion for the single
1955          * block per folio case can happen as soon as we submit the bio.
1956          */
1957         folio_start_writeback(folio);
1958 
1959         /*
1960          * Walk through the folio to find dirty areas to write back.
1961          */
1962         while ((rlen = iomap_find_dirty_range(folio, &pos, end_pos))) {
1963                 error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
1964                                 pos, rlen, &count);
1965                 if (error)
1966                         break;
1967                 pos += rlen;
1968         }
1969 
1970         if (count)
1971                 wpc->nr_folios++;
1972 
1973         /*
1974          * We can have dirty bits set past the end of file in the page_mkwrite
1975          * path while mapping the last partial folio. Hence it's better to clear
1976          * all the dirty bits in the folio here.
1977          */
1978         iomap_clear_range_dirty(folio, 0, folio_size(folio));
1979 
1980         /*
1981          * Usually the writeback bit is cleared by the I/O completion handler.
1982          * But we may end up not writing any blocks at all, or (when there
1983          * are multiple blocks in a folio) all I/O might already have
1984          * finished at this point.  In either case we need to clear the
1985          * writeback bit ourselves right after unlocking the folio.
1986          */
1987         folio_unlock(folio);
1988         if (ifs) {
1989                 if (atomic_dec_and_test(&ifs->write_bytes_pending))
1990                         folio_end_writeback(folio);
1991         } else {
1992                 if (!count)
1993                         folio_end_writeback(folio);
1994         }
1995         mapping_set_error(inode->i_mapping, error);
1996         return error;
1997 }
1998 
1999 int
2000 iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
2001                 struct iomap_writepage_ctx *wpc,
2002                 const struct iomap_writeback_ops *ops)
2003 {
2004         struct folio *folio = NULL;
2005         int error;
2006 
2007         /*
2008          * Writeback from reclaim context should never happen except in the case
2009          * of a VM regression so warn about it and refuse to write the data.
2010          */
2011         if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) ==
2012                         PF_MEMALLOC))
2013                 return -EIO;
2014 
2015         wpc->ops = ops;
2016         while ((folio = writeback_iter(mapping, wbc, folio, &error)))
2017                 error = iomap_writepage_map(wpc, wbc, folio);
2018         return iomap_submit_ioend(wpc, error);
2019 }
2020 EXPORT_SYMBOL_GPL(iomap_writepages);
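/*
 * A minimal sketch of a ->writepages implementation built on this helper.
 * example_writepage_ctx and example_writeback_ops are hypothetical: real
 * filesystems typically embed struct iomap_writepage_ctx in a private
 * context and supply their own iomap_writeback_ops (map_blocks etc.).
 */
struct example_writepage_ctx {
	struct iomap_writepage_ctx	ctx;
	/* filesystem-private mapping state would live here */
};

static int example_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct example_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc.ctx, &example_writeback_ops);
}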
2021 
2022 static int __init iomap_init(void)
2023 {
2024         return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
2025                            offsetof(struct iomap_ioend, io_bio),
2026                            BIOSET_NEED_BVECS);
2027 }
2028 fs_initcall(iomap_init);
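/*
 * Sizing note (assuming 4 KiB pages and 512 byte sectors): the pool size
 * 4 * (PAGE_SIZE / SECTOR_SIZE) evaluates to 32, so bioset_init() keeps a
 * mempool of 32 bios (plus bvecs, because of BIOSET_NEED_BVECS) available
 * so that writeback can make forward progress under memory pressure.
 */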
2029 
