Linux/fs/bcachefs/fs-io-buffered.c

// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "io_read.h"
#include "io_write.h"

#include <linux/backing-dev.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

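/*
 * Local helper, apparently duplicated from the block layer's static
 * bio_full(): a bio is considered full once it is out of bvec slots, or
 * once adding @len more bytes would overflow the 32-bit bi_size counter.
 */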
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

/* readpage(s): */

static void bch2_readpages_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		folio_end_read(fi.folio, bio->bi_status == BLK_STS_OK);

	bio_put(bio);
}

struct readpages_iter {
	struct address_space	*mapping;
	unsigned		idx;
	folios			folios;
};

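/*
 * Pull every folio out of the readahead control into our own darray so
 * they can be walked more than once. On failure, the current folio is
 * handed back to the ractl (undoing __readahead_folio()'s accounting),
 * and we proceed with whatever was gathered so far, if anything.
 */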
static int readpages_iter_init(struct readpages_iter *iter,
			       struct readahead_control *ractl)
{
	struct folio *folio;

	*iter = (struct readpages_iter) { ractl->mapping };

	while ((folio = __readahead_folio(ractl))) {
		if (!bch2_folio_create(folio, GFP_KERNEL) ||
		    darray_push(&iter->folios, folio)) {
			bch2_folio_release(folio);
			ractl->_nr_pages += folio_nr_pages(folio);
			ractl->_index -= folio_nr_pages(folio);
			return iter->folios.nr ? 0 : -ENOMEM;
		}

		folio_put(folio);
	}

	return 0;
}

static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
{
	if (iter->idx >= iter->folios.nr)
		return NULL;
	return iter->folios.data[iter->idx];
}

static inline void readpage_iter_advance(struct readpages_iter *iter)
{
	iter->idx++;
}

static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc.csum_type || crc.compression_type)
			return true;
	return false;
}

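/*
 * Widen @bio towards @sectors_this_extent: first with folios queued in the
 * readahead batch, then - if @get_more, i.e. a partial read of this extent
 * would be expensive - with freshly allocated folios inserted straight
 * into the page cache, stopping if the slot is already occupied. Btree
 * locks are dropped across the allocations and retaken before returning.
 */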
static int readpage_bio_extend(struct btree_trans *trans,
			       struct readpages_iter *iter,
			       struct bio *bio,
			       unsigned sectors_this_extent,
			       bool get_more)
{
	/* Don't hold btree locks while allocating memory: */
	bch2_trans_unlock(trans);

	while (bio_sectors(bio) < sectors_this_extent &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		struct folio *folio = readpage_iter_peek(iter);
		int ret;

		if (folio) {
			readpage_iter_advance(iter);
		} else {
			pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;

			if (!get_more)
				break;

			folio = xa_load(&iter->mapping->i_pages, folio_offset);
			if (folio && !xa_is_value(folio))
				break;

			folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
			if (!folio)
				break;

			if (!__bch2_folio_create(folio, GFP_KERNEL)) {
				folio_put(folio);
				break;
			}

			ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
			if (ret) {
				__bch2_folio_release(folio);
				folio_put(folio);
				break;
			}

			folio_put(folio);
		}

		BUG_ON(folio_sector(folio) != bio_end_sector(bio));

		BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
	}

	return bch2_trans_relock(trans);
}

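/*
 * Core buffered read loop: walk the extents btree starting at the bio's
 * sector, resolving reflink indirection, clamping each extent to the bio
 * (optionally extending the bio via the readpages iterator), and issuing
 * bch2_read_extent() for each fragment until the bio is consumed.
 * Transaction restarts retry from the top.
 */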
static void bchfs_read(struct btree_trans *trans,
		       struct bch_read_bio *rbio,
		       subvol_inum inum,
		       struct readpages_iter *readpages_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_buf sk;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;
	u32 snapshot;
	int ret = 0;

	rbio->c = c;
	rbio->start_time = local_clock();
	rbio->subvol = inum.subvol;

	bch2_bkey_buf_init(&sk);
retry:
	bch2_trans_begin(trans);
	iter = (struct btree_iter) { NULL };

	ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
			     BTREE_ITER_slots);
	while (1) {
		struct bkey_s_c k;
		unsigned bytes, sectors, offset_into_extent;
		enum btree_id data_btree = BTREE_ID_extents;

		/*
		 * read_extent -> io_time_reset may cause a transaction restart
		 * without returning an error; we need to check for that here:
		 */
		ret = bch2_trans_relock(trans);
		if (ret)
			break;

		bch2_btree_iter_set_pos(&iter,
				POS(inum.inum, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		offset_into_extent = iter.pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(trans, &data_btree,
					&offset_into_extent, &sk);
		if (ret)
			break;

		k = bkey_i_to_s_c(sk.k);

		sectors = min(sectors, k.k->size - offset_into_extent);

		if (readpages_iter) {
			ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
						  extent_partial_reads_expensive(k));
			if (ret)
				break;
		}

		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		bch2_bio_page_state_set(&rbio->bio, k);

		bch2_read_extent(trans, rbio, iter.pos,
				 data_btree, k, offset_into_extent, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);

		ret = btree_trans_too_many_iters(trans);
		if (ret)
			break;
	}
err:
	bch2_trans_iter_exit(trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	if (ret) {
		bch_err_inum_offset_ratelimited(c,
				iter.pos.inode,
				iter.pos.offset << 9,
				"read error %i from btree lookup", ret);
		rbio->bio.bi_status = BLK_STS_IOERR;
		bio_endio(&rbio->bio);
	}

	bch2_bkey_buf_exit(&sk, c);
}

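/*
 * ->readahead: drain the ractl into our own iterator, then carve the batch
 * into bios of at most BIO_MAX_VECS folios each, filled in by bchfs_read().
 * The pagecache add lock is held across the reads, presumably to exclude
 * operations (e.g. truncate, fallocate) that need to block page cache
 * insertion.
 */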
void bch2_readahead(struct readahead_control *ractl)
{
	struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts;
	struct folio *folio;
	struct readpages_iter readpages_iter;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	int ret = readpages_iter_init(&readpages_iter, ractl);
	if (ret)
		return;

	bch2_pagecache_add_get(inode);

	struct btree_trans *trans = bch2_trans_get(c);
	while ((folio = readpage_iter_peek(&readpages_iter))) {
		unsigned n = min_t(unsigned,
				   readpages_iter.folios.nr -
				   readpages_iter.idx,
				   BIO_MAX_VECS);
		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
						   GFP_KERNEL, &c->bio_read),
				  opts);

		readpage_iter_advance(&readpages_iter);

		rbio->bio.bi_iter.bi_sector = folio_sector(folio);
		rbio->bio.bi_end_io = bch2_readpages_end_io;
		BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));

		bchfs_read(trans, rbio, inode_inum(inode),
			   &readpages_iter);
		bch2_trans_unlock(trans);
	}
	bch2_trans_put(trans);

	bch2_pagecache_add_put(inode);

	darray_exit(&readpages_iter.folios);
}

static void bch2_read_single_folio_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

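/*
 * Synchronously read one folio, using an on-stack completion to wait for
 * the bio. Used by ->read_folio and by the buffered write path when a
 * partially-overwritten folio has to be brought uptodate first.
 */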
int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	struct bch_io_opts opts;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	if (!bch2_folio_create(folio, GFP_KERNEL))
		return -ENOMEM;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
			 opts);
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_folio_end_io;

	rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
	rbio->bio.bi_iter.bi_sector = folio_sector(folio);
	BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));

	bch2_trans_run(c, (bchfs_read(trans, rbio, inode_inum(inode), NULL), 0));
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	folio_mark_uptodate(folio);
	return 0;
}

int bch2_read_folio(struct file *file, struct folio *folio)
{
	int ret;

	ret = bch2_read_single_folio(folio, folio->mapping);
	folio_unlock(folio);
	return bch2_err_class(ret);
}

/* writepages: */

struct bch_writepage_io {
	struct bch_inode_info		*inode;

	/* must be last: */
	struct bch_write_op		op;
};

struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
	struct bch_folio_sector	*tmp;
	unsigned		tmp_sectors;
};

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{
	struct bch_writepage_state ret = { 0 };

	bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
	return ret;
}

/*
 * Determine when a writepage io is full. We have to limit writepage bios to a
 * single page per bvec (i.e. 1MB with 4k pages) because that is the limit to
 * what the bounce path in bch2_write_extent() can handle. In theory we could
 * loosen this restriction for non-bounce I/O, but we don't have that context
 * here. Ideally, we can up this limit and make it configurable in the future
 * when the bounce path can be enhanced to accommodate larger source bios.
 */
static inline bool bch_io_full(struct bch_writepage_io *io, unsigned len)
{
	struct bio *bio = &io->op.wbio.bio;
	return bio_full(bio, len) ||
		(bio->bi_iter.bi_size + len > BIO_MAX_VECS * PAGE_SIZE);
}

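/*
 * Write completion: on error, mark the mapping and zero each sector's
 * nr_replicas so the data is treated as not present on disk; the same
 * zeroing happens when the write went inline into the btree (presumably
 * because no extent pointers exist in that case either). Then fix up
 * i_blocks accounting and end writeback on each folio once its
 * write_count drops to zero.
 */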
static void bch2_writepage_io_done(struct bch_write_op *op)
{
	struct bch_writepage_io *io =
		container_of(op, struct bch_writepage_io, op);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct folio_iter fi;
	unsigned i;

	if (io->op.error) {
		set_bit(EI_INODE_ERROR, &io->inode->ei_flags);

		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			mapping_set_error(fi.folio->mapping, -EIO);

			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	WARN_ON_ONCE(io->op.i_sectors_delta > 0);

	/*
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 * XXX wtf?
	   BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
	 */

	/*
	 * The writeback flag is effectively our ref on the inode -
	 * fixup i_blocks before calling folio_end_writeback:
	 */
	bch2_i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_folio_all(fi, bio) {
		struct bch_folio *s = __bch2_folio(fi.folio);

		if (atomic_dec_and_test(&s->write_count))
			folio_end_writeback(fi.folio);
	}

	bio_put(&io->op.wbio.bio);
}

static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, NULL);
}

/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct writeback_control *wbc,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;

	w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
					      REQ_OP_WRITE,
					      GFP_KERNEL,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.wbio.bio);

	w->io->inode		= inode;
	op			= &w->io->op;
	bch2_write_op_init(op, c, w->opts);
	op->target		= w->opts.foreground_target;
	op->nr_replicas		= nr_replicas;
	op->res.nr_replicas	= nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->subvol		= inode->ei_subvol;
	op->pos			= POS(inode->v.i_ino, sector);
	op->end_io		= bch2_writepage_io_done;
	op->devs_need_flush	= &inode->ei_devs_need_flush;
	op->wbio.bio.bi_iter.bi_sector = sector;
	op->wbio.bio.bi_opf	= wbc_to_write_flags(wbc);
}

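/*
 * Write out one folio: zero the tail if it straddles i_size, snapshot the
 * per-sector reservation state under the folio lock, then chop the folio
 * into runs of contiguous dirty sectors and append each run to the current
 * bch_writepage_io - starting a new one whenever the replica count, size
 * limit, or disk offset doesn't match. write_count tracks outstanding runs
 * so writeback is ended exactly once.
 */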
static int __bch2_writepage(struct folio *folio,
			    struct writeback_control *wbc,
			    void *data)
{
	struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_folio *s;
	unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
	loff_t i_size = i_size_read(&inode->v);
	int ret;

	EBUG_ON(!folio_test_uptodate(folio));

	/* Is the folio fully inside i_size? */
	if (folio_end_pos(folio) <= i_size)
		goto do_io;

	/* Is the folio fully outside i_size? (truncate in progress) */
	if (folio_pos(folio) >= i_size) {
		folio_unlock(folio);
		return 0;
	}

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the folio size.  For a file that is not a multiple of
	 * the folio size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	folio_zero_segment(folio,
			   i_size - folio_pos(folio),
			   folio_size(folio));
do_io:
	f_sectors = folio_sectors(folio);
	s = bch2_folio(folio);

	if (f_sectors > w->tmp_sectors) {
		kfree(w->tmp);
		w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector), GFP_NOFS|__GFP_NOFAIL);
		w->tmp_sectors = f_sectors;
	}

	/*
	 * Things get really hairy with errors during writeback:
	 */
	ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
	BUG_ON(ret);

	/* Before unlocking the page, get copy of reservations: */
	spin_lock(&s->lock);
	memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);

	for (i = 0; i < f_sectors; i++) {
		if (s->s[i].state < SECTOR_dirty)
			continue;

		nr_replicas_this_write =
			min_t(unsigned, nr_replicas_this_write,
			      s->s[i].nr_replicas +
			      s->s[i].replicas_reserved);
	}

	for (i = 0; i < f_sectors; i++) {
		if (s->s[i].state < SECTOR_dirty)
			continue;

		s->s[i].nr_replicas = w->opts.compression
			? 0 : nr_replicas_this_write;

		s->s[i].replicas_reserved = 0;
		bch2_folio_sector_set(folio, s, i, SECTOR_allocated);
	}
	spin_unlock(&s->lock);

	BUG_ON(atomic_read(&s->write_count));
	atomic_set(&s->write_count, 1);

	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	folio_unlock(folio);

	offset = 0;
	while (1) {
		unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
		u64 sector;

		while (offset < f_sectors &&
		       w->tmp[offset].state < SECTOR_dirty)
			offset++;

		if (offset == f_sectors)
			break;

		while (offset + sectors < f_sectors &&
		       w->tmp[offset + sectors].state >= SECTOR_dirty) {
			reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
			dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
			sectors++;
		}
		BUG_ON(!sectors);

		sector = folio_sector(folio) + offset;

		if (w->io &&
		    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
		     bch_io_full(w->io, sectors << 9) ||
		     bio_end_sector(&w->io->op.wbio.bio) != sector))
			bch2_writepage_do_io(w);

		if (!w->io)
			bch2_writepage_io_alloc(c, wbc, w, inode, sector,
						nr_replicas_this_write);

		atomic_inc(&s->write_count);

		BUG_ON(inode != w->io->inode);
		BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
				     sectors << 9, offset << 9));

		/* Check for writing past i_size: */
		WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
			  round_up(i_size, block_bytes(c)) &&
			  !test_bit(BCH_FS_emergency_ro, &c->flags),
			  "writing past i_size: %llu > %llu (unrounded %llu)\n",
			  bio_end_sector(&w->io->op.wbio.bio) << 9,
			  round_up(i_size, block_bytes(c)),
			  i_size);

		w->io->op.res.sectors += reserved_sectors;
		w->io->op.i_sectors_delta -= dirty_sectors;
		w->io->op.new_i_size = i_size;

		offset += sectors;
	}

	if (atomic_dec_and_test(&s->write_count))
		folio_end_writeback(folio);

	return 0;
}

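/*
 * ->writepages: iterate dirty folios via write_cache_pages() under a block
 * plug, submitting any final partially-built bio before returning.
 */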
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	kfree(w.tmp);
	return bch2_err_class(ret);
}

/* buffered writes: */

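/*
 * ->write_begin: pin the target folio, bring it uptodate if the write is
 * partial (unless the relevant range can be zeroed instead), and get a
 * disk reservation for the range. The reservation is passed to
 * ->write_end via *fsdata.
 */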
int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len,
		     struct page **pagep, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation *res;
	struct folio *folio;
	unsigned offset;
	int ret = -ENOMEM;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bch2_folio_reservation_init(c, inode, res);
	*fsdata = res;

	bch2_pagecache_add_get(inode);

	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
				    FGP_WRITEBEGIN | fgf_set_order(len),
				    mapping_gfp_mask(mapping));
	if (IS_ERR_OR_NULL(folio))
		goto err_unlock;

	offset = pos - folio_pos(folio);
	len = min_t(size_t, len, folio_end_pos(folio) - pos);

	if (folio_test_uptodate(folio))
		goto out;

	/* If we're writing the entire folio, we don't need to read it in first: */
	if (!offset && len == folio_size(folio))
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		folio_zero_segment(folio, len, folio_size(folio));
		flush_dcache_folio(folio);
		goto out;
	}

	if (folio_pos(folio) >= inode->v.i_size) {
		folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
		flush_dcache_folio(folio);
		goto out;
	}
readpage:
	ret = bch2_read_single_folio(folio, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
	if (ret)
		goto err;

	ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
	if (ret) {
		if (!folio_test_uptodate(folio)) {
			/*
			 * If the folio hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the folio is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = &folio->page;
	return 0;
err:
	folio_unlock(folio);
	folio_put(folio);
	*pagep = NULL;
err_unlock:
	bch2_pagecache_add_put(inode);
	kfree(res);
	*fsdata = NULL;
	return bch2_err_class(ret);
}

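/*
 * ->write_end: commit @copied bytes - mark the folio uptodate and dirty,
 * update i_size, and release the reservation taken in ->write_begin. A
 * short copy into a non-uptodate folio is discarded entirely, forcing
 * userspace to redo the write, since the folio can't be partially
 * uptodate.
 */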
int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation *res = fsdata;
	struct folio *folio = page_folio(page);
	unsigned offset = pos - folio_pos(folio);

	lockdep_assert_held(&inode->v.i_rwsem);
	BUG_ON(offset + copied > folio_size(folio));

	if (unlikely(copied < len && !folio_test_uptodate(folio))) {
		/*
		 * The folio needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		folio_zero_range(folio, 0, folio_size(folio));
		flush_dcache_folio(folio);
		copied = 0;
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	if (copied) {
		if (!folio_test_uptodate(folio))
			folio_mark_uptodate(folio);

		bch2_set_folio_dirty(c, inode, folio, res, offset, copied);

		inode->ei_last_dirtied = (unsigned long) current;
	}

	folio_unlock(folio);
	folio_put(folio);
	bch2_pagecache_add_put(inode);

	bch2_folio_reservation_put(c, inode, res);
	kfree(res);

	return copied;
}

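/* Truncate @fs at @fi, unlocking and putting @fi and everything after it: */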
static noinline void folios_trunc(folios *fs, struct folio **fi)
{
	while (fs->data + fs->nr > fi) {
		struct folio *f = darray_pop(fs);

		folio_unlock(f);
		folio_put(f);
	}
}

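/*
 * The multi-folio buffered write path: grab a contiguous run of folios
 * covering [pos, pos + len), bring the first and last uptodate if the
 * write is unaligned, reserve space (trimming the run on a partial
 * reservation), copy from the iter, then mark everything dirty and update
 * i_size. Returns bytes copied, or an error if nothing was copied.
 */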
static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation res;
	folios fs;
	struct folio *f;
	unsigned copied = 0, f_offset, f_copied;
	u64 end = pos + len, f_pos, f_len;
	loff_t last_folio_pos = inode->v.i_size;
	int ret = 0;

	BUG_ON(!len);

	bch2_folio_reservation_init(c, inode, &res);
	darray_init(&fs);

	ret = bch2_filemap_get_contig_folios_d(mapping, pos, end,
					       FGP_WRITEBEGIN | fgf_set_order(len),
					       mapping_gfp_mask(mapping), &fs);
	if (ret)
		goto out;

	BUG_ON(!fs.nr);

	f = darray_first(fs);
	if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
		ret = bch2_read_single_folio(f, mapping);
		if (ret)
			goto out;
	}

	f = darray_last(fs);
	end = min(end, folio_end_pos(f));
	last_folio_pos = folio_pos(f);
	if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
		if (end >= inode->v.i_size) {
			folio_zero_range(f, 0, folio_size(f));
		} else {
			ret = bch2_read_single_folio(f, mapping);
			if (ret)
				goto out;
		}
	}

	ret = bch2_folio_set(c, inode_inum(inode), fs.data, fs.nr);
	if (ret)
		goto out;

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		ssize_t f_reserved;

		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;
		f_reserved = bch2_folio_reservation_get_partial(c, inode, f, &res, f_offset, f_len);

		if (unlikely(f_reserved != f_len)) {
			if (f_reserved < 0) {
				if (f == darray_first(fs)) {
					ret = f_reserved;
					goto out;
				}

				folios_trunc(&fs, fi);
				end = min(end, folio_end_pos(darray_last(fs)));
			} else {
				folios_trunc(&fs, fi + 1);
				end = f_pos + f_reserved;
			}

			break;
		}

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	if (mapping_writably_mapped(mapping))
		darray_for_each(fs, fi)
			flush_dcache_folio(*fi);

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;
		f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
		if (!f_copied) {
			folios_trunc(&fs, fi);
			break;
		}

		if (!folio_test_uptodate(f) &&
		    f_copied != folio_size(f) &&
		    pos + copied + f_copied < inode->v.i_size) {
			iov_iter_revert(iter, f_copied);
			folio_zero_range(f, 0, folio_size(f));
			folios_trunc(&fs, fi);
			break;
		}

		flush_dcache_folio(f);
		copied += f_copied;

		if (f_copied != f_len) {
			folios_trunc(&fs, fi + 1);
			break;
		}

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	if (!copied)
		goto out;

	end = pos + copied;

	spin_lock(&inode->v.i_lock);
	if (end > inode->v.i_size)
		i_size_write(&inode->v, end);
	spin_unlock(&inode->v.i_lock);

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;

		if (!folio_test_uptodate(f))
			folio_mark_uptodate(f);

		bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	inode->ei_last_dirtied = (unsigned long) current;
out:
	darray_for_each(fs, fi) {
		folio_unlock(*fi);
		folio_put(*fi);
	}

	/*
	 * If the last folio added to the mapping starts beyond current EOF, we
	 * performed a short write but left around at least one post-EOF folio.
	 * Clean up the mapping before we return.
	 */
	if (last_folio_pos >= inode->v.i_size)
		truncate_pagecache(&inode->v, inode->v.i_size);

	darray_exit(&fs);
	bch2_folio_reservation_put(c, inode, &res);

	return copied ?: ret;
}

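/*
 * Outer buffered write loop: fault in the source pages first to avoid
 * deadlocking against our own page cache during the atomic copy, then call
 * __bch2_buffered_write() in chunks, falling back to single-segment writes
 * when no progress can be made.
 */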
static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	bch2_pagecache_add_get(inode);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = iov_iter_count(iter);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();
		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}
		pos += ret;
		written += ret;
		ret = 0;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	bch2_pagecache_add_put(inode);

	return written ? written : ret;
}

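/*
 * ->write_iter: O_DIRECT writes take their own path; buffered writes run
 * under the inode lock with the usual generic_write_checks() /
 * file_remove_privs() / file_update_time() preamble, syncing afterwards
 * when the file requires it.
 */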
ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = bch2_direct_write(iocb, from);
		goto out;
	}

	inode_lock(&inode->v);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto unlock;

	ret = file_update_time(file);
	if (ret)
		goto unlock;

	ret = bch2_buffered_write(iocb, from);
	if (likely(ret > 0))
		iocb->ki_pos += ret;
unlock:
	inode_unlock(&inode->v);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
out:
	return bch2_err_class(ret);
}

void bch2_fs_fs_io_buffered_exit(struct bch_fs *c)
{
	bioset_exit(&c->writepage_bioset);
}

int bch2_fs_fs_io_buffered_init(struct bch_fs *c)
{
	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_writepage_bioset_init;

	return 0;
}

#endif /* NO_BCACHEFS_FS */
