Linux/fs/bcachefs/io_read.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "data_update.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io_read.h"
#include "io_misc.h"
#include "io_write.h"
#include "subvolume.h"
#include "trace.h"

#include <linux/sched/mm.h>

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT

static bool bch2_target_congested(struct bch_fs *c, u16 target)
{
        const struct bch_devs_mask *devs;
        unsigned d, nr = 0, total = 0;
        u64 now = local_clock(), last;
        s64 congested;
        struct bch_dev *ca;

        if (!target)
                return false;

        rcu_read_lock();
        devs = bch2_target_to_mask(c, target) ?:
                &c->rw_devs[BCH_DATA_user];

        for_each_set_bit(d, devs->d, BCH_SB_MEMBERS_MAX) {
                ca = rcu_dereference(c->devs[d]);
                if (!ca)
                        continue;

                congested = atomic_read(&ca->congested);
                last = READ_ONCE(ca->congested_last);
                if (time_after64(now, last))
                        congested -= (now - last) >> 12;

                total += max(congested, 0LL);
                nr++;
        }
        rcu_read_unlock();
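        /*
         * Probabilistic throttle: report congestion with probability
         * proportional to the mean decayed congestion across the target's
         * devices (per-device congestion counts decay with time since the
         * last congested IO - the >> 12 above - up to CONGESTED_MAX each):
         */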
        return bch2_rand_range(nr * CONGESTED_MAX) < total;
}

#else

static bool bch2_target_congested(struct bch_fs *c, u16 target)
{
        return false;
}

#endif

/* Cache promotion on read */

struct promote_op {
        struct rcu_head         rcu;
        u64                     start_time;

        struct rhash_head       hash;
        struct bpos             pos;

        struct data_update      write;
        struct bio_vec          bi_inline_vecs[]; /* must be last */
};

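/*
 * In-flight promotes are tracked in a hash table keyed by extent position, so
 * the same extent is never promoted twice concurrently (a lookup that finds an
 * existing entry fails with -BCH_ERR_nopromote_in_flight):
 */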
static const struct rhashtable_params bch_promote_params = {
        .head_offset            = offsetof(struct promote_op, hash),
        .key_offset             = offsetof(struct promote_op, pos),
        .key_len                = sizeof(struct bpos),
        .automatic_shrinking    = true,
};

static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
                                  struct bpos pos,
                                  struct bch_io_opts opts,
                                  unsigned flags,
                                  struct bch_io_failures *failed)
{
        if (!failed) {
                BUG_ON(!opts.promote_target);

                if (!(flags & BCH_READ_MAY_PROMOTE))
                        return -BCH_ERR_nopromote_may_not;

                if (bch2_bkey_has_target(c, k, opts.promote_target))
                        return -BCH_ERR_nopromote_already_promoted;

                if (bkey_extent_is_unwritten(k))
                        return -BCH_ERR_nopromote_unwritten;

                if (bch2_target_congested(c, opts.promote_target))
                        return -BCH_ERR_nopromote_congested;
        }

        if (rhashtable_lookup_fast(&c->promote_table, &pos,
                                   bch_promote_params))
                return -BCH_ERR_nopromote_in_flight;

        return 0;
}

static void promote_free(struct bch_fs *c, struct promote_op *op)
{
        int ret;

        bch2_data_update_exit(&op->write);

        ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
                                     bch_promote_params);
        BUG_ON(ret);
        bch2_write_ref_put(c, BCH_WRITE_REF_promote);
        kfree_rcu(op, rcu);
}

static void promote_done(struct bch_write_op *wop)
{
        struct promote_op *op =
                container_of(wop, struct promote_op, write.op);
        struct bch_fs *c = op->write.op.c;

        bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
                               op->start_time);
        promote_free(c, op);
}

static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
{
        struct bio *bio = &op->write.op.wbio.bio;

        trace_and_count(op->write.op.c, read_promote, &rbio->bio);

        /* we now own pages: */
        BUG_ON(!rbio->bounce);
        BUG_ON(rbio->bio.bi_vcnt > bio->bi_max_vecs);

        memcpy(bio->bi_io_vec, rbio->bio.bi_io_vec,
               sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
        swap(bio->bi_vcnt, rbio->bio.bi_vcnt);

        bch2_data_update_read_done(&op->write, rbio->pick.crc);
}

static struct promote_op *__promote_alloc(struct btree_trans *trans,
                                          enum btree_id btree_id,
                                          struct bkey_s_c k,
                                          struct bpos pos,
                                          struct extent_ptr_decoded *pick,
                                          struct bch_io_opts opts,
                                          unsigned sectors,
                                          struct bch_read_bio **rbio,
                                          struct bch_io_failures *failed)
{
        struct bch_fs *c = trans->c;
        struct promote_op *op = NULL;
        struct bio *bio;
        unsigned pages = DIV_ROUND_UP(sectors, PAGE_SECTORS);
        int ret;

        if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
                return ERR_PTR(-BCH_ERR_nopromote_no_writes);

        op = kzalloc(struct_size(op, bi_inline_vecs, pages), GFP_KERNEL);
        if (!op) {
                ret = -BCH_ERR_nopromote_enomem;
                goto err;
        }

        op->start_time = local_clock();
        op->pos = pos;

        /*
         * We don't use the mempool here because extents that aren't
         * checksummed or compressed can be too big for the mempool:
         */
        *rbio = kzalloc(sizeof(struct bch_read_bio) +
                        sizeof(struct bio_vec) * pages,
                        GFP_KERNEL);
        if (!*rbio) {
                ret = -BCH_ERR_nopromote_enomem;
                goto err;
        }

        rbio_init(&(*rbio)->bio, opts);
        bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);

        if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, GFP_KERNEL)) {
                ret = -BCH_ERR_nopromote_enomem;
                goto err;
        }

        (*rbio)->bounce         = true;
        (*rbio)->split          = true;
        (*rbio)->kmalloc        = true;

        if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
                                          bch_promote_params)) {
                ret = -BCH_ERR_nopromote_in_flight;
                goto err;
        }

        bio = &op->write.op.wbio.bio;
        bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);

        struct data_update_opts update_opts = {};

        if (!failed) {
                update_opts.target = opts.promote_target;
                update_opts.extra_replicas = 1;
                update_opts.write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED;
        } else {
                update_opts.target = opts.foreground_target;

                struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
                unsigned i = 0;
                bkey_for_each_ptr(ptrs, ptr) {
                        if (bch2_dev_io_failures(failed, ptr->dev))
                                update_opts.rewrite_ptrs |= BIT(i);
                        i++;
                }
        }

        ret = bch2_data_update_init(trans, NULL, NULL, &op->write,
                        writepoint_hashed((unsigned long) current),
                        opts,
                        update_opts,
                        btree_id, k);
        /*
         * possible errors: -BCH_ERR_nocow_lock_blocked,
         * -BCH_ERR_ENOSPC_disk_reservation:
         */
        if (ret) {
                BUG_ON(rhashtable_remove_fast(&c->promote_table, &op->hash,
                                              bch_promote_params));
                goto err;
        }

        op->write.op.end_io = promote_done;

        return op;
err:
        if (*rbio)
                bio_free_pages(&(*rbio)->bio);
        kfree(*rbio);
        *rbio = NULL;
        kfree(op);
        bch2_write_ref_put(c, BCH_WRITE_REF_promote);
        return ERR_PTR(ret);
}

noinline
static struct promote_op *promote_alloc(struct btree_trans *trans,
                                        struct bvec_iter iter,
                                        struct bkey_s_c k,
                                        struct extent_ptr_decoded *pick,
                                        struct bch_io_opts opts,
                                        unsigned flags,
                                        struct bch_read_bio **rbio,
                                        bool *bounce,
                                        bool *read_full,
                                        struct bch_io_failures *failed)
{
        struct bch_fs *c = trans->c;
        /*
         * if failed != NULL we're not actually doing a promote, we're
         * recovering from an io/checksum error
         */
        bool promote_full = (failed ||
                             *read_full ||
                             READ_ONCE(c->promote_whole_extents));
        /* data might have to be decompressed in the write path: */
        unsigned sectors = promote_full
                ? max(pick->crc.compressed_size, pick->crc.live_size)
                : bvec_iter_sectors(iter);
        struct bpos pos = promote_full
                ? bkey_start_pos(k.k)
                : POS(k.k->p.inode, iter.bi_sector);
        struct promote_op *promote;
        int ret;

        ret = should_promote(c, k, pos, opts, flags, failed);
        if (ret)
                goto nopromote;

        promote = __promote_alloc(trans,
                                  k.k->type == KEY_TYPE_reflink_v
                                  ? BTREE_ID_reflink
                                  : BTREE_ID_extents,
                                  k, pos, pick, opts, sectors, rbio, failed);
        ret = PTR_ERR_OR_ZERO(promote);
        if (ret)
                goto nopromote;

        *bounce         = true;
        *read_full      = promote_full;
        return promote;
nopromote:
        trace_read_nopromote(c, ret);
        return NULL;
}

/* Read */

#define READ_RETRY_AVOID        1
#define READ_RETRY              2
#define READ_ERR                3

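/*
 * Read completion contexts, ordered by how much work may run in them: none
 * (IRQ), checksum verification (highpri workqueue), and decompression/
 * decryption/btree updates (unbound workqueue). See bch2_rbio_punt():
 */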
enum rbio_context {
        RBIO_CONTEXT_NULL,
        RBIO_CONTEXT_HIGHPRI,
        RBIO_CONTEXT_UNBOUND,
};

static inline struct bch_read_bio *
bch2_rbio_parent(struct bch_read_bio *rbio)
{
        return rbio->split ? rbio->parent : rbio;
}

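/*
 * Run @fn immediately if we're already in a context at least as deep as
 * @context, otherwise punt it to @wq:
 */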
__always_inline
static void bch2_rbio_punt(struct bch_read_bio *rbio, work_func_t fn,
                           enum rbio_context context,
                           struct workqueue_struct *wq)
{
        if (context <= rbio->context) {
                fn(&rbio->work);
        } else {
                rbio->work.func         = fn;
                rbio->context           = context;
                queue_work(wq, &rbio->work);
        }
}

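/*
 * Free a possibly-split rbio and return the parent that completion should
 * continue on; bounce buffers are always splits, hence the assertion:
 */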
static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
{
        BUG_ON(rbio->bounce && !rbio->split);

        if (rbio->promote)
                promote_free(rbio->c, rbio->promote);
        rbio->promote = NULL;

        if (rbio->bounce)
                bch2_bio_free_pages_pool(rbio->c, &rbio->bio);

        if (rbio->split) {
                struct bch_read_bio *parent = rbio->parent;

                if (rbio->kmalloc)
                        kfree(rbio);
                else
                        bio_put(&rbio->bio);

                rbio = parent;
        }

        return rbio;
}

/*
 * Only called on a top level bch_read_bio to complete an entire read request,
 * not a split:
 */
static void bch2_rbio_done(struct bch_read_bio *rbio)
{
        if (rbio->start_time)
                bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
                                       rbio->start_time);
        bio_endio(&rbio->bio);
}

static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio,
                                     struct bvec_iter bvec_iter,
                                     struct bch_io_failures *failed,
                                     unsigned flags)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_buf sk;
        struct bkey_s_c k;
        int ret;

        flags &= ~BCH_READ_LAST_FRAGMENT;
        flags |= BCH_READ_MUST_CLONE;

        bch2_bkey_buf_init(&sk);

        bch2_trans_iter_init(trans, &iter, rbio->data_btree,
                             rbio->read_pos, BTREE_ITER_slots);
retry:
        bch2_trans_begin(trans);
        rbio->bio.bi_status = 0;

        k = bch2_btree_iter_peek_slot(&iter);
        if (bkey_err(k))
                goto err;

        bch2_bkey_buf_reassemble(&sk, c, k);
        k = bkey_i_to_s_c(sk.k);

        if (!bch2_bkey_matches_ptr(c, k,
                                   rbio->pick.ptr,
                                   rbio->data_pos.offset -
                                   rbio->pick.crc.offset)) {
                /* extent we wanted to read no longer exists: */
                rbio->hole = true;
                goto out;
        }

        ret = __bch2_read_extent(trans, rbio, bvec_iter,
                                 rbio->read_pos,
                                 rbio->data_btree,
                                 k, 0, failed, flags);
        if (ret == READ_RETRY)
                goto retry;
        if (ret)
                goto err;
out:
        bch2_rbio_done(rbio);
        bch2_trans_iter_exit(trans, &iter);
        bch2_trans_put(trans);
        bch2_bkey_buf_exit(&sk, c);
        return;
err:
        rbio->bio.bi_status = BLK_STS_IOERR;
        goto out;
}

static void bch2_rbio_retry(struct work_struct *work)
{
        struct bch_read_bio *rbio =
                container_of(work, struct bch_read_bio, work);
        struct bch_fs *c        = rbio->c;
        struct bvec_iter iter   = rbio->bvec_iter;
        unsigned flags          = rbio->flags;
        subvol_inum inum = {
                .subvol = rbio->subvol,
                .inum   = rbio->read_pos.inode,
        };
        struct bch_io_failures failed = { .nr = 0 };

        trace_and_count(c, read_retry, &rbio->bio);

        if (rbio->retry == READ_RETRY_AVOID)
                bch2_mark_io_failure(&failed, &rbio->pick);

        rbio->bio.bi_status = 0;

        rbio = bch2_rbio_free(rbio);

        flags |= BCH_READ_IN_RETRY;
        flags &= ~BCH_READ_MAY_PROMOTE;

        if (flags & BCH_READ_NODECODE) {
                bch2_read_retry_nodecode(c, rbio, iter, &failed, flags);
        } else {
                flags &= ~BCH_READ_LAST_FRAGMENT;
                flags |= BCH_READ_MUST_CLONE;

                __bch2_read(c, rbio, iter, inum, &failed, flags);
        }
}

static void bch2_rbio_error(struct bch_read_bio *rbio, int retry,
                            blk_status_t error)
{
        rbio->retry = retry;

        if (rbio->flags & BCH_READ_IN_RETRY)
                return;

        if (retry == READ_ERR) {
                rbio = bch2_rbio_free(rbio);

                rbio->bio.bi_status = error;
                bch2_rbio_done(rbio);
        } else {
                bch2_rbio_punt(rbio, bch2_rbio_retry,
                               RBIO_CONTEXT_UNBOUND, system_unbound_wq);
        }
}

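/*
 * Checksum "narrowing": if we read and verified a whole checksummed extent but
 * only part of it is live, we can replace the stored checksum with one
 * covering just the live range, so future reads of this extent don't have to
 * read and checksum the full extent again:
 */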
static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
                                   struct bch_read_bio *rbio)
{
        struct bch_fs *c = rbio->c;
        u64 data_offset = rbio->data_pos.offset - rbio->pick.crc.offset;
        struct bch_extent_crc_unpacked new_crc;
        struct btree_iter iter;
        struct bkey_i *new;
        struct bkey_s_c k;
        int ret = 0;

        if (crc_is_compressed(rbio->pick.crc))
                return 0;

        k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos,
                               BTREE_ITER_slots|BTREE_ITER_intent);
        if ((ret = bkey_err(k)))
                goto out;

        if (bversion_cmp(k.k->version, rbio->version) ||
            !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
                goto out;

        /* Extent was merged? */
        if (bkey_start_offset(k.k) < data_offset ||
            k.k->p.offset > data_offset + rbio->pick.crc.uncompressed_size)
                goto out;

        if (bch2_rechecksum_bio(c, &rbio->bio, rbio->version,
                        rbio->pick.crc, NULL, &new_crc,
                        bkey_start_offset(k.k) - data_offset, k.k->size,
                        rbio->pick.crc.csum_type)) {
                bch_err(c, "error verifying existing checksum while narrowing checksum (memory corruption?)");
                ret = 0;
                goto out;
        }

        /*
         * going to be temporarily appending another checksum entry:
         */
        new = bch2_trans_kmalloc(trans, bkey_bytes(k.k) +
                                 sizeof(struct bch_extent_crc128));
        if ((ret = PTR_ERR_OR_ZERO(new)))
                goto out;

        bkey_reassemble(new, k);

        if (!bch2_bkey_narrow_crcs(new, new_crc))
                goto out;

        ret = bch2_trans_update(trans, &iter, new,
                                BTREE_UPDATE_internal_snapshot_node);
out:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
{
        bch2_trans_do(rbio->c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
                      __bch2_rbio_narrow_crcs(trans, rbio));
}

/* Inner part that may run in process context */
static void __bch2_read_endio(struct work_struct *work)
{
        struct bch_read_bio *rbio =
                container_of(work, struct bch_read_bio, work);
        struct bch_fs *c        = rbio->c;
        struct bio *src         = &rbio->bio;
        struct bio *dst         = &bch2_rbio_parent(rbio)->bio;
        struct bvec_iter dst_iter = rbio->bvec_iter;
        struct bch_extent_crc_unpacked crc = rbio->pick.crc;
        struct nonce nonce = extent_nonce(rbio->version, crc);
        unsigned nofs_flags;
        struct bch_csum csum;
        int ret;

        nofs_flags = memalloc_nofs_save();

        /* Reset iterator for checksumming and copying bounced data: */
        if (rbio->bounce) {
                src->bi_iter.bi_size            = crc.compressed_size << 9;
                src->bi_iter.bi_idx             = 0;
                src->bi_iter.bi_bvec_done       = 0;
        } else {
                src->bi_iter                    = rbio->bvec_iter;
        }

        csum = bch2_checksum_bio(c, crc.csum_type, nonce, src);
        if (bch2_crc_cmp(csum, rbio->pick.crc.csum) && !c->opts.no_data_io)
                goto csum_err;

        /*
         * XXX
         * We need to rework the narrow_crcs path to deliver the read completion
         * first, and then punt to a different workqueue, otherwise we're
         * holding up reads while doing btree updates which is bad for memory
         * reclaim.
         */
        if (unlikely(rbio->narrow_crcs))
                bch2_rbio_narrow_crcs(rbio);

        if (rbio->flags & BCH_READ_NODECODE)
                goto nodecode;

        /* Adjust crc to point to subset of data we want: */
        crc.offset     += rbio->offset_into_extent;
        crc.live_size   = bvec_iter_sectors(rbio->bvec_iter);

        if (crc_is_compressed(crc)) {
                ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
                if (ret)
                        goto decrypt_err;

                if (bch2_bio_uncompress(c, src, dst, dst_iter, crc) &&
                    !c->opts.no_data_io)
                        goto decompression_err;
        } else {
                /* don't need to decrypt the entire bio: */
                nonce = nonce_add(nonce, crc.offset << 9);
                bio_advance(src, crc.offset << 9);

                BUG_ON(src->bi_iter.bi_size < dst_iter.bi_size);
                src->bi_iter.bi_size = dst_iter.bi_size;

                ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
                if (ret)
                        goto decrypt_err;

                if (rbio->bounce) {
                        struct bvec_iter src_iter = src->bi_iter;

                        bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
                }
        }

        if (rbio->promote) {
                /*
                 * Re encrypt data we decrypted, so it's consistent with
                 * rbio->crc:
                 */
                ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
                if (ret)
                        goto decrypt_err;

                promote_start(rbio->promote, rbio);
                rbio->promote = NULL;
        }
nodecode:
        if (likely(!(rbio->flags & BCH_READ_IN_RETRY))) {
                rbio = bch2_rbio_free(rbio);
                bch2_rbio_done(rbio);
        }
out:
        memalloc_nofs_restore(nofs_flags);
        return;
csum_err:
        /*
         * Checksum error: if the bio wasn't bounced, we may have been
         * reading into buffers owned by userspace (that userspace can
         * scribble over) - retry the read, bouncing it this time:
         */
        if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
                rbio->flags |= BCH_READ_MUST_BOUNCE;
                bch2_rbio_error(rbio, READ_RETRY, BLK_STS_IOERR);
                goto out;
        }

        struct printbuf buf = PRINTBUF;
        buf.atomic++;
        prt_str(&buf, "data ");
        bch2_csum_err_msg(&buf, crc.csum_type, rbio->pick.crc.csum, csum);

        struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
        if (ca) {
                bch_err_inum_offset_ratelimited(ca,
                        rbio->read_pos.inode,
                        rbio->read_pos.offset << 9,
                        "%s", buf.buf);
                bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
        }
        printbuf_exit(&buf);
        bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
        goto out;
decompression_err:
        bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
                                        rbio->read_pos.offset << 9,
                                        "decompression error");
        bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
        goto out;
decrypt_err:
        bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode,
                                        rbio->read_pos.offset << 9,
                                        "decrypt error");
        bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR);
        goto out;
}

static void bch2_read_endio(struct bio *bio)
{
        struct bch_read_bio *rbio =
                container_of(bio, struct bch_read_bio, bio);
        struct bch_fs *c        = rbio->c;
        struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
        struct workqueue_struct *wq = NULL;
        enum rbio_context context = RBIO_CONTEXT_NULL;

        if (rbio->have_ioref) {
                bch2_latency_acct(ca, rbio->submit_time, READ);
                percpu_ref_put(&ca->io_ref);
        }

        if (!rbio->split)
                rbio->bio.bi_end_io = rbio->end_io;

        if (bio->bi_status) {
                if (ca) {
                        bch_err_inum_offset_ratelimited(ca,
                                rbio->read_pos.inode,
                                rbio->read_pos.offset << 9,
                                "data read error: %s",
                                bch2_blk_status_to_str(bio->bi_status));
                        bch2_io_error(ca, BCH_MEMBER_ERROR_read);
                }
                bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status);
                return;
        }

        if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
            (ca && dev_ptr_stale(ca, &rbio->pick.ptr))) {
                trace_and_count(c, read_reuse_race, &rbio->bio);

                if (rbio->flags & BCH_READ_RETRY_IF_STALE)
                        bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
                else
                        bch2_rbio_error(rbio, READ_ERR, BLK_STS_AGAIN);
                return;
        }

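        /*
         * Decompression, decryption and btree updates can't run in interrupt
         * context - punt those to the unbound workqueue; bare checksum
         * verification can run at high priority:
         */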
        if (rbio->narrow_crcs ||
            rbio->promote ||
            crc_is_compressed(rbio->pick.crc) ||
            bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
                context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
        else if (rbio->pick.crc.csum_type)
                context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;

        bch2_rbio_punt(rbio, __bch2_read_endio, context, wq);
}

int __bch2_read_indirect_extent(struct btree_trans *trans,
                                unsigned *offset_into_extent,
                                struct bkey_buf *orig_k)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 reflink_offset;
        int ret;

        reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
                *offset_into_extent;

        k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_reflink,
                               POS(0, reflink_offset), 0);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (k.k->type != KEY_TYPE_reflink_v &&
            k.k->type != KEY_TYPE_indirect_inline_data) {
                bch_err_inum_offset_ratelimited(trans->c,
                        orig_k->k->k.p.inode,
                        orig_k->k->k.p.offset << 9,
                        "%llu len %u points to nonexistent indirect extent %llu",
                        orig_k->k->k.p.offset,
                        orig_k->k->k.size,
                        reflink_offset);
                bch2_inconsistent_error(trans->c);
                ret = -EIO;
                goto err;
        }

        *offset_into_extent = iter.pos.offset - bkey_start_offset(k.k);
        bch2_bkey_buf_reassemble(orig_k, trans->c, k);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
                                                   struct bch_dev *ca,
                                                   struct bkey_s_c k,
                                                   struct bch_extent_ptr ptr)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct printbuf buf = PRINTBUF;
        int ret;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
                             PTR_BUCKET_POS(ca, &ptr),
                             BTREE_ITER_cached);

        u8 *gen = bucket_gen(ca, iter.pos.offset);
        if (gen) {
                prt_printf(&buf, "Attempting to read from stale dirty pointer:\n");
                printbuf_indent_add(&buf, 2);

                bch2_bkey_val_to_text(&buf, c, k);
                prt_newline(&buf);

                prt_printf(&buf, "memory gen: %u", *gen);

                ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
                if (!ret) {
                        prt_newline(&buf);
                        bch2_bkey_val_to_text(&buf, c, k);
                }
        } else {
                prt_printf(&buf, "Attempting to read from invalid bucket %llu:%llu:\n",
                           iter.pos.inode, iter.pos.offset);
                printbuf_indent_add(&buf, 2);

                prt_printf(&buf, "first bucket %u nbuckets %llu\n",
                           ca->mi.first_bucket, ca->mi.nbuckets);

                bch2_bkey_val_to_text(&buf, c, k);
                prt_newline(&buf);
        }

        bch2_fs_inconsistent(c, "%s", buf.buf);

        bch2_trans_iter_exit(trans, &iter);
        printbuf_exit(&buf);
}

int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
                       struct bvec_iter iter, struct bpos read_pos,
                       enum btree_id data_btree, struct bkey_s_c k,
                       unsigned offset_into_extent,
                       struct bch_io_failures *failed, unsigned flags)
{
        struct bch_fs *c = trans->c;
        struct extent_ptr_decoded pick;
        struct bch_read_bio *rbio = NULL;
        struct promote_op *promote = NULL;
        bool bounce = false, read_full = false, narrow_crcs = false;
        struct bpos data_pos = bkey_start_pos(k.k);
        int pick_ret;

        if (bkey_extent_is_inline_data(k.k)) {
                unsigned bytes = min_t(unsigned, iter.bi_size,
                                       bkey_inline_data_bytes(k.k));

                swap(iter.bi_size, bytes);
                memcpy_to_bio(&orig->bio, iter, bkey_inline_data_p(k));
                swap(iter.bi_size, bytes);
                bio_advance_iter(&orig->bio, &iter, bytes);
                zero_fill_bio_iter(&orig->bio, iter);
                goto out_read_done;
        }
retry_pick:
        pick_ret = bch2_bkey_pick_read_device(c, k, failed, &pick);

        /* hole or reservation - just zero fill: */
        if (!pick_ret)
                goto hole;

        if (pick_ret < 0) {
                bch_err_inum_offset_ratelimited(c,
                                read_pos.inode, read_pos.offset << 9,
                                "no device to read from");
                goto err;
        }

        struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);

        /*
         * Stale dirty pointers are treated as IO errors, but @failed isn't
         * allocated unless we're in the retry path - so if we're not in the
         * retry path, don't check here, it'll be caught in bch2_read_endio()
         * and we'll end up in the retry path:
         */
        if ((flags & BCH_READ_IN_RETRY) &&
            !pick.ptr.cached &&
            ca &&
            unlikely(dev_ptr_stale(ca, &pick.ptr))) {
                read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
                bch2_mark_io_failure(failed, &pick);
                percpu_ref_put(&ca->io_ref);
                goto retry_pick;
        }

        /*
         * Unlock the iterator while the btree node's lock is still in
         * cache, before doing the IO:
         */
        bch2_trans_unlock(trans);

        if (flags & BCH_READ_NODECODE) {
                /*
                 * can happen if we retry, and the extent we were going to read
                 * has been merged in the meantime:
                 */
                if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS) {
                        if (ca)
                                percpu_ref_put(&ca->io_ref);
                        goto hole;
                }

                iter.bi_size    = pick.crc.compressed_size << 9;
                goto get_bio;
        }

        if (!(flags & BCH_READ_LAST_FRAGMENT) ||
            bio_flagged(&orig->bio, BIO_CHAIN))
                flags |= BCH_READ_MUST_CLONE;

        narrow_crcs = !(flags & BCH_READ_IN_RETRY) &&
                bch2_can_narrow_extent_crcs(k, pick.crc);

        if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
                flags |= BCH_READ_MUST_BOUNCE;

        EBUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);

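        /*
         * We have to read the whole extent (and bounce it) if it's compressed,
         * if it's checksummed and we're reading only part of it (the checksum
         * covers the full extent), or if it's encrypted and we'd otherwise be
         * decrypting into user mapped pages:
         */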
        if (crc_is_compressed(pick.crc) ||
            (pick.crc.csum_type != BCH_CSUM_none &&
             (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
              (bch2_csum_type_is_encryption(pick.crc.csum_type) &&
               (flags & BCH_READ_USER_MAPPED)) ||
              (flags & BCH_READ_MUST_BOUNCE)))) {
                read_full = true;
                bounce = true;
        }

        if (orig->opts.promote_target /* || failed */)
                promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags,
                                        &rbio, &bounce, &read_full, failed);

        if (!read_full) {
                EBUG_ON(crc_is_compressed(pick.crc));
                EBUG_ON(pick.crc.csum_type &&
                        (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
                         bvec_iter_sectors(iter) != pick.crc.live_size ||
                         pick.crc.offset ||
                         offset_into_extent));

                data_pos.offset += offset_into_extent;
                pick.ptr.offset += pick.crc.offset +
                        offset_into_extent;
                offset_into_extent              = 0;
                pick.crc.compressed_size        = bvec_iter_sectors(iter);
                pick.crc.uncompressed_size      = bvec_iter_sectors(iter);
                pick.crc.offset                 = 0;
                pick.crc.live_size              = bvec_iter_sectors(iter);
        }
get_bio:
        if (rbio) {
                /*
                 * promote already allocated bounce rbio:
                 * promote needs to allocate a bio big enough for uncompressing
                 * data in the write path, but we're not going to use it all
                 * here:
                 */
                EBUG_ON(rbio->bio.bi_iter.bi_size <
                       pick.crc.compressed_size << 9);
                rbio->bio.bi_iter.bi_size =
                        pick.crc.compressed_size << 9;
        } else if (bounce) {
                unsigned sectors = pick.crc.compressed_size;

                rbio = rbio_init(bio_alloc_bioset(NULL,
                                                  DIV_ROUND_UP(sectors, PAGE_SECTORS),
                                                  0,
                                                  GFP_NOFS,
                                                  &c->bio_read_split),
                                 orig->opts);

                bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
                rbio->bounce    = true;
                rbio->split     = true;
        } else if (flags & BCH_READ_MUST_CLONE) {
                /*
                 * Have to clone if there were any splits, due to error
                 * reporting issues (if a split errored, and retrying didn't
                 * work, when it reports the error to its parent (us) we don't
                 * know if the error was from our bio, and we should retry, or
                 * from the whole bio, in which case we don't want to retry and
                 * lose the error)
                 */
                rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOFS,
                                                 &c->bio_read_split),
                                 orig->opts);
                rbio->bio.bi_iter = iter;
                rbio->split     = true;
        } else {
                rbio = orig;
                rbio->bio.bi_iter = iter;
                EBUG_ON(bio_flagged(&rbio->bio, BIO_CHAIN));
        }

        EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);

        rbio->c                 = c;
        rbio->submit_time       = local_clock();
        if (rbio->split)
                rbio->parent    = orig;
        else
                rbio->end_io    = orig->bio.bi_end_io;
        rbio->bvec_iter         = iter;
        rbio->offset_into_extent = offset_into_extent;
        rbio->flags             = flags;
        rbio->have_ioref        = ca != NULL;
        rbio->narrow_crcs       = narrow_crcs;
        rbio->hole              = 0;
        rbio->retry             = 0;
        rbio->context           = 0;
        /* XXX: only initialize this if needed */
        rbio->devs_have         = bch2_bkey_devs(k);
        rbio->pick              = pick;
        rbio->subvol            = orig->subvol;
        rbio->read_pos          = read_pos;
        rbio->data_btree        = data_btree;
        rbio->data_pos          = data_pos;
        rbio->version           = k.k->version;
        rbio->promote           = promote;
        INIT_WORK(&rbio->work, NULL);

        if (flags & BCH_READ_NODECODE)
                orig->pick = pick;

        rbio->bio.bi_opf        = orig->bio.bi_opf;
        rbio->bio.bi_iter.bi_sector = pick.ptr.offset;
        rbio->bio.bi_end_io     = bch2_read_endio;

        if (rbio->bounce)
                trace_and_count(c, read_bounce, &rbio->bio);

        this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
        bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);

        /*
         * If it's being moved internally, we don't want to flag it as a cache
         * hit:
         */
        if (ca && pick.ptr.cached && !(flags & BCH_READ_NODECODE))
                bch2_bucket_io_time_reset(trans, pick.ptr.dev,
                        PTR_BUCKET_NR(ca, &pick.ptr), READ);

        if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
                bio_inc_remaining(&orig->bio);
                trace_and_count(c, read_split, &orig->bio);
        }

        if (!rbio->pick.idx) {
                if (!rbio->have_ioref) {
                        bch_err_inum_offset_ratelimited(c,
                                        read_pos.inode,
                                        read_pos.offset << 9,
                                        "no device to read from");
                        bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
                        goto out;
                }

                this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_user],
                             bio_sectors(&rbio->bio));
                bio_set_dev(&rbio->bio, ca->disk_sb.bdev);

                if (unlikely(c->opts.no_data_io)) {
                        if (likely(!(flags & BCH_READ_IN_RETRY)))
                                bio_endio(&rbio->bio);
                } else {
                        if (likely(!(flags & BCH_READ_IN_RETRY)))
                                submit_bio(&rbio->bio);
                        else
                                submit_bio_wait(&rbio->bio);
                }

                /*
                 * We just submitted IO which may block, we expect relock fail
                 * events and shouldn't count them:
                 */
                trans->notrace_relock_fail = true;
        } else {
                /* Attempting reconstruct read: */
                if (bch2_ec_read_extent(trans, rbio)) {
                        bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
                        goto out;
                }

                if (likely(!(flags & BCH_READ_IN_RETRY)))
                        bio_endio(&rbio->bio);
        }
out:
        if (likely(!(flags & BCH_READ_IN_RETRY))) {
                return 0;
        } else {
                int ret;

                rbio->context = RBIO_CONTEXT_UNBOUND;
                bch2_read_endio(&rbio->bio);

                ret = rbio->retry;
                rbio = bch2_rbio_free(rbio);

                if (ret == READ_RETRY_AVOID) {
                        bch2_mark_io_failure(failed, &pick);
                        ret = READ_RETRY;
                }

                if (!ret)
                        goto out_read_done;

                return ret;
        }

err:
        if (flags & BCH_READ_IN_RETRY)
                return READ_ERR;

        orig->bio.bi_status = BLK_STS_IOERR;
        goto out_read_done;

hole:
        /*
         * won't normally happen in the BCH_READ_NODECODE
         * (bch2_move_extent()) path, but if we retry and the extent we wanted
         * to read no longer exists we have to signal that:
         */
        if (flags & BCH_READ_NODECODE)
                orig->hole = true;

        zero_fill_bio_iter(&orig->bio, iter);
out_read_done:
        if (flags & BCH_READ_LAST_FRAGMENT)
                bch2_rbio_done(orig);
        return 0;
}

void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
                 struct bvec_iter bvec_iter, subvol_inum inum,
                 struct bch_io_failures *failed, unsigned flags)
{
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_buf sk;
        struct bkey_s_c k;
        int ret;

        BUG_ON(flags & BCH_READ_NODECODE);

        bch2_bkey_buf_init(&sk);
        bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
                             POS(inum.inum, bvec_iter.bi_sector),
                             BTREE_ITER_slots);

        while (1) {
                unsigned bytes, sectors, offset_into_extent;
                enum btree_id data_btree = BTREE_ID_extents;

                bch2_trans_begin(trans);

                u32 snapshot;
                ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
                if (ret)
                        goto err;

                bch2_btree_iter_set_snapshot(&iter, snapshot);

                bch2_btree_iter_set_pos(&iter,
                                POS(inum.inum, bvec_iter.bi_sector));

                k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
                if (ret)
                        goto err;

                offset_into_extent = iter.pos.offset -
                        bkey_start_offset(k.k);
                sectors = k.k->size - offset_into_extent;

                bch2_bkey_buf_reassemble(&sk, c, k);

                ret = bch2_read_indirect_extent(trans, &data_btree,
                                        &offset_into_extent, &sk);
                if (ret)
                        goto err;

                k = bkey_i_to_s_c(sk.k);

                /*
                 * With indirect extents, the amount of data to read is the min
                 * of the original extent and the indirect extent:
                 */
                sectors = min(sectors, k.k->size - offset_into_extent);

                bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9;
                swap(bvec_iter.bi_size, bytes);

                if (bvec_iter.bi_size == bytes)
                        flags |= BCH_READ_LAST_FRAGMENT;

                ret = __bch2_read_extent(trans, rbio, bvec_iter, iter.pos,
                                         data_btree, k,
                                         offset_into_extent, failed, flags);
                if (ret)
                        goto err;

                if (flags & BCH_READ_LAST_FRAGMENT)
                        break;

                swap(bvec_iter.bi_size, bytes);
                bio_advance_iter(&rbio->bio, &bvec_iter, bytes);

                ret = btree_trans_too_many_iters(trans);
                if (ret)
                        goto err;
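                /*
                 * The err label is inside the loop: transaction restarts and
                 * read retries re-run the current iteration, any other error
                 * breaks out:
                 */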
err:
                if (ret &&
                    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
                    ret != READ_RETRY &&
                    ret != READ_RETRY_AVOID)
                        break;
        }

        bch2_trans_iter_exit(trans, &iter);
        bch2_trans_put(trans);
        bch2_bkey_buf_exit(&sk, c);

        if (ret) {
                bch_err_inum_offset_ratelimited(c, inum.inum,
                                                bvec_iter.bi_sector << 9,
                                                "read error %i from btree lookup", ret);
                rbio->bio.bi_status = BLK_STS_IOERR;
                bch2_rbio_done(rbio);
        }
}

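/*
 * Usage sketch (illustrative, not part of this file): callers wrap their bio
 * in a bch_read_bio and hand the whole request to __bch2_read(). The wrapper
 * and flag set shown here are assumptions modelled on the bch2_read() helper
 * in io_read.h:
 *
 *      struct bch_read_bio *rbio = rbio_init(bio, io_opts);
 *
 *      __bch2_read(c, rbio, rbio->bio.bi_iter, inum, NULL,
 *                  BCH_READ_RETRY_IF_STALE|
 *                  BCH_READ_MAY_PROMOTE|
 *                  BCH_READ_USER_MAPPED);
 */
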
void bch2_fs_io_read_exit(struct bch_fs *c)
{
        if (c->promote_table.tbl)
                rhashtable_destroy(&c->promote_table);
        bioset_exit(&c->bio_read_split);
        bioset_exit(&c->bio_read);
}

int bch2_fs_io_read_init(struct bch_fs *c)
{
        if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
                        BIOSET_NEED_BVECS))
                return -BCH_ERR_ENOMEM_bio_read_init;

        if (bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
                        BIOSET_NEED_BVECS))
                return -BCH_ERR_ENOMEM_bio_read_split_init;

        if (rhashtable_init(&c->promote_table, &bch_promote_params))
                return -BCH_ERR_ENOMEM_promote_table_init;

        return 0;
}