Linux/fs/bcachefs/btree_write_buffer.c

// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "disk_accounting.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"

#include <linux/prefetch.h>
#include <linux/sort.h>

static int bch2_btree_write_buffer_journal_flush(struct journal *,
                                struct journal_entry_pin *, u64);

static int bch2_journal_keys_to_write_buffer(struct bch_fs *, struct journal_buf *);

static inline bool __wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
{
        return (cmp_int(l->hi, r->hi) ?:
                cmp_int(l->mi, r->mi) ?:
                cmp_int(l->lo, r->lo)) >= 0;
}

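/*
 * Fast path for the above: wb_key_ref is laid out with the least significant
 * word (lo) first and the most significant (hi) last, so on x86-64 a chained
 * sub/sbb performs a full 192-bit subtract, and "ae" (carry clear, i.e. no
 * borrow) gives us l >= r, matching __wb_key_ref_cmp():
 */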
static inline bool wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r)
{
#ifdef CONFIG_X86_64
        int cmp;

        asm("mov   (%[l]), %%rax;"
            "sub   (%[r]), %%rax;"
            "mov  8(%[l]), %%rax;"
            "sbb  8(%[r]), %%rax;"
            "mov 16(%[l]), %%rax;"
            "sbb 16(%[r]), %%rax;"
            : "=@ccae" (cmp)
            : [l] "r" (l), [r] "r" (r)
            : "rax", "cc");

        EBUG_ON(cmp != __wb_key_ref_cmp(l, r));
        return cmp;
#else
        return __wb_key_ref_cmp(l, r);
#endif
}

static int wb_key_seq_cmp(const void *_l, const void *_r)
{
        const struct btree_write_buffered_key *l = _l;
        const struct btree_write_buffered_key *r = _r;

        return cmp_int(l->journal_seq, r->journal_seq);
}

/* Compare excluding idx, the low 24 bits: */
static inline bool wb_key_eq(const void *_l, const void *_r)
{
        const struct wb_key_ref *l = _l;
        const struct wb_key_ref *r = _r;

        return !((l->hi ^ r->hi)|
                 (l->mi ^ r->mi)|
                 ((l->lo >> 24) ^ (r->lo >> 24)));
}

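/*
 * Bottom-up heapsort, specialized for wb_key_ref: since wb_key_ref_cmp()
 * computes l >= r, this builds a max-heap and produces ascending order.
 */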
static noinline void wb_sort(struct wb_key_ref *base, size_t num)
{
        size_t n = num, a = num / 2;

        if (!a)         /* num < 2 || size == 0 */
                return;

        for (;;) {
                size_t b, c, d;

                if (a)                  /* Building heap: sift down --a */
                        --a;
                else if (--n)           /* Sorting: Extract root to --n */
                        swap(base[0], base[n]);
                else                    /* Sort complete */
                        break;

                /*
                 * Sift element at "a" down into heap.  This is the
                 * "bottom-up" variant, which significantly reduces
                 * calls to cmp_func(): we find the sift-down path all
                 * the way to the leaves (one compare per level), then
                 * backtrack to find where to insert the target element.
                 *
                 * Because elements tend to sift down close to the leaves,
                 * this uses fewer compares than doing two per level
                 * on the way down.  (A bit more than half as many on
                 * average, 3/4 worst-case.)
                 */
                for (b = a; c = 2*b + 1, (d = c + 1) < n;)
                        b = wb_key_ref_cmp(base + c, base + d) ? c : d;
                if (d == n)             /* Special case last leaf with no sibling */
                        b = c;

                /* Now backtrack from "b" to the correct location for "a" */
                while (b != a && wb_key_ref_cmp(base + a, base + b))
                        b = (b - 1) / 2;
                c = b;                  /* Where "a" belongs */
                while (b != a) {        /* Shift it into place */
                        b = (b - 1) / 2;
                        swap(base[b], base[c]);
                }
        }
}

static noinline int wb_flush_one_slowpath(struct btree_trans *trans,
                                          struct btree_iter *iter,
                                          struct btree_write_buffered_key *wb)
{
        struct btree_path *path = btree_iter_path(trans, iter);

        bch2_btree_node_unlock_write(trans, path, path->l[0].b);

        trans->journal_res.seq = wb->journal_seq;

        return bch2_trans_update(trans, iter, &wb->k,
                                 BTREE_UPDATE_internal_snapshot_node) ?:
                bch2_trans_commit(trans, NULL, NULL,
                                  BCH_TRANS_COMMIT_no_enospc|
                                  BCH_TRANS_COMMIT_no_check_rw|
                                  BCH_TRANS_COMMIT_no_journal_res|
                                  BCH_TRANS_COMMIT_journal_reclaim);
}

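/*
 * Fast path for flushing a single key: insert directly into the btree node,
 * which we hold write locked, bypassing the transaction commit path; fall
 * back to wb_flush_one_slowpath() when the key doesn't fit in the node.
 */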
static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *iter,
                               struct btree_write_buffered_key *wb,
                               bool *write_locked,
                               bool *accounting_accumulated,
                               size_t *fast)
{
        struct btree_path *path;
        int ret;

        EBUG_ON(!wb->journal_seq);
        EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq);
        EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);

        ret = bch2_btree_iter_traverse(iter);
        if (ret)
                return ret;

        if (!*accounting_accumulated && wb->k.k.type == KEY_TYPE_accounting) {
                struct bkey u;
                struct bkey_s_c k = bch2_btree_path_peek_slot_exact(btree_iter_path(trans, iter), &u);

                if (k.k->type == KEY_TYPE_accounting)
                        bch2_accounting_accumulate(bkey_i_to_accounting(&wb->k),
                                                   bkey_s_c_to_accounting(k));
        }
        *accounting_accumulated = true;

        /*
         * We can't clone a path that has write locks: unshare it now, before
         * set_pos and traverse():
         */
        if (btree_iter_path(trans, iter)->ref > 1)
                iter->path = __bch2_btree_path_make_mut(trans, iter->path, true, _THIS_IP_);

        path = btree_iter_path(trans, iter);

        if (!*write_locked) {
                ret = bch2_btree_node_lock_write(trans, path, &path->l[0].b->c);
                if (ret)
                        return ret;

                bch2_btree_node_prep_for_write(trans, path, path->l[0].b);
                *write_locked = true;
        }

        if (unlikely(!bch2_btree_node_insert_fits(path->l[0].b, wb->k.k.u64s))) {
                *write_locked = false;
                return wb_flush_one_slowpath(trans, iter, wb);
        }

        bch2_btree_insert_key_leaf(trans, path, &wb->k, wb->journal_seq);
        (*fast)++;
        return 0;
}

/*
 * Update a btree with a write buffered key using the journal seq of the
 * original write buffer insert.
 *
 * It is not safe to rejournal the key once it has been inserted into the write
 * buffer because that may break recovery ordering. For example, the key may
 * have already been modified in the active write buffer in a seq that comes
 * before the current transaction. If we were to journal this key again and
 * crash, recovery would process updates in the wrong order.
 */
static int
btree_write_buffered_insert(struct btree_trans *trans,
                          struct btree_write_buffered_key *wb)
{
        struct btree_iter iter;
        int ret;

        bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
                             BTREE_ITER_cached|BTREE_ITER_intent);

        trans->journal_res.seq = wb->journal_seq;

        ret   = bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(trans, &iter, &wb->k,
                                  BTREE_UPDATE_internal_snapshot_node);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

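/*
 * Keys are buffered in two stages: they're first added to wb->inc, then
 * moved to wb->flushing (under both locks) when a flush begins - so new
 * keys can keep accumulating in wb->inc while wb->flushing is flushed.
 */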
static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb)
{
        struct bch_fs *c = container_of(wb, struct bch_fs, btree_write_buffer);
        struct journal *j = &c->journal;

        if (!wb->inc.keys.nr)
                return;

        bch2_journal_pin_add(j, wb->inc.keys.data[0].journal_seq, &wb->flushing.pin,
                             bch2_btree_write_buffer_journal_flush);

        darray_resize(&wb->flushing.keys, min_t(size_t, 1U << 20, wb->flushing.keys.nr + wb->inc.keys.nr));
        darray_resize(&wb->sorted, wb->flushing.keys.size);

        if (!wb->flushing.keys.nr && wb->sorted.size >= wb->inc.keys.nr) {
                swap(wb->flushing.keys, wb->inc.keys);
                goto out;
        }

        size_t nr = min(darray_room(wb->flushing.keys),
                        wb->sorted.size - wb->flushing.keys.nr);
        nr = min(nr, wb->inc.keys.nr);

        memcpy(&darray_top(wb->flushing.keys),
               wb->inc.keys.data,
               sizeof(wb->inc.keys.data[0]) * nr);

        memmove(wb->inc.keys.data,
                wb->inc.keys.data + nr,
                sizeof(wb->inc.keys.data[0]) * (wb->inc.keys.nr - nr));

        wb->flushing.keys.nr    += nr;
        wb->inc.keys.nr         -= nr;
out:
        if (!wb->inc.keys.nr)
                bch2_journal_pin_drop(j, &wb->inc.pin);
        else
                bch2_journal_pin_update(j, wb->inc.keys.data[0].journal_seq, &wb->inc.pin,
                                        bch2_btree_write_buffer_journal_flush);

        if (j->watermark) {
                spin_lock(&j->lock);
                bch2_journal_set_watermark(j);
                spin_unlock(&j->lock);
        }

        BUG_ON(wb->sorted.size < wb->flushing.keys.nr);
}

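/*
 * Flush keys from wb->flushing to the btree; caller must hold
 * wb->flushing.lock. Keys are first sorted by btree and position, so that
 * redundant updates can be dropped and consecutive keys often land in the
 * same leaf node; keys that can't take the fast path are committed
 * individually afterwards, in journal order.
 */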
static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
{
        struct bch_fs *c = trans->c;
        struct journal *j = &c->journal;
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        struct btree_iter iter = { NULL };
        size_t overwritten = 0, fast = 0, slowpath = 0, could_not_insert = 0;
        bool write_locked = false;
        bool accounting_replay_done = test_bit(BCH_FS_accounting_replay_done, &c->flags);
        int ret = 0;

        bch2_trans_unlock(trans);
        bch2_trans_begin(trans);

        mutex_lock(&wb->inc.lock);
        move_keys_from_inc_to_flushing(wb);
        mutex_unlock(&wb->inc.lock);

        for (size_t i = 0; i < wb->flushing.keys.nr; i++) {
                wb->sorted.data[i].idx = i;
                wb->sorted.data[i].btree = wb->flushing.keys.data[i].btree;
                memcpy(&wb->sorted.data[i].pos, &wb->flushing.keys.data[i].k.k.p, sizeof(struct bpos));
        }
        wb->sorted.nr = wb->flushing.keys.nr;

        /*
         * We first sort so that we can detect and skip redundant updates, and
         * then we attempt to flush in sorted btree order, as this is most
         * efficient.
         *
         * However, since we're not flushing in the order they appear in the
         * journal we won't be able to drop our journal pin until everything is
         * flushed - which means this could deadlock the journal if we weren't
         * passing BCH_TRANS_COMMIT_journal_reclaim. This causes the update to fail
         * if it would block taking a journal reservation.
         *
         * If that happens, simply skip the key so we can optimistically insert
         * as many keys as possible in the fast path.
         */
        wb_sort(wb->sorted.data, wb->sorted.nr);

        darray_for_each(wb->sorted, i) {
                struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx];

                for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++)
                        prefetch(&wb->flushing.keys.data[n->idx]);

                BUG_ON(!k->journal_seq);

                if (!accounting_replay_done &&
                    k->k.k.type == KEY_TYPE_accounting) {
                        slowpath++;
                        continue;
                }

                if (i + 1 < &darray_top(wb->sorted) &&
                    wb_key_eq(i, i + 1)) {
                        struct btree_write_buffered_key *n = &wb->flushing.keys.data[i[1].idx];

                        if (k->k.k.type == KEY_TYPE_accounting &&
                            n->k.k.type == KEY_TYPE_accounting)
                                bch2_accounting_accumulate(bkey_i_to_accounting(&n->k),
                                                           bkey_i_to_s_c_accounting(&k->k));

                        overwritten++;
                        n->journal_seq = min_t(u64, n->journal_seq, k->journal_seq);
                        k->journal_seq = 0;
                        continue;
                }

                if (write_locked) {
                        struct btree_path *path = btree_iter_path(trans, &iter);

                        if (path->btree_id != i->btree ||
                            bpos_gt(k->k.k.p, path->l[0].b->key.k.p)) {
                                bch2_btree_node_unlock_write(trans, path, path->l[0].b);
                                write_locked = false;

                                ret = lockrestart_do(trans,
                                        bch2_btree_iter_traverse(&iter) ?:
                                        bch2_foreground_maybe_merge(trans, iter.path, 0,
                                                        BCH_WATERMARK_reclaim|
                                                        BCH_TRANS_COMMIT_journal_reclaim|
                                                        BCH_TRANS_COMMIT_no_check_rw|
                                                        BCH_TRANS_COMMIT_no_enospc));
                                if (ret)
                                        goto err;
                        }
                }

                if (!iter.path || iter.btree_id != k->btree) {
                        bch2_trans_iter_exit(trans, &iter);
                        bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p,
                                             BTREE_ITER_intent|BTREE_ITER_all_snapshots);
                }

                bch2_btree_iter_set_pos(&iter, k->k.k.p);
                btree_iter_path(trans, &iter)->preserve = false;

                bool accounting_accumulated = false;
                do {
                        if (race_fault()) {
                                ret = -BCH_ERR_journal_reclaim_would_deadlock;
                                break;
                        }

                        ret = wb_flush_one(trans, &iter, k, &write_locked,
                                           &accounting_accumulated, &fast);
                        if (!write_locked)
                                bch2_trans_begin(trans);
                } while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

                if (!ret) {
                        k->journal_seq = 0;
                } else if (ret == -BCH_ERR_journal_reclaim_would_deadlock) {
                        slowpath++;
                        ret = 0;
                } else
                        break;
        }

        if (write_locked) {
                struct btree_path *path = btree_iter_path(trans, &iter);
                bch2_btree_node_unlock_write(trans, path, path->l[0].b);
        }
        bch2_trans_iter_exit(trans, &iter);

        if (ret)
                goto err;

        if (slowpath) {
                /*
                 * Flush keys in the order they were present in the journal, so
                 * that we can release journal pins: the fast path zeroed
                 * journal_seq on keys it flushed successfully, so we can skip
                 * those here.
                 */
                trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);

                sort(wb->flushing.keys.data,
                     wb->flushing.keys.nr,
                     sizeof(wb->flushing.keys.data[0]),
                     wb_key_seq_cmp, NULL);

                darray_for_each(wb->flushing.keys, i) {
                        if (!i->journal_seq)
                                continue;

                        if (!accounting_replay_done &&
                            i->k.k.type == KEY_TYPE_accounting) {
                                could_not_insert++;
                                continue;
                        }

                        if (!could_not_insert)
                                bch2_journal_pin_update(j, i->journal_seq, &wb->flushing.pin,
                                                        bch2_btree_write_buffer_journal_flush);

                        bch2_trans_begin(trans);

                        ret = commit_do(trans, NULL, NULL,
                                        BCH_WATERMARK_reclaim|
                                        BCH_TRANS_COMMIT_journal_reclaim|
                                        BCH_TRANS_COMMIT_no_check_rw|
                                        BCH_TRANS_COMMIT_no_enospc|
                                        BCH_TRANS_COMMIT_no_journal_res,
                                        btree_write_buffered_insert(trans, i));
                        if (ret)
                                goto err;

                        i->journal_seq = 0;
                }

                /*
                 * If journal replay hasn't finished with accounting keys we
                 * can't flush accounting keys at all - condense them and leave
                 * them for next time.
                 *
                 * Q: Can the write buffer overflow?
                 * A: Shouldn't be any actual risk. It's just new accounting
                 * updates that the write buffer can't flush, and those are only
                 * going to be generated by interior btree node updates as
                 * journal replay has to split/rewrite nodes to make room for
                 * its updates.
                 *
                 * And for those new accounting updates, updates to the same
                 * counters get accumulated as they're flushed from the journal
                 * to the write buffer - see the eytzinger tree accumulation of
                 * accounting keys. So we could only overflow if the number of
                 * distinct counters touched somehow was very large.
                 */
                if (could_not_insert) {
                        struct btree_write_buffered_key *dst = wb->flushing.keys.data;

                        darray_for_each(wb->flushing.keys, i)
                                if (i->journal_seq)
                                        *dst++ = *i;
                        wb->flushing.keys.nr = dst - wb->flushing.keys.data;
                }
        }
err:
        if (ret || !could_not_insert) {
                bch2_journal_pin_drop(j, &wb->flushing.pin);
                wb->flushing.keys.nr = 0;
        }

        bch2_fs_fatal_err_on(ret, c, "%s", bch2_err_str(ret));
        trace_write_buffer_flush(trans, wb->flushing.keys.nr, overwritten, fast, 0);
        return ret;
}

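/*
 * Pull keys from any journal buffers, up to @seq, that haven't yet been
 * moved into the write buffer:
 */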
static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 seq)
{
        struct journal *j = &c->journal;
        struct journal_buf *buf;
        int ret = 0;

        while (!ret && (buf = bch2_next_write_buffer_flush_journal_buf(j, seq))) {
                ret = bch2_journal_keys_to_write_buffer(c, buf);
                mutex_unlock(&j->buf_lock);
        }

        return ret;
}

static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq)
{
        struct bch_fs *c = trans->c;
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        int ret = 0, fetch_from_journal_err;

        do {
                bch2_trans_unlock(trans);

                fetch_from_journal_err = fetch_wb_keys_from_journal(c, seq);

                /*
                 * On memory allocation failure, bch2_btree_write_buffer_flush_locked()
                 * is not guaranteed to empty wb->inc:
                 */
                mutex_lock(&wb->flushing.lock);
                ret = bch2_btree_write_buffer_flush_locked(trans);
                mutex_unlock(&wb->flushing.lock);
        } while (!ret &&
                 (fetch_from_journal_err ||
                  (wb->inc.pin.seq && wb->inc.pin.seq <= seq) ||
                  (wb->flushing.pin.seq && wb->flushing.pin.seq <= seq)));

        return ret;
}

static int bch2_btree_write_buffer_journal_flush(struct journal *j,
                                struct journal_entry_pin *_pin, u64 seq)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);

        return bch2_trans_run(c, btree_write_buffer_flush_seq(trans, seq));
}

int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{
        struct bch_fs *c = trans->c;

        trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_);

        return btree_write_buffer_flush_seq(trans, journal_cur_seq(&c->journal));
}

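/*
 * Flush without checking whether the filesystem is read-write: the caller
 * must either hold a write ref or otherwise know we can't go read-only:
 */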
int bch2_btree_write_buffer_flush_nocheck_rw(struct btree_trans *trans)
{
        struct bch_fs *c = trans->c;
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        int ret = 0;

        if (mutex_trylock(&wb->flushing.lock)) {
                ret = bch2_btree_write_buffer_flush_locked(trans);
                mutex_unlock(&wb->flushing.lock);
        }

        return ret;
}

int bch2_btree_write_buffer_tryflush(struct btree_trans *trans)
{
        struct bch_fs *c = trans->c;

        if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer))
                return -BCH_ERR_erofs_no_writes;

        int ret = bch2_btree_write_buffer_flush_nocheck_rw(trans);
        bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
        return ret;
}

/*
 * In check and repair code, when checking references to write buffer btrees we
 * need to issue a flush before we have a definitive error: this issues a flush
 * if this is a key we haven't yet checked.
 */
int bch2_btree_write_buffer_maybe_flush(struct btree_trans *trans,
                                        struct bkey_s_c referring_k,
                                        struct bkey_buf *last_flushed)
{
        struct bch_fs *c = trans->c;
        struct bkey_buf tmp;
        int ret = 0;

        bch2_bkey_buf_init(&tmp);

        if (!bkey_and_val_eq(referring_k, bkey_i_to_s_c(last_flushed->k))) {
                bch2_bkey_buf_reassemble(&tmp, c, referring_k);

                if (bkey_is_btree_ptr(referring_k.k)) {
                        bch2_trans_unlock(trans);
                        bch2_btree_interior_updates_flush(c);
                }

                ret = bch2_btree_write_buffer_flush_sync(trans);
                if (ret)
                        goto err;

                bch2_bkey_buf_copy(last_flushed, c, tmp.k);
                ret = -BCH_ERR_transaction_restart_write_buffer_flush;
        }
err:
        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}
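
/*
 * A minimal usage sketch (hypothetical caller, not from this file): a check
 * loop keeps a "last flushed" key and retries the check when this returns
 * -BCH_ERR_transaction_restart_write_buffer_flush:
 *
 *	struct bkey_buf last_flushed;
 *	bch2_bkey_buf_init(&last_flushed);
 *	bkey_init(&last_flushed.k->k);
 *
 *	ret = bch2_btree_write_buffer_maybe_flush(trans, referring_k,
 *						  &last_flushed);
 *	...
 *	bch2_bkey_buf_exit(&last_flushed, c);
 */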

static void bch2_btree_write_buffer_flush_work(struct work_struct *work)
{
        struct bch_fs *c = container_of(work, struct bch_fs, btree_write_buffer.flush_work);
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        int ret;

        mutex_lock(&wb->flushing.lock);
        do {
                ret = bch2_trans_run(c, bch2_btree_write_buffer_flush_locked(trans));
        } while (!ret && bch2_btree_write_buffer_should_flush(c));
        mutex_unlock(&wb->flushing.lock);

        bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);
}

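/*
 * wb->accounting is kept in eytzinger order so that an update to a counter
 * we're already buffering can find its entry and accumulate in place; the
 * slowpath below appends a new counter and re-sorts:
 */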
static void wb_accounting_sort(struct btree_write_buffer *wb)
{
        eytzinger0_sort(wb->accounting.data, wb->accounting.nr,
                        sizeof(wb->accounting.data[0]),
                        wb_key_cmp, NULL);
}

int bch2_accounting_key_to_wb_slowpath(struct bch_fs *c, enum btree_id btree,
                                       struct bkey_i_accounting *k)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        struct btree_write_buffered_key new = { .btree = btree };

        bkey_copy(&new.k, &k->k_i);

        int ret = darray_push(&wb->accounting, new);
        if (ret)
                return ret;

        wb_accounting_sort(wb);
        return 0;
}

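/*
 * Slowpath for adding a journal key to the write buffer: the buffer was
 * full, so grow it; if we were adding to wb->flushing and it can't be
 * grown, fall back to appending to wb->inc:
 */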
int bch2_journal_key_to_wb_slowpath(struct bch_fs *c,
                             struct journal_keys_to_wb *dst,
                             enum btree_id btree, struct bkey_i *k)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        int ret;
retry:
        ret = darray_make_room_gfp(&dst->wb->keys, 1, GFP_KERNEL);
        if (!ret && dst->wb == &wb->flushing)
                ret = darray_resize(&wb->sorted, wb->flushing.keys.size);

        if (unlikely(ret)) {
                if (dst->wb == &c->btree_write_buffer.flushing) {
                        mutex_unlock(&dst->wb->lock);
                        dst->wb = &c->btree_write_buffer.inc;
                        bch2_journal_pin_add(&c->journal, dst->seq, &dst->wb->pin,
                                             bch2_btree_write_buffer_journal_flush);
                        goto retry;
                }

                return ret;
        }

        dst->room = darray_room(dst->wb->keys);
        if (dst->wb == &wb->flushing)
                dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr);
        BUG_ON(!dst->room);
        BUG_ON(!dst->seq);

        struct btree_write_buffered_key *wb_k = &darray_top(dst->wb->keys);
        wb_k->journal_seq       = dst->seq;
        wb_k->btree             = btree;
        bkey_copy(&wb_k->k, k);
        dst->wb->keys.nr++;
        dst->room--;
        return 0;
}

void bch2_journal_keys_to_write_buffer_start(struct bch_fs *c, struct journal_keys_to_wb *dst, u64 seq)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;

        if (mutex_trylock(&wb->flushing.lock)) {
                mutex_lock(&wb->inc.lock);
                move_keys_from_inc_to_flushing(wb);

                /*
                 * Attempt to skip wb->inc, and add keys directly to
                 * wb->flushing, saving us a copy later:
                 */

                if (!wb->inc.keys.nr) {
                        dst->wb = &wb->flushing;
                } else {
                        mutex_unlock(&wb->flushing.lock);
                        dst->wb = &wb->inc;
                }
        } else {
                mutex_lock(&wb->inc.lock);
                dst->wb = &wb->inc;
        }

        dst->room = darray_room(dst->wb->keys);
        if (dst->wb == &wb->flushing)
                dst->room = min(dst->room, wb->sorted.size - wb->flushing.keys.nr);
        dst->seq = seq;

        bch2_journal_pin_add(&c->journal, seq, &dst->wb->pin,
                             bch2_btree_write_buffer_journal_flush);

        darray_for_each(wb->accounting, i)
                memset(&i->k.v, 0, bkey_val_bytes(&i->k.k));
}

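/*
 * Finish adding a journal buffer's keys: append the accounting keys that
 * accumulated since _start(), compact away zeroed counters when fewer than
 * half are live, and kick off a background flush if the write buffer is
 * filling up:
 */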
int bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_to_wb *dst)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;
        unsigned live_accounting_keys = 0;
        int ret = 0;

        darray_for_each(wb->accounting, i)
                if (!bch2_accounting_key_is_zero(bkey_i_to_s_c_accounting(&i->k))) {
                        i->journal_seq = dst->seq;
                        live_accounting_keys++;
                        ret = __bch2_journal_key_to_wb(c, dst, i->btree, &i->k);
                        if (ret)
                                break;
                }

        if (live_accounting_keys * 2 < wb->accounting.nr) {
                struct btree_write_buffered_key *dst = wb->accounting.data;

                darray_for_each(wb->accounting, src)
                        if (!bch2_accounting_key_is_zero(bkey_i_to_s_c_accounting(&src->k)))
                                *dst++ = *src;
                wb->accounting.nr = dst - wb->accounting.data;
                wb_accounting_sort(wb);
        }

        if (!dst->wb->keys.nr)
                bch2_journal_pin_drop(&c->journal, &dst->wb->pin);

        if (bch2_btree_write_buffer_should_flush(c) &&
            __bch2_write_ref_tryget(c, BCH_WRITE_REF_btree_write_buffer) &&
            !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work))
                bch2_write_ref_put(c, BCH_WRITE_REF_btree_write_buffer);

        if (dst->wb == &wb->flushing)
                mutex_unlock(&wb->flushing.lock);
        mutex_unlock(&wb->inc.lock);

        return ret;
}

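/*
 * Move a journal buffer's write buffer keys into the write buffer,
 * retagging the jset entries so the keys won't be picked up as write
 * buffer keys a second time:
 */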
static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf)
{
        struct journal_keys_to_wb dst;
        int ret = 0;

        bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq));

        for_each_jset_entry_type(entry, buf->data, BCH_JSET_ENTRY_write_buffer_keys) {
                jset_entry_for_each_key(entry, k) {
                        ret = bch2_journal_key_to_wb(c, &dst, entry->btree_id, k);
                        if (ret)
                                goto out;
                }

                entry->type = BCH_JSET_ENTRY_btree_keys;
        }

        spin_lock(&c->journal.lock);
        buf->need_flush_to_write_buffer = false;
        spin_unlock(&c->journal.lock);
out:
        ret = bch2_journal_keys_to_write_buffer_end(c, &dst) ?: ret;
        return ret;
}

static int wb_keys_resize(struct btree_write_buffer_keys *wb, size_t new_size)
{
        if (wb->keys.size >= new_size)
                return 0;

        if (!mutex_trylock(&wb->lock))
                return -EINTR;

        int ret = darray_resize(&wb->keys, new_size);
        mutex_unlock(&wb->lock);
        return ret;
}

int bch2_btree_write_buffer_resize(struct bch_fs *c, size_t new_size)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;

        return wb_keys_resize(&wb->flushing, new_size) ?:
                wb_keys_resize(&wb->inc, new_size);
}

void bch2_fs_btree_write_buffer_exit(struct bch_fs *c)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;

        BUG_ON((wb->inc.keys.nr || wb->flushing.keys.nr) &&
               !bch2_journal_error(&c->journal));

        darray_exit(&wb->accounting);
        darray_exit(&wb->sorted);
        darray_exit(&wb->flushing.keys);
        darray_exit(&wb->inc.keys);
}

int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
{
        struct btree_write_buffer *wb = &c->btree_write_buffer;

        mutex_init(&wb->inc.lock);
        mutex_init(&wb->flushing.lock);
        INIT_WORK(&wb->flush_work, bch2_btree_write_buffer_flush_work);

        /* Will be resized by journal as needed: */
        unsigned initial_size = 1 << 16;

        return  darray_make_room(&wb->inc.keys, initial_size) ?:
                darray_make_room(&wb->flushing.keys, initial_size) ?:
                darray_make_room(&wb->sorted, initial_size);
}
