TOMOYO Linux Cross Reference
Linux/fs/bcachefs/journal.c


  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * bcachefs journalling code, for btree insertions
  4  *
  5  * Copyright 2012 Google, Inc.
  6  */
  7 
  8 #include "bcachefs.h"
  9 #include "alloc_foreground.h"
 10 #include "bkey_methods.h"
 11 #include "btree_gc.h"
 12 #include "btree_update.h"
 13 #include "btree_write_buffer.h"
 14 #include "buckets.h"
 15 #include "error.h"
 16 #include "journal.h"
 17 #include "journal_io.h"
 18 #include "journal_reclaim.h"
 19 #include "journal_sb.h"
 20 #include "journal_seq_blacklist.h"
 21 #include "trace.h"
 22 
 23 static const char * const bch2_journal_errors[] = {
 24 #define x(n)    #n,
 25         JOURNAL_ERRORS()
 26 #undef x
 27         NULL
 28 };
 29 
 30 static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
 31 {
 32         return seq > j->seq_ondisk;
 33 }
 34 
 35 static bool __journal_entry_is_open(union journal_res_state state)
 36 {
 37         return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
 38 }
 39 
 40 static inline unsigned nr_unwritten_journal_entries(struct journal *j)
 41 {
 42         return atomic64_read(&j->seq) - j->seq_ondisk;
 43 }
 44 
 45 static bool journal_entry_is_open(struct journal *j)
 46 {
 47         return __journal_entry_is_open(j->reservations);
 48 }
 49 
 50 static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
 51 {
 52         union journal_res_state s = READ_ONCE(j->reservations);
 53         unsigned i = seq & JOURNAL_BUF_MASK;
 54         struct journal_buf *buf = j->buf + i;
 55 
 56         prt_printf(out, "seq:\t%llu\n", seq);
 57         printbuf_indent_add(out, 2);
 58 
 59         prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i));
 60 
 61         prt_printf(out, "size:\t");
 62         prt_human_readable_u64(out, vstruct_bytes(buf->data));
 63         prt_newline(out);
 64 
 65         prt_printf(out, "expires:\t");
 66         prt_printf(out, "%li jiffies\n", buf->expires - jiffies);
 67 
 68         prt_printf(out, "flags:\t");
 69         if (buf->noflush)
 70                 prt_str(out, "noflush ");
 71         if (buf->must_flush)
 72                 prt_str(out, "must_flush ");
 73         if (buf->separate_flush)
 74                 prt_str(out, "separate_flush ");
 75         if (buf->need_flush_to_write_buffer)
 76                 prt_str(out, "need_flush_to_write_buffer ");
 77         if (buf->write_started)
 78                 prt_str(out, "write_started ");
 79         if (buf->write_allocated)
 80                 prt_str(out, "write_allocated ");
 81         if (buf->write_done)
 82                 prt_str(out, "write_done");
 83         prt_newline(out);
 84 
 85         printbuf_indent_sub(out, 2);
 86 }
 87 
 88 static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
 89 {
 90         if (!out->nr_tabstops)
 91                 printbuf_tabstop_push(out, 24);
 92 
 93         for (u64 seq = journal_last_unwritten_seq(j);
 94              seq <= journal_cur_seq(j);
 95              seq++)
 96                 bch2_journal_buf_to_text(out, j, seq);
 97         prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
 98 }
 99 
100 static inline struct journal_buf *
101 journal_seq_to_buf(struct journal *j, u64 seq)
102 {
103         struct journal_buf *buf = NULL;
104 
105         EBUG_ON(seq > journal_cur_seq(j));
106 
107         if (journal_seq_unwritten(j, seq)) {
108                 buf = j->buf + (seq & JOURNAL_BUF_MASK);
109                 EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
110         }
111         return buf;
112 }
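
/*
 * A standalone sketch of the seq-to-buffer mapping above: the journal keeps
 * at most JOURNAL_BUF_NR entries in flight, so the low bits of the sequence
 * number uniquely identify a slot. The ring size of 4 here is an illustrative
 * assumption, not the real JOURNAL_BUF_NR:
 */
#include <assert.h>
#include <stdint.h>

#define BUF_NR   4u                     /* must be a power of two */
#define BUF_MASK (BUF_NR - 1)

struct demo_buf { uint64_t seq; };

int main(void)
{
        struct demo_buf ring[BUF_NR];

        /* with at most BUF_NR unwritten entries, seq & BUF_MASK never collides */
        for (uint64_t seq = 100; seq < 100 + BUF_NR; seq++)
                ring[seq & BUF_MASK].seq = seq;

        for (uint64_t seq = 100; seq < 100 + BUF_NR; seq++)
                assert(ring[seq & BUF_MASK].seq == seq);
        return 0;
}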
113 
114 static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
115 {
116         unsigned i;
117 
118         for (i = 0; i < ARRAY_SIZE(p->list); i++)
119                 INIT_LIST_HEAD(&p->list[i]);
120         INIT_LIST_HEAD(&p->flushed);
121         atomic_set(&p->count, count);
122         p->devs.nr = 0;
123 }
124 
125 /*
126  * Detect stuck journal conditions and trigger shutdown. Technically the journal
127  * can end up stuck for a variety of reasons, such as a blocked I/O, journal
128  * reservation lockup, etc. Since this is a fatal error with potentially
129  * unpredictable characteristics, we want to be fairly conservative before we
130  * decide to shut things down.
131  *
132  * Consider the journal stuck when it appears full with no ability to commit
133  * btree transactions, discard journal buckets, or acquire a priority
134  * (reserved watermark) reservation.
135  */
136 static inline bool
137 journal_error_check_stuck(struct journal *j, int error, unsigned flags)
138 {
139         struct bch_fs *c = container_of(j, struct bch_fs, journal);
140         bool stuck = false;
141         struct printbuf buf = PRINTBUF;
142 
143         if (!(error == JOURNAL_ERR_journal_full ||
144               error == JOURNAL_ERR_journal_pin_full) ||
145             nr_unwritten_journal_entries(j) ||
146             (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
147                 return stuck;
148 
149         spin_lock(&j->lock);
150 
151         if (j->can_discard) {
152                 spin_unlock(&j->lock);
153                 return stuck;
154         }
155 
156         stuck = true;
157 
158         /*
159          * The journal shutdown path will set ->err_seq, but do it here first to
160          * serialize against concurrent failures and avoid duplicate error
161          * reports.
162          */
163         if (j->err_seq) {
164                 spin_unlock(&j->lock);
165                 return stuck;
166         }
167         j->err_seq = journal_cur_seq(j);
168         spin_unlock(&j->lock);
169 
170         bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
171                 bch2_journal_errors[error]);
172         bch2_journal_debug_to_text(&buf, j);
173         bch_err(c, "%s", buf.buf);
174 
175         printbuf_reset(&buf);
176         bch2_journal_pins_to_text(&buf, j);
177         bch_err(c, "Journal pins:\n%s", buf.buf);
178         printbuf_exit(&buf);
179 
180         bch2_fatal_error(c);
181         dump_stack();
182 
183         return stuck;
184 }
185 
186 void bch2_journal_do_writes(struct journal *j)
187 {
188         for (u64 seq = journal_last_unwritten_seq(j);
189              seq <= journal_cur_seq(j);
190              seq++) {
191                 unsigned idx = seq & JOURNAL_BUF_MASK;
192                 struct journal_buf *w = j->buf + idx;
193 
194                 if (w->write_started && !w->write_allocated)
195                         break;
196                 if (w->write_started)
197                         continue;
198 
199                 if (!journal_state_count(j->reservations, idx)) {
200                         w->write_started = true;
201                         closure_call(&w->io, bch2_journal_write, j->wq, NULL);
202                 }
203 
204                 break;
205         }
206 }
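
/*
 * bch2_journal_do_writes() above kicks writes strictly in sequence order:
 * entries already in flight are skipped, and the walk stops at the first
 * buffer that is still being set up or still holds reservations. A condensed
 * standalone sketch of that gating logic, with the state flags simplified:
 */
#include <stdbool.h>
#include <stdio.h>

struct wbuf { bool started, allocated; int refs; };

/* start at most one new write, oldest-first; return its index or -1 */
static int next_write(struct wbuf *bufs, int nr)
{
        for (int i = 0; i < nr; i++) {
                if (bufs[i].started && !bufs[i].allocated)
                        return -1;      /* write still being set up: wait */
                if (bufs[i].started)
                        continue;       /* already in flight: look further */
                if (!bufs[i].refs) {
                        bufs[i].started = true;
                        return i;       /* kick this write */
                }
                return -1;              /* oldest unstarted buf still has refs */
        }
        return -1;
}

int main(void)
{
        struct wbuf bufs[] = { { true, true, 0 }, { false, false, 0 } };

        printf("%d\n", next_write(bufs, 2));    /* -> 1 */
        return 0;
}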
207 
208 /*
209  * Final processing when the last reference of a journal buffer has been
210  * dropped. Drop the pin list reference acquired at journal entry open and write
211  * the buffer, if requested.
212  */
213 void bch2_journal_buf_put_final(struct journal *j, u64 seq)
214 {
215         lockdep_assert_held(&j->lock);
216 
217         if (__bch2_journal_pin_put(j, seq))
218                 bch2_journal_reclaim_fast(j);
219         bch2_journal_do_writes(j);
220 }
221 
222 /*
223  * Close the currently open journal entry:
224  *
225  * We don't close a journal_buf until the next journal_buf is finished writing,
226  * and can be opened again - this also initializes the next journal_buf:
227  */
228 static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
229 {
230         struct bch_fs *c = container_of(j, struct bch_fs, journal);
231         struct journal_buf *buf = journal_cur_buf(j);
232         union journal_res_state old, new;
233         unsigned sectors;
234 
235         BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
236                closed_val != JOURNAL_ENTRY_ERROR_VAL);
237 
238         lockdep_assert_held(&j->lock);
239 
240         old.v = atomic64_read(&j->reservations.counter);
241         do {
242                 new.v = old.v;
243                 new.cur_entry_offset = closed_val;
244 
245                 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
246                     old.cur_entry_offset == new.cur_entry_offset)
247                         return;
248         } while (!atomic64_try_cmpxchg(&j->reservations.counter,
249                                        &old.v, new.v));
250 
251         if (!__journal_entry_is_open(old))
252                 return;
253 
254         /* Close out old buffer: */
255         buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);
256 
257         if (trace_journal_entry_close_enabled() && trace) {
258                 struct printbuf pbuf = PRINTBUF;
259                 pbuf.atomic++;
260 
261                 prt_str(&pbuf, "entry size: ");
262                 prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
263                 prt_newline(&pbuf);
264                 bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
265                 trace_journal_entry_close(c, pbuf.buf);
266                 printbuf_exit(&pbuf);
267         }
268 
269         sectors = vstruct_blocks_plus(buf->data, c->block_bits,
270                                       buf->u64s_reserved) << c->block_bits;
271         BUG_ON(sectors > buf->sectors);
272         buf->sectors = sectors;
273 
274         /*
275          * We have to set last_seq here, _before_ opening a new journal entry:
276          *
277          * A thread may replace an old pin with a new pin on its current
278          * journal reservation - the expectation being that the journal will
279          * contain either what the old pin protected or what the new pin
280          * protects.
281          *
282          * After the old pin is dropped journal_last_seq() won't include the old
283          * pin, so we can only write the updated last_seq on the entry that
284          * contains whatever the new pin protects.
285          *
286          * Restated, we can _not_ update last_seq for a given entry if there
287          * could be a newer entry open with reservations/pins that have been
288          * taken against it.
289          *
290          * Hence, we want update/set last_seq on the current journal entry right
291          * before we open a new one:
292          */
293         buf->last_seq           = journal_last_seq(j);
294         buf->data->last_seq     = cpu_to_le64(buf->last_seq);
295         BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));
296 
297         cancel_delayed_work(&j->write_work);
298 
299         bch2_journal_space_available(j);
300 
301         __bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
302 }
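
/*
 * The close path above publishes its state change with a compare-and-swap
 * retry loop, so concurrent writers never observe a torn update of the
 * reservation word. A standalone C11 sketch of the same pattern; the field
 * layout and sentinel values are simplified assumptions, not the real
 * journal_res_state:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_CLOSED_VAL ((uint32_t)~0u - 1)
#define ENTRY_ERROR_VAL  ((uint32_t)~0u)

union res_state {
        uint64_t v;
        struct { uint32_t cur_entry_offset; uint32_t idx; };
};

static _Atomic uint64_t reservations;

/* returns true if this caller transitioned the entry from open to closed */
static bool entry_close(uint32_t closed_val)
{
        union res_state old, new;

        old.v = atomic_load(&reservations);
        do {
                new.v = old.v;
                new.cur_entry_offset = closed_val;

                /* already in error, or already at the target value: bail */
                if (old.cur_entry_offset == ENTRY_ERROR_VAL ||
                    old.cur_entry_offset == new.cur_entry_offset)
                        return false;
        } while (!atomic_compare_exchange_weak(&reservations, &old.v, new.v));

        return old.cur_entry_offset < ENTRY_CLOSED_VAL;
}

int main(void)
{
        atomic_store(&reservations, 0);                 /* open, offset 0 */
        printf("%d\n", entry_close(ENTRY_CLOSED_VAL)); /* -> 1 */
        return 0;
}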
303 
304 void bch2_journal_halt(struct journal *j)
305 {
306         spin_lock(&j->lock);
307         __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
308         if (!j->err_seq)
309                 j->err_seq = journal_cur_seq(j);
310         journal_wake(j);
311         spin_unlock(&j->lock);
312 }
313 
314 static bool journal_entry_want_write(struct journal *j)
315 {
316         bool ret = !journal_entry_is_open(j) ||
317                 journal_cur_seq(j) == journal_last_unwritten_seq(j);
318 
319         /* Don't close it yet if we already have a write in flight: */
320         if (ret)
321                 __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
322         else if (nr_unwritten_journal_entries(j)) {
323                 struct journal_buf *buf = journal_cur_buf(j);
324 
325                 if (!buf->flush_time) {
326                         buf->flush_time = local_clock() ?: 1;
327                         buf->expires = jiffies;
328                 }
329         }
330 
331         return ret;
332 }
333 
334 bool bch2_journal_entry_close(struct journal *j)
335 {
336         bool ret;
337 
338         spin_lock(&j->lock);
339         ret = journal_entry_want_write(j);
340         spin_unlock(&j->lock);
341 
342         return ret;
343 }
344 
345 /*
346  * should _only_ be called from journal_res_get() - when we actually want a
347  * journal reservation - an open journal entry means the journal is dirty:
348  */
349 static int journal_entry_open(struct journal *j)
350 {
351         struct bch_fs *c = container_of(j, struct bch_fs, journal);
352         struct journal_buf *buf = j->buf +
353                 ((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
354         union journal_res_state old, new;
355         int u64s;
356 
357         lockdep_assert_held(&j->lock);
358         BUG_ON(journal_entry_is_open(j));
359         BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
360 
361         if (j->blocked)
362                 return JOURNAL_ERR_blocked;
363 
364         if (j->cur_entry_error)
365                 return j->cur_entry_error;
366 
367         if (bch2_journal_error(j))
368                 return JOURNAL_ERR_insufficient_devices; /* -EROFS */
369 
370         if (!fifo_free(&j->pin))
371                 return JOURNAL_ERR_journal_pin_full;
372 
373         if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
374                 return JOURNAL_ERR_max_in_flight;
375 
376         BUG_ON(!j->cur_entry_sectors);
377 
378         buf->expires            =
379                 (journal_cur_seq(j) == j->flushed_seq_ondisk
380                  ? jiffies
381                  : j->last_flush_write) +
382                 msecs_to_jiffies(c->opts.journal_flush_delay);
383 
384         buf->u64s_reserved      = j->entry_u64s_reserved;
385         buf->disk_sectors       = j->cur_entry_sectors;
386         buf->sectors            = min(buf->disk_sectors, buf->buf_size >> 9);
387 
388         u64s = (int) (buf->sectors << 9) / sizeof(u64) -
389                 journal_entry_overhead(j);
390         u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
391 
392         if (u64s <= (ssize_t) j->early_journal_entries.nr)
393                 return JOURNAL_ERR_journal_full;
394 
395         if (fifo_empty(&j->pin) && j->reclaim_thread)
396                 wake_up_process(j->reclaim_thread);
397 
398         /*
399          * The fifo_push() needs to happen at the same time as j->seq is
400          * incremented for journal_last_seq() to be calculated correctly
401          */
402         atomic64_inc(&j->seq);
403         journal_pin_list_init(fifo_push_ref(&j->pin), 1);
404 
405         BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));
406 
407         BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);
408 
409         bkey_extent_init(&buf->key);
410         buf->noflush            = false;
411         buf->must_flush         = false;
412         buf->separate_flush     = false;
413         buf->flush_time         = 0;
414         buf->need_flush_to_write_buffer = true;
415         buf->write_started      = false;
416         buf->write_allocated    = false;
417         buf->write_done         = false;
418 
419         memset(buf->data, 0, sizeof(*buf->data));
420         buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
421         buf->data->u64s = 0;
422 
423         if (j->early_journal_entries.nr) {
424                 memcpy(buf->data->_data, j->early_journal_entries.data,
425                        j->early_journal_entries.nr * sizeof(u64));
426                 le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
427         }
428 
429         /*
430          * Must be set before marking the journal entry as open:
431          */
432         j->cur_entry_u64s = u64s;
433 
434         old.v = atomic64_read(&j->reservations.counter);
435         do {
436                 new.v = old.v;
437 
438                 BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);
439 
440                 new.idx++;
441                 BUG_ON(journal_state_count(new, new.idx));
442                 BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));
443 
444                 journal_state_inc(&new);
445 
446                 /* Handle any already added entries */
447                 new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
448         } while (!atomic64_try_cmpxchg(&j->reservations.counter,
449                                        &old.v, new.v));
450 
451         if (nr_unwritten_journal_entries(j) == 1)
452                 mod_delayed_work(j->wq,
453                                  &j->write_work,
454                                  msecs_to_jiffies(c->opts.journal_flush_delay));
455         journal_wake(j);
456 
457         if (j->early_journal_entries.nr)
458                 darray_exit(&j->early_journal_entries);
459         return 0;
460 }
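
/*
 * journal_entry_open() above sizes the new entry in u64s: the buffer's byte
 * capacity divided by sizeof(u64), minus per-entry overhead, clamped so the
 * offset can never alias the CLOSED sentinel. A worked standalone sketch of
 * that arithmetic; the sentinel value and the overhead of 8 u64s are
 * illustrative assumptions:
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CLOSED_VAL ((1u << 20) - 1)        /* assumed sentinel */

static int entry_capacity_u64s(unsigned sectors, unsigned overhead_u64s)
{
        int u64s = (int)(sectors << 9) / (int)sizeof(uint64_t) - overhead_u64s;

        /* clamp to [0, CLOSED_VAL - 1] so the offset never hits a sentinel */
        if (u64s < 0)
                u64s = 0;
        if (u64s > (int)DEMO_CLOSED_VAL - 1)
                u64s = DEMO_CLOSED_VAL - 1;
        return u64s;
}

int main(void)
{
        /* an 8-sector (4KiB) buffer: 4096 / 8 = 512 u64s before overhead */
        printf("%d\n", entry_capacity_u64s(8, 8));      /* -> 504 */
        return 0;
}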
461 
462 static bool journal_quiesced(struct journal *j)
463 {
464         bool ret = atomic64_read(&j->seq) == j->seq_ondisk;
465 
466         if (!ret)
467                 bch2_journal_entry_close(j);
468         return ret;
469 }
470 
471 static void journal_quiesce(struct journal *j)
472 {
473         wait_event(j->wait, journal_quiesced(j));
474 }
475 
476 static void journal_write_work(struct work_struct *work)
477 {
478         struct journal *j = container_of(work, struct journal, write_work.work);
479 
480         spin_lock(&j->lock);
481         if (__journal_entry_is_open(j->reservations)) {
482                 long delta = journal_cur_buf(j)->expires - jiffies;
483 
484                 if (delta > 0)
485                         mod_delayed_work(j->wq, &j->write_work, delta);
486                 else
487                         __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
488         }
489         spin_unlock(&j->lock);
490 }
491 
492 static int __journal_res_get(struct journal *j, struct journal_res *res,
493                              unsigned flags)
494 {
495         struct bch_fs *c = container_of(j, struct bch_fs, journal);
496         struct journal_buf *buf;
497         bool can_discard;
498         int ret;
499 retry:
500         if (journal_res_get_fast(j, res, flags))
501                 return 0;
502 
503         if (bch2_journal_error(j))
504                 return -BCH_ERR_erofs_journal_err;
505 
506         if (j->blocked)
507                 return -BCH_ERR_journal_res_get_blocked;
508 
509         if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
510                 ret = JOURNAL_ERR_journal_full;
511                 can_discard = j->can_discard;
512                 goto out;
513         }
514 
515         if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
516                 ret = JOURNAL_ERR_max_in_flight;
517                 goto out;
518         }
519 
520         spin_lock(&j->lock);
521 
522         /*
523          * Recheck after taking the lock, so we don't race with another thread
524          * that just did journal_entry_open() and call bch2_journal_entry_close()
525          * unnecessarily
526          */
527         if (journal_res_get_fast(j, res, flags)) {
528                 ret = 0;
529                 goto unlock;
530         }
531 
532         /*
533          * If we couldn't get a reservation because the current buf filled up,
534          * and we had room for a bigger entry on disk, signal that we want to
535          * realloc the journal bufs:
536          */
537         buf = journal_cur_buf(j);
538         if (journal_entry_is_open(j) &&
539             buf->buf_size >> 9 < buf->disk_sectors &&
540             buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
541                 j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
542 
543         __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
544         ret = journal_entry_open(j) ?: JOURNAL_ERR_retry;
545 unlock:
546         can_discard = j->can_discard;
547         spin_unlock(&j->lock);
548 out:
549         if (ret == JOURNAL_ERR_retry)
550                 goto retry;
551         if (!ret)
552                 return 0;
553 
554         if (journal_error_check_stuck(j, ret, flags))
555                 ret = -BCH_ERR_journal_res_get_blocked;
556 
557         if (ret == JOURNAL_ERR_max_in_flight &&
558             track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true)) {
559 
560                 struct printbuf buf = PRINTBUF;
561                 prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
562                 bch2_journal_bufs_to_text(&buf, j);
563                 trace_journal_entry_full(c, buf.buf);
564                 printbuf_exit(&buf);
565                 count_event(c, journal_entry_full);
566         }
567 
568         /*
569          * Journal is full - can't rely on reclaim from work item due to
570          * freezing:
571          */
572         if ((ret == JOURNAL_ERR_journal_full ||
573              ret == JOURNAL_ERR_journal_pin_full) &&
574             !(flags & JOURNAL_RES_GET_NONBLOCK)) {
575                 if (can_discard) {
576                         bch2_journal_do_discards(j);
577                         goto retry;
578                 }
579 
580                 if (mutex_trylock(&j->reclaim_lock)) {
581                         bch2_journal_reclaim(j);
582                         mutex_unlock(&j->reclaim_lock);
583                 }
584         }
585 
586         return ret == JOURNAL_ERR_insufficient_devices
587                 ? -BCH_ERR_erofs_journal_err
588                 : -BCH_ERR_journal_res_get_blocked;
589 }
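
/*
 * __journal_res_get() above follows a common lockless pattern: try the fast
 * path, take the lock and retry the fast path (to avoid racing with a
 * concurrent journal_entry_open()), then do the slow work and loop. A
 * condensed standalone sketch of that control flow; the names, the space
 * counter, and the error codes are illustrative assumptions:
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { OK = 0, ERR_RETRY = 1 };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic int space = 0;           /* u64s left in the open entry */

/* lockless fast path: claim one unit of space if any remains */
static bool try_fast(void)
{
        int old = atomic_load(&space);

        while (old > 0)
                if (atomic_compare_exchange_weak(&space, &old, old - 1))
                        return true;
        return false;
}

/* slow path, called under the lock: open a fresh entry */
static int open_new_entry(void)
{
        atomic_store(&space, 128);      /* assumed entry capacity */
        return 0;
}

static int res_get(void)
{
retry:
        if (try_fast())
                return OK;

        pthread_mutex_lock(&lock);

        /* recheck under the lock: someone may have opened an entry already */
        if (try_fast()) {
                pthread_mutex_unlock(&lock);
                return OK;
        }

        int ret = open_new_entry() ?: ERR_RETRY;

        pthread_mutex_unlock(&lock);

        if (ret == ERR_RETRY)
                goto retry;
        return ret;
}

int main(void)
{
        printf("res_get -> %d\n", res_get());   /* -> 0 */
        return 0;
}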
590 
591 /*
592  * Essentially the entry function to the journaling code. When bcachefs is doing
593  * a btree insert, it calls this function to get the current journal write.
594  * The journal write is the structure used to set up journal writes. The calling
595  * function will then add its keys to the structure, queuing them for the next
596  * write.
597  *
598  * To ensure forward progress, the current task must not be holding any
599  * btree node write locks.
600  */
601 int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
602                                   unsigned flags)
603 {
604         int ret;
605 
606         closure_wait_event(&j->async_wait,
607                    (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
608                    (flags & JOURNAL_RES_GET_NONBLOCK));
609         return ret;
610 }
611 
612 /* journal_entry_res: */
613 
614 void bch2_journal_entry_res_resize(struct journal *j,
615                                    struct journal_entry_res *res,
616                                    unsigned new_u64s)
617 {
618         union journal_res_state state;
619         int d = new_u64s - res->u64s;
620 
621         spin_lock(&j->lock);
622 
623         j->entry_u64s_reserved += d;
624         if (d <= 0)
625                 goto out;
626 
627         j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
628         smp_mb();
629         state = READ_ONCE(j->reservations);
630 
631         if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
632             state.cur_entry_offset > j->cur_entry_u64s) {
633                 j->cur_entry_u64s += d;
634                 /*
635                  * Not enough room in current journal entry, have to flush it:
636                  */
637                 __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
638         } else {
639                 journal_cur_buf(j)->u64s_reserved += d;
640         }
641 out:
642         spin_unlock(&j->lock);
643         res->u64s += d;
644 }
645 
646 /* journal flushing: */
647 
648 /**
649  * bch2_journal_flush_seq_async - wait for a journal entry to be written
650  * @j:          journal object
651  * @seq:        seq to flush
652  * @parent:     closure object to wait with
653  * Returns:     1 if @seq has already been flushed, 0 if @seq is being flushed,
654  *              -EIO if @seq will never be flushed
655  *
656  * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
657  * necessary
658  */
659 int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
660                                  struct closure *parent)
661 {
662         struct journal_buf *buf;
663         int ret = 0;
664 
665         if (seq <= j->flushed_seq_ondisk)
666                 return 1;
667 
668         spin_lock(&j->lock);
669 
670         if (WARN_ONCE(seq > journal_cur_seq(j),
671                       "requested to flush journal seq %llu, but currently at %llu",
672                       seq, journal_cur_seq(j)))
673                 goto out;
674 
675         /* Recheck under lock: */
676         if (j->err_seq && seq >= j->err_seq) {
677                 ret = -EIO;
678                 goto out;
679         }
680 
681         if (seq <= j->flushed_seq_ondisk) {
682                 ret = 1;
683                 goto out;
684         }
685 
686         /* if seq was written, but not flushed - flush a newer one instead */
687         seq = max(seq, journal_last_unwritten_seq(j));
688 
689 recheck_need_open:
690         if (seq > journal_cur_seq(j)) {
691                 struct journal_res res = { 0 };
692 
693                 if (journal_entry_is_open(j))
694                         __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
695 
696                 spin_unlock(&j->lock);
697 
698                 /*
699                  * We're called from bch2_journal_flush_seq() -> wait_event();
700                  * but this might block. We won't usually block, so we won't
701                  * livelock:
702                  */
703                 sched_annotate_sleep();
704                 ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
705                 if (ret)
706                         return ret;
707 
708                 seq = res.seq;
709                 buf = journal_seq_to_buf(j, seq);
710                 buf->must_flush = true;
711 
712                 if (!buf->flush_time) {
713                         buf->flush_time = local_clock() ?: 1;
714                         buf->expires = jiffies;
715                 }
716 
717                 if (parent && !closure_wait(&buf->wait, parent))
718                         BUG();
719 
720                 bch2_journal_res_put(j, &res);
721 
722                 spin_lock(&j->lock);
723                 goto want_write;
724         }
725 
726         /*
727          * if write was kicked off without a flush, or if we promised it
728          * wouldn't be a flush, flush the next sequence number instead
729          */
730         buf = journal_seq_to_buf(j, seq);
731         if (buf->noflush) {
732                 seq++;
733                 goto recheck_need_open;
734         }
735 
736         buf->must_flush = true;
737 
738         if (parent && !closure_wait(&buf->wait, parent))
739                 BUG();
740 want_write:
741         if (seq == journal_cur_seq(j))
742                 journal_entry_want_write(j);
743 out:
744         spin_unlock(&j->lock);
745         return ret;
746 }
747 
748 int bch2_journal_flush_seq(struct journal *j, u64 seq)
749 {
750         u64 start_time = local_clock();
751         int ret, ret2;
752 
753         /*
754          * Don't update time_stats when @seq is already flushed:
755          */
756         if (seq <= j->flushed_seq_ondisk)
757                 return 0;
758 
759         ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));
760 
761         if (!ret)
762                 bch2_time_stats_update(j->flush_seq_time, start_time);
763 
764         return ret ?: ret2 < 0 ? ret2 : 0;
765 }
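
/*
 * bch2_journal_flush_seq() above uses an idiom worth noting: the
 * wait_event_interruptible() condition both invokes the async flush and
 * captures its result, so waiting stops once the flush reports done (>0) or
 * error (<0), and ret2 carries that value out. A minimal userspace rendering
 * of the same idiom, with a pthread condition variable standing in for
 * wait_event():
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
static int flushed_seq;

/* stand-in for bch2_journal_flush_seq_async(): >0 done, 0 in progress */
static int flush_async(int seq)
{
        return seq <= flushed_seq ? 1 : 0;
}

static int flush_seq(int seq)
{
        int ret2;

        pthread_mutex_lock(&lk);
        /* condition with a side effect: capture the async call's result */
        while (!(ret2 = flush_async(seq)))
                pthread_cond_wait(&cv, &lk);
        pthread_mutex_unlock(&lk);

        return ret2 < 0 ? ret2 : 0;
}

int main(void)
{
        flushed_seq = 5;
        printf("%d\n", flush_seq(3));   /* -> 0: already flushed */
        return 0;
}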
766 
767 /*
768  * bch2_journal_flush_async - if there is an open journal entry, or a journal
769  * entry still being written, write it and wait for the write to complete
770  */
771 void bch2_journal_flush_async(struct journal *j, struct closure *parent)
772 {
773         bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
774 }
775 
776 int bch2_journal_flush(struct journal *j)
777 {
778         return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
779 }
780 
781 /*
782  * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
783  * @seq
784  */
785 bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
786 {
787         struct bch_fs *c = container_of(j, struct bch_fs, journal);
788         u64 unwritten_seq;
789         bool ret = false;
790 
791         if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
792                 return false;
793 
794         if (seq <= c->journal.flushed_seq_ondisk)
795                 return false;
796 
797         spin_lock(&j->lock);
798         if (seq <= c->journal.flushed_seq_ondisk)
799                 goto out;
800 
801         for (unwritten_seq = journal_last_unwritten_seq(j);
802              unwritten_seq < seq;
803              unwritten_seq++) {
804                 struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);
805 
806                 /* journal flush already in flight, or flush requested */
807                 if (buf->must_flush)
808                         goto out;
809 
810                 buf->noflush = true;
811         }
812 
813         ret = true;
814 out:
815         spin_unlock(&j->lock);
816         return ret;
817 }
818 
819 int bch2_journal_meta(struct journal *j)
820 {
821         struct journal_buf *buf;
822         struct journal_res res;
823         int ret;
824 
825         memset(&res, 0, sizeof(res));
826 
827         ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
828         if (ret)
829                 return ret;
830 
831         buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
832         buf->must_flush = true;
833 
834         if (!buf->flush_time) {
835                 buf->flush_time = local_clock() ?: 1;
836                 buf->expires = jiffies;
837         }
838 
839         bch2_journal_res_put(j, &res);
840 
841         return bch2_journal_flush_seq(j, res.seq);
842 }
843 
844 /* block/unblock the journal: */
845 
846 void bch2_journal_unblock(struct journal *j)
847 {
848         spin_lock(&j->lock);
849         j->blocked--;
850         spin_unlock(&j->lock);
851 
852         journal_wake(j);
853 }
854 
855 void bch2_journal_block(struct journal *j)
856 {
857         spin_lock(&j->lock);
858         j->blocked++;
859         spin_unlock(&j->lock);
860 
861         journal_quiesce(j);
862 }
863 
864 static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
865 {
866         struct journal_buf *ret = NULL;
867 
868         /* We're inside wait_event(), but using mutex_lock(): */
869         sched_annotate_sleep();
870         mutex_lock(&j->buf_lock);
871         spin_lock(&j->lock);
872         max_seq = min(max_seq, journal_cur_seq(j));
873 
874         for (u64 seq = journal_last_unwritten_seq(j);
875              seq <= max_seq;
876              seq++) {
877                 unsigned idx = seq & JOURNAL_BUF_MASK;
878                 struct journal_buf *buf = j->buf + idx;
879 
880                 if (buf->need_flush_to_write_buffer) {
881                         if (seq == journal_cur_seq(j))
882                                 __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
883 
884                         union journal_res_state s;
885                         s.v = atomic64_read_acquire(&j->reservations.counter);
886 
887                         ret = journal_state_count(s, idx)
888                                 ? ERR_PTR(-EAGAIN)
889                                 : buf;
890                         break;
891                 }
892         }
893 
894         spin_unlock(&j->lock);
895         if (IS_ERR_OR_NULL(ret))
896                 mutex_unlock(&j->buf_lock);
897         return ret;
898 }
899 
900 struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
901 {
902         struct journal_buf *ret;
903 
904         wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, max_seq)) != ERR_PTR(-EAGAIN));
905         return ret;
906 }
907 
908 /* allocate journal on a device: */
909 
910 static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
911                                          bool new_fs, struct closure *cl)
912 {
913         struct bch_fs *c = ca->fs;
914         struct journal_device *ja = &ca->journal;
915         u64 *new_bucket_seq = NULL, *new_buckets = NULL;
916         struct open_bucket **ob = NULL;
917         long *bu = NULL;
918         unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
919         int ret = 0;
920 
921         BUG_ON(nr <= ja->nr);
922 
923         bu              = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
924         ob              = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
925         new_buckets     = kcalloc(nr, sizeof(u64), GFP_KERNEL);
926         new_bucket_seq  = kcalloc(nr, sizeof(u64), GFP_KERNEL);
927         if (!bu || !ob || !new_buckets || !new_bucket_seq) {
928                 ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
929                 goto err_free;
930         }
931 
932         for (nr_got = 0; nr_got < nr_want; nr_got++) {
933                 if (new_fs) {
934                         bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
935                         if (bu[nr_got] < 0) {
936                                 ret = -BCH_ERR_ENOSPC_bucket_alloc;
937                                 break;
938                         }
939                 } else {
940                         ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal,
941                                                        BCH_DATA_journal, cl);
942                         ret = PTR_ERR_OR_ZERO(ob[nr_got]);
943                         if (ret)
944                                 break;
945 
946                         ret = bch2_trans_run(c,
947                                 bch2_trans_mark_metadata_bucket(trans, ca,
948                                                 ob[nr_got]->bucket, BCH_DATA_journal,
949                                                 ca->mi.bucket_size, BTREE_TRIGGER_transactional));
950                         if (ret) {
951                                 bch2_open_bucket_put(c, ob[nr_got]);
952                                 bch_err_msg(c, ret, "marking new journal buckets");
953                                 break;
954                         }
955 
956                         bu[nr_got] = ob[nr_got]->bucket;
957                 }
958         }
959 
960         if (!nr_got)
961                 goto err_free;
962 
963         /* Don't return an error if we successfully allocated some buckets: */
964         ret = 0;
965 
966         if (c) {
967                 bch2_journal_flush_all_pins(&c->journal);
968                 bch2_journal_block(&c->journal);
969                 mutex_lock(&c->sb_lock);
970         }
971 
972         memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
973         memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));
974 
975         BUG_ON(ja->discard_idx > ja->nr);
976 
977         pos = ja->discard_idx ?: ja->nr;
978 
979         memmove(new_buckets + pos + nr_got,
980                 new_buckets + pos,
981                 sizeof(new_buckets[0]) * (ja->nr - pos));
982         memmove(new_bucket_seq + pos + nr_got,
983                 new_bucket_seq + pos,
984                 sizeof(new_bucket_seq[0]) * (ja->nr - pos));
985 
986         for (i = 0; i < nr_got; i++) {
987                 new_buckets[pos + i] = bu[i];
988                 new_bucket_seq[pos + i] = 0;
989         }
990 
991         nr = ja->nr + nr_got;
992 
993         ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
994         if (ret)
995                 goto err_unblock;
996 
997         if (!new_fs)
998                 bch2_write_super(c);
999 
1000         /* Commit: */
1001         if (c)
1002                 spin_lock(&c->journal.lock);
1003 
1004         swap(new_buckets,       ja->buckets);
1005         swap(new_bucket_seq,    ja->bucket_seq);
1006         ja->nr = nr;
1007 
1008         if (pos <= ja->discard_idx)
1009                 ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
1010         if (pos <= ja->dirty_idx_ondisk)
1011                 ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
1012         if (pos <= ja->dirty_idx)
1013                 ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
1014         if (pos <= ja->cur_idx)
1015                 ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;
1016 
1017         if (c)
1018                 spin_unlock(&c->journal.lock);
1019 err_unblock:
1020         if (c) {
1021                 bch2_journal_unblock(&c->journal);
1022                 mutex_unlock(&c->sb_lock);
1023         }
1024 
1025         if (ret && !new_fs)
1026                 for (i = 0; i < nr_got; i++)
1027                         bch2_trans_run(c,
1028                                 bch2_trans_mark_metadata_bucket(trans, ca,
1029                                                 bu[i], BCH_DATA_free, 0,
1030                                                 BTREE_TRIGGER_transactional));
1031 err_free:
1032         if (!new_fs)
1033                 for (i = 0; i < nr_got; i++)
1034                         bch2_open_bucket_put(c, ob[i]);
1035 
1036         kfree(new_bucket_seq);
1037         kfree(new_buckets);
1038         kfree(ob);
1039         kfree(bu);
1040         return ret;
1041 }
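
/*
 * The resize path above grows the bucket array by copying the old contents,
 * then shifting the tail up by nr_got slots with memmove() so the new buckets
 * land at the discard cursor (pos) and the circular indices stay ordered. A
 * standalone sketch of that splice with illustrative values:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint64_t old_buckets[] = { 10, 11, 12, 13 };    /* existing buckets */
        uint64_t got[]         = { 20, 21 };            /* newly allocated */
        unsigned nr = 4, nr_got = 2, pos = 1;           /* pos = discard_idx */
        uint64_t new_buckets[6];

        memcpy(new_buckets, old_buckets, nr * sizeof(old_buckets[0]));

        /* open a gap of nr_got slots at pos, shifting the tail up: */
        memmove(new_buckets + pos + nr_got,
                new_buckets + pos,
                sizeof(new_buckets[0]) * (nr - pos));

        for (unsigned i = 0; i < nr_got; i++)
                new_buckets[pos + i] = got[i];

        for (unsigned i = 0; i < nr + nr_got; i++)
                printf("%llu ", (unsigned long long)new_buckets[i]);
        printf("\n");                   /* prints: 10 20 21 11 12 13 */
        return 0;
}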
1042 
1043 /*
1044  * Allocate more journal space at runtime - not currently making use of it, but
1045  * the code works:
1046  */
1047 int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
1048                                 unsigned nr)
1049 {
1050         struct journal_device *ja = &ca->journal;
1051         struct closure cl;
1052         int ret = 0;
1053 
1054         closure_init_stack(&cl);
1055 
1056         down_write(&c->state_lock);
1057 
1058         /* don't handle reducing nr of buckets yet: */
1059         if (nr < ja->nr)
1060                 goto unlock;
1061 
1062         while (ja->nr < nr) {
1063                 struct disk_reservation disk_res = { 0, 0, 0 };
1064 
1065                 /*
1066                  * note: journal buckets aren't really counted as _sectors_ used yet, so
1067                  * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
1068                  * when space used goes up without a reservation - but we do need the
1069                  * reservation to ensure we'll actually be able to allocate:
1070                  *
1071                  * XXX: that's not right, disk reservations only ensure a
1072                  * filesystem-wide allocation will succeed, this is a device
1073                  * specific allocation - we can hang here:
1074                  */
1075 
1076                 ret = bch2_disk_reservation_get(c, &disk_res,
1077                                                 bucket_to_sector(ca, nr - ja->nr), 1, 0);
1078                 if (ret)
1079                         break;
1080 
1081                 ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
1082 
1083                 bch2_disk_reservation_put(c, &disk_res);
1084 
1085                 closure_sync(&cl);
1086 
1087                 if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
1088                         break;
1089         }
1090 
1091         bch_err_fn(c, ret);
1092 unlock:
1093         up_write(&c->state_lock);
1094         return ret;
1095 }
1096 
1097 int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
1098 {
1099         unsigned nr;
1100         int ret;
1101 
1102         if (dynamic_fault("bcachefs:add:journal_alloc")) {
1103                 ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
1104                 goto err;
1105         }
1106 
1107         /* 1/128th of the device by default: */
1108         nr = ca->mi.nbuckets >> 7;
1109 
1110         /*
1111          * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
1112          * is smaller:
1113          */
1114         nr = clamp_t(unsigned, nr,
1115                      BCH_JOURNAL_BUCKETS_MIN,
1116                      min(1 << 13,
1117                          (1 << 24) / ca->mi.bucket_size));
1118 
1119         ret = __bch2_set_nr_journal_buckets(ca, nr, new_fs, NULL);
1120 err:
1121         bch_err_fn(ca, ret);
1122         return ret;
1123 }
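
/*
 * The sizing above takes 1/128th of the device's buckets, clamped between
 * BCH_JOURNAL_BUCKETS_MIN and the smaller of 8192 buckets or 8GiB worth of
 * sectors. A worked standalone version of that arithmetic; the floor of 8 is
 * an illustrative assumption:
 */
#include <stdio.h>

#define DEMO_JOURNAL_BUCKETS_MIN 8      /* assumed floor, for illustration */

static unsigned journal_nr_buckets(unsigned long long nbuckets,
                                   unsigned bucket_size_sectors)
{
        unsigned nr = nbuckets >> 7;                    /* 1/128th of device */
        unsigned hi = (1u << 24) / bucket_size_sectors; /* 8GiB in sectors */

        if (hi > (1u << 13))                            /* at most 8192 */
                hi = 1u << 13;
        if (nr > hi)
                nr = hi;
        if (nr < DEMO_JOURNAL_BUCKETS_MIN)
                nr = DEMO_JOURNAL_BUCKETS_MIN;
        return nr;
}

int main(void)
{
        /* 1TiB device with 1MiB buckets: 2^20 buckets, 2048-sector buckets */
        printf("%u\n", journal_nr_buckets(1ULL << 20, 2048));   /* -> 8192 */
        return 0;
}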
1124 
1125 int bch2_fs_journal_alloc(struct bch_fs *c)
1126 {
1127         for_each_online_member(c, ca) {
1128                 if (ca->journal.nr)
1129                         continue;
1130 
1131                 int ret = bch2_dev_journal_alloc(ca, true);
1132                 if (ret) {
1133                         percpu_ref_put(&ca->io_ref);
1134                         return ret;
1135                 }
1136         }
1137 
1138         return 0;
1139 }
1140 
1141 /* startup/shutdown: */
1142 
1143 static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
1144 {
1145         bool ret = false;
1146         u64 seq;
1147 
1148         spin_lock(&j->lock);
1149         for (seq = journal_last_unwritten_seq(j);
1150              seq <= journal_cur_seq(j) && !ret;
1151              seq++) {
1152                 struct journal_buf *buf = journal_seq_to_buf(j, seq);
1153 
1154                 if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
1155                         ret = true;
1156         }
1157         spin_unlock(&j->lock);
1158 
1159         return ret;
1160 }
1161 
1162 void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
1163 {
1164         wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
1165 }
1166 
1167 void bch2_fs_journal_stop(struct journal *j)
1168 {
1169         if (!test_bit(JOURNAL_running, &j->flags))
1170                 return;
1171 
1172         bch2_journal_reclaim_stop(j);
1173         bch2_journal_flush_all_pins(j);
1174 
1175         wait_event(j->wait, bch2_journal_entry_close(j));
1176 
1177         /*
1178          * Always write a new journal entry, to make sure the clock hands are up
1179          * to date (and match the superblock)
1180          */
1181         bch2_journal_meta(j);
1182 
1183         journal_quiesce(j);
1184         cancel_delayed_work_sync(&j->write_work);
1185 
1186         WARN(!bch2_journal_error(j) &&
1187              test_bit(JOURNAL_replay_done, &j->flags) &&
1188              j->last_empty_seq != journal_cur_seq(j),
1189              "journal shutdown error: cur seq %llu but last empty seq %llu",
1190              journal_cur_seq(j), j->last_empty_seq);
1191 
1192         if (!bch2_journal_error(j))
1193                 clear_bit(JOURNAL_running, &j->flags);
1194 }
1195 
1196 int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
1197 {
1198         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1199         struct journal_entry_pin_list *p;
1200         struct journal_replay *i, **_i;
1201         struct genradix_iter iter;
1202         bool had_entries = false;
1203         u64 last_seq = cur_seq, nr, seq;
1204 
1205         genradix_for_each_reverse(&c->journal_entries, iter, _i) {
1206                 i = *_i;
1207 
1208                 if (journal_replay_ignore(i))
1209                         continue;
1210 
1211                 last_seq = le64_to_cpu(i->j.last_seq);
1212                 break;
1213         }
1214 
1215         nr = cur_seq - last_seq;
1216 
1217         if (nr + 1 > j->pin.size) {
1218                 free_fifo(&j->pin);
1219                 init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
1220                 if (!j->pin.data) {
1221                         bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
1222                         return -BCH_ERR_ENOMEM_journal_pin_fifo;
1223                 }
1224         }
1225 
1226         j->replay_journal_seq   = last_seq;
1227         j->replay_journal_seq_end = cur_seq;
1228         j->last_seq_ondisk      = last_seq;
1229         j->flushed_seq_ondisk   = cur_seq - 1;
1230         j->seq_ondisk           = cur_seq - 1;
1231         j->pin.front            = last_seq;
1232         j->pin.back             = cur_seq;
1233         atomic64_set(&j->seq, cur_seq - 1);
1234 
1235         fifo_for_each_entry_ptr(p, &j->pin, seq)
1236                 journal_pin_list_init(p, 1);
1237 
1238         genradix_for_each(&c->journal_entries, iter, _i) {
1239                 i = *_i;
1240 
1241                 if (journal_replay_ignore(i))
1242                         continue;
1243 
1244                 seq = le64_to_cpu(i->j.seq);
1245                 BUG_ON(seq >= cur_seq);
1246 
1247                 if (seq < last_seq)
1248                         continue;
1249 
1250                 if (journal_entry_empty(&i->j))
1251                         j->last_empty_seq = le64_to_cpu(i->j.seq);
1252 
1253                 p = journal_seq_pin(j, seq);
1254 
1255                 p->devs.nr = 0;
1256                 darray_for_each(i->ptrs, ptr)
1257                         bch2_dev_list_add_dev(&p->devs, ptr->dev);
1258 
1259                 had_entries = true;
1260         }
1261 
1262         if (!had_entries)
1263                 j->last_empty_seq = cur_seq - 1; /* to match j->seq */
1264 
1265         spin_lock(&j->lock);
1266 
1267         set_bit(JOURNAL_running, &j->flags);
1268         j->last_flush_write = jiffies;
1269 
1270         j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
1271         j->reservations.unwritten_idx++;
1272 
1273         c->last_bucket_seq_cleanup = journal_cur_seq(j);
1274 
1275         bch2_journal_space_available(j);
1276         spin_unlock(&j->lock);
1277 
1278         return bch2_journal_reclaim_start(j);
1279 }
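
/*
 * bch2_fs_journal_start() above positions the pin FIFO so its window
 * [front, back) covers exactly [last_seq, cur_seq): every sequence number
 * that may still be referenced gets a pin list, and indexing by seq works
 * because the window is contiguous. A minimal sketch of such a seq-indexed
 * window; the capacity and layout are simplified assumptions:
 */
#include <assert.h>
#include <stdint.h>

#define PIN_NR 16u                      /* power of two, assumed capacity */

struct pin_fifo {
        uint64_t front, back;           /* live window: [front, back) */
        int      count[PIN_NR];
};

static int *seq_pin(struct pin_fifo *f, uint64_t seq)
{
        assert(seq >= f->front && seq < f->back);
        return &f->count[seq & (PIN_NR - 1)];
}

int main(void)
{
        struct pin_fifo f = { .front = 90, .back = 100 };

        for (uint64_t s = f.front; s < f.back; s++)
                *seq_pin(&f, s) = 1;    /* cf. journal_pin_list_init() */

        assert(*seq_pin(&f, 95) == 1);
        return 0;
}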
1280 
1281 /* init/exit: */
1282 
1283 void bch2_dev_journal_exit(struct bch_dev *ca)
1284 {
1285         struct journal_device *ja = &ca->journal;
1286 
1287         for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
1288                 kfree(ja->bio[i]);
1289                 ja->bio[i] = NULL;
1290         }
1291 
1292         kfree(ja->buckets);
1293         kfree(ja->bucket_seq);
1294         ja->buckets     = NULL;
1295         ja->bucket_seq  = NULL;
1296 }
1297 
1298 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
1299 {
1300         struct journal_device *ja = &ca->journal;
1301         struct bch_sb_field_journal *journal_buckets =
1302                 bch2_sb_field_get(sb, journal);
1303         struct bch_sb_field_journal_v2 *journal_buckets_v2 =
1304                 bch2_sb_field_get(sb, journal_v2);
1305 
1306         ja->nr = 0;
1307 
1308         if (journal_buckets_v2) {
1309                 unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
1310 
1311                 for (unsigned i = 0; i < nr; i++)
1312                         ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
1313         } else if (journal_buckets) {
1314                 ja->nr = bch2_nr_journal_buckets(journal_buckets);
1315         }
1316 
1317         ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1318         if (!ja->bucket_seq)
1319                 return -BCH_ERR_ENOMEM_dev_journal_init;
1320 
1321         unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
1322 
1323         for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
1324                 ja->bio[i] = kmalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
1325                                      nr_bvecs), GFP_KERNEL);
1326                 if (!ja->bio[i])
1327                         return -BCH_ERR_ENOMEM_dev_journal_init;
1328 
1329                 ja->bio[i]->ca = ca;
1330                 ja->bio[i]->buf_idx = i;
1331                 bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
1332         }
1333 
1334         ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1335         if (!ja->buckets)
1336                 return -BCH_ERR_ENOMEM_dev_journal_init;
1337 
1338         if (journal_buckets_v2) {
1339                 unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
1340                 unsigned dst = 0;
1341 
1342                 for (unsigned i = 0; i < nr; i++)
1343                         for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
1344                                 ja->buckets[dst++] =
1345                                         le64_to_cpu(journal_buckets_v2->d[i].start) + j;
1346         } else if (journal_buckets) {
1347                 for (unsigned i = 0; i < ja->nr; i++)
1348                         ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
1349         }
1350 
1351         return 0;
1352 }
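
/*
 * The journal_v2 superblock field stores buckets as (start, nr) ranges rather
 * than one u64 per bucket; the loop above flattens those ranges into a plain
 * array. A standalone sketch of the decode, with the struct layout simplified
 * and the on-disk little-endian conversion omitted:
 */
#include <stdint.h>
#include <stdio.h>

struct bucket_range { uint64_t start, nr; };

int main(void)
{
        /* two ranges: buckets 100..103 and 200..201 */
        struct bucket_range d[] = { { 100, 4 }, { 200, 2 } };
        uint64_t buckets[6];
        unsigned dst = 0;

        for (unsigned i = 0; i < 2; i++)
                for (unsigned j = 0; j < d[i].nr; j++)
                        buckets[dst++] = d[i].start + j;

        for (unsigned i = 0; i < dst; i++)
                printf("%llu ", (unsigned long long)buckets[i]);
        printf("\n");                   /* prints: 100 101 102 103 200 201 */
        return 0;
}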
1353 
1354 void bch2_fs_journal_exit(struct journal *j)
1355 {
1356         if (j->wq)
1357                 destroy_workqueue(j->wq);
1358 
1359         darray_exit(&j->early_journal_entries);
1360 
1361         for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
1362                 kvfree(j->buf[i].data);
1363         free_fifo(&j->pin);
1364 }
1365 
1366 int bch2_fs_journal_init(struct journal *j)
1367 {
1368         static struct lock_class_key res_key;
1369 
1370         mutex_init(&j->buf_lock);
1371         spin_lock_init(&j->lock);
1372         spin_lock_init(&j->err_lock);
1373         init_waitqueue_head(&j->wait);
1374         INIT_DELAYED_WORK(&j->write_work, journal_write_work);
1375         init_waitqueue_head(&j->reclaim_wait);
1376         init_waitqueue_head(&j->pin_flush_wait);
1377         mutex_init(&j->reclaim_lock);
1378         mutex_init(&j->discard_lock);
1379 
1380         lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
1381 
1382         atomic64_set(&j->reservations.counter,
1383                 ((union journal_res_state)
1384                  { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
1385 
1386         if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
1387                 return -BCH_ERR_ENOMEM_journal_pin_fifo;
1388 
1389         for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
1390                 j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
1391                 j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
1392                 if (!j->buf[i].data)
1393                         return -BCH_ERR_ENOMEM_journal_buf;
1394                 j->buf[i].idx = i;
1395         }
1396 
1397         j->pin.front = j->pin.back = 1;
1398 
1399         j->wq = alloc_workqueue("bcachefs_journal",
1400                                 WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
1401         if (!j->wq)
1402                 return -BCH_ERR_ENOMEM_fs_other_alloc;
1403         return 0;
1404 }
1405 
1406 /* debug: */
1407 
1408 static const char * const bch2_journal_flags_strs[] = {
1409 #define x(n)    #n,
1410         JOURNAL_FLAGS()
1411 #undef x
1412         NULL
1413 };
1414 
1415 void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1416 {
1417         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1418         union journal_res_state s;
1419         unsigned long now = jiffies;
1420         u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;
1421 
1422         printbuf_tabstops_reset(out);
1423         printbuf_tabstop_push(out, 28);
1424         out->atomic++;
1425 
1426         rcu_read_lock();
1427         s = READ_ONCE(j->reservations);
1428 
1429         prt_printf(out, "flags:\t");
1430         prt_bitflags(out, bch2_journal_flags_strs, j->flags);
1431         prt_newline(out);
1432         prt_printf(out, "dirty journal entries:\t%llu/%llu\n",  fifo_used(&j->pin), j->pin.size);
1433         prt_printf(out, "seq:\t%llu\n",                         journal_cur_seq(j));
1434         prt_printf(out, "seq_ondisk:\t%llu\n",                  j->seq_ondisk);
1435         prt_printf(out, "last_seq:\t%llu\n",                    journal_last_seq(j));
1436         prt_printf(out, "last_seq_ondisk:\t%llu\n",             j->last_seq_ondisk);
1437         prt_printf(out, "flushed_seq_ondisk:\t%llu\n",          j->flushed_seq_ondisk);
1438         prt_printf(out, "watermark:\t%s\n",                     bch2_watermarks[j->watermark]);
1439         prt_printf(out, "each entry reserved:\t%u\n",           j->entry_u64s_reserved);
1440         prt_printf(out, "nr flush writes:\t%llu\n",             j->nr_flush_writes);
1441         prt_printf(out, "nr noflush writes:\t%llu\n",           j->nr_noflush_writes);
1442         prt_printf(out, "average write size:\t");
1443         prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
1444         prt_newline(out);
1445         prt_printf(out, "nr direct reclaim:\t%llu\n",           j->nr_direct_reclaim);
1446         prt_printf(out, "nr background reclaim:\t%llu\n",       j->nr_background_reclaim);
1447         prt_printf(out, "reclaim kicked:\t%u\n",                j->reclaim_kicked);
1448         prt_printf(out, "reclaim runs in:\t%u ms\n",            time_after(j->next_reclaim, now)
1449                ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
1450         prt_printf(out, "blocked:\t%u\n",                       j->blocked);
1451         prt_printf(out, "current entry sectors:\t%u\n",         j->cur_entry_sectors);
1452         prt_printf(out, "current entry error:\t%s\n",           bch2_journal_errors[j->cur_entry_error]);
1453         prt_printf(out, "current entry:\t");
1454 
1455         switch (s.cur_entry_offset) {
1456         case JOURNAL_ENTRY_ERROR_VAL:
1457                 prt_printf(out, "error\n");
1458                 break;
1459         case JOURNAL_ENTRY_CLOSED_VAL:
1460                 prt_printf(out, "closed\n");
1461                 break;
1462         default:
1463                 prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
1464                 break;
1465         }
1466 
1467         prt_printf(out, "unwritten entries:\n");
1468         bch2_journal_bufs_to_text(out, j);
1469 
1470         prt_printf(out, "space:\n");
1471         printbuf_indent_add(out, 2);
1472         prt_printf(out, "discarded\t%u:%u\n",
1473                j->space[journal_space_discarded].next_entry,
1474                j->space[journal_space_discarded].total);
1475         prt_printf(out, "clean ondisk\t%u:%u\n",
1476                j->space[journal_space_clean_ondisk].next_entry,
1477                j->space[journal_space_clean_ondisk].total);
1478         prt_printf(out, "clean\t%u:%u\n",
1479                j->space[journal_space_clean].next_entry,
1480                j->space[journal_space_clean].total);
1481         prt_printf(out, "total\t%u:%u\n",
1482                j->space[journal_space_total].next_entry,
1483                j->space[journal_space_total].total);
1484         printbuf_indent_sub(out, 2);
1485 
1486         for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
1487                 struct journal_device *ja = &ca->journal;
1488 
1489                 if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
1490                         continue;
1491 
1492                 if (!ja->nr)
1493                         continue;
1494 
1495                 prt_printf(out, "dev %u:\n",                    ca->dev_idx);
1496                 printbuf_indent_add(out, 2);
1497                 prt_printf(out, "nr\t%u\n",                     ja->nr);
1498                 prt_printf(out, "bucket size\t%u\n",            ca->mi.bucket_size);
1499                 prt_printf(out, "available\t%u:%u\n",           bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
1500                 prt_printf(out, "discard_idx\t%u\n",            ja->discard_idx);
1501                 prt_printf(out, "dirty_ondisk\t%u (seq %llu)\n",ja->dirty_idx_ondisk,   ja->bucket_seq[ja->dirty_idx_ondisk]);
1502                 prt_printf(out, "dirty_idx\t%u (seq %llu)\n",   ja->dirty_idx,          ja->bucket_seq[ja->dirty_idx]);
1503                 prt_printf(out, "cur_idx\t%u (seq %llu)\n",     ja->cur_idx,            ja->bucket_seq[ja->cur_idx]);
1504                 printbuf_indent_sub(out, 2);
1505         }
1506 
1507         rcu_read_unlock();
1508 
1509         --out->atomic;
1510 }
1511 
1512 void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1513 {
1514         spin_lock(&j->lock);
1515         __bch2_journal_debug_to_text(out, j);
1516         spin_unlock(&j->lock);
1517 }
1518 
1519 bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
1520 {
1521         struct journal_entry_pin_list *pin_list;
1522         struct journal_entry_pin *pin;
1523 
1524         spin_lock(&j->lock);
1525         if (!test_bit(JOURNAL_running, &j->flags)) {
1526                 spin_unlock(&j->lock);
1527                 return true;
1528         }
1529 
1530         *seq = max(*seq, j->pin.front);
1531 
1532         if (*seq >= j->pin.back) {
1533                 spin_unlock(&j->lock);
1534                 return true;
1535         }
1536 
1537         out->atomic++;
1538 
1539         pin_list = journal_seq_pin(j, *seq);
1540 
1541         prt_printf(out, "%llu: count %u\n", *seq, atomic_read(&pin_list->count));
1542         printbuf_indent_add(out, 2);
1543 
1544         for (unsigned i = 0; i < ARRAY_SIZE(pin_list->list); i++)
1545                 list_for_each_entry(pin, &pin_list->list[i], list)
1546                         prt_printf(out, "\t%px %ps\n", pin, pin->flush);
1547 
1548         if (!list_empty(&pin_list->flushed))
1549                 prt_printf(out, "flushed:\n");
1550 
1551         list_for_each_entry(pin, &pin_list->flushed, list)
1552                 prt_printf(out, "\t%px %ps\n", pin, pin->flush);
1553 
1554         printbuf_indent_sub(out, 2);
1555 
1556         --out->atomic;
1557         spin_unlock(&j->lock);
1558 
1559         return false;
1560 }
1561 
1562 void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
1563 {
1564         u64 seq = 0;
1565 
1566         while (!bch2_journal_seq_pins_to_text(out, j, &seq))
1567                 seq++;
1568 }
1569 
