TOMOYO Linux Cross Reference
Linux/fs/bcachefs/journal_reclaim.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 
  3 #include "bcachefs.h"
  4 #include "btree_key_cache.h"
  5 #include "btree_update.h"
  6 #include "btree_write_buffer.h"
  7 #include "buckets.h"
  8 #include "errcode.h"
  9 #include "error.h"
 10 #include "journal.h"
 11 #include "journal_io.h"
 12 #include "journal_reclaim.h"
 13 #include "replicas.h"
 14 #include "sb-members.h"
 15 #include "trace.h"
 16 
 17 #include <linux/kthread.h>
 18 #include <linux/sched/mm.h>
 19 
 20 /* Free space calculations: */
 21 
 22 static unsigned journal_space_from(struct journal_device *ja,
 23                                    enum journal_space_from from)
 24 {
 25         switch (from) {
 26         case journal_space_discarded:
 27                 return ja->discard_idx;
 28         case journal_space_clean_ondisk:
 29                 return ja->dirty_idx_ondisk;
 30         case journal_space_clean:
 31                 return ja->dirty_idx;
 32         default:
 33                 BUG();
 34         }
 35 }
 36 
 37 unsigned bch2_journal_dev_buckets_available(struct journal *j,
 38                                             struct journal_device *ja,
 39                                             enum journal_space_from from)
 40 {
 41         unsigned available = (journal_space_from(ja, from) -
 42                               ja->cur_idx - 1 + ja->nr) % ja->nr;
 43 
 44         /*
 45          * Don't use the last bucket unless writing the new last_seq
 46          * will make another bucket available:
 47          */
 48         if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
 49                 --available;
 50 
 51         return available;
 52 }
 53 
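/*
 * Editor's note: a minimal sketch of the ring-buffer arithmetic above
 * (hypothetical helper, not part of bcachefs).  Bucket indices advance
 * modulo ja->nr, so the number of buckets strictly after cur_idx and
 * before from_idx is (from_idx - cur_idx - 1 + nr) % nr.  E.g. with
 * nr = 8, cur_idx = 6, from_idx = 2, buckets 7, 0 and 1 are available:
 * (2 - 6 - 1 + 8) % 8 = 3.
 */
static inline unsigned example_ring_distance(unsigned from_idx,
                                             unsigned cur_idx, unsigned nr)
{
        return (from_idx - cur_idx - 1 + nr) % nr;
}
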
 54 void bch2_journal_set_watermark(struct journal *j)
 55 {
 56         struct bch_fs *c = container_of(j, struct bch_fs, journal);
 57         bool low_on_space = j->space[journal_space_clean].total * 4 <=
 58                 j->space[journal_space_total].total;
 59         bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
 60         bool low_on_wb = bch2_btree_write_buffer_must_wait(c);
 61         unsigned watermark = low_on_space || low_on_pin || low_on_wb
 62                 ? BCH_WATERMARK_reclaim
 63                 : BCH_WATERMARK_stripe;
 64 
 65         if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space], low_on_space) ||
 66             track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin], low_on_pin) ||
 67             track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
 68                 trace_and_count(c, journal_full, c);
 69 
 70         mod_bit(JOURNAL_space_low, &j->flags, low_on_space || low_on_pin);
 71 
 72         swap(watermark, j->watermark);
 73         if (watermark > j->watermark)
 74                 journal_wake(j);
 75 }
 76 
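/*
 * Editor's note: restating the thresholds above - "low on space" means a
 * quarter or less of the journal is clean (clean * 4 <= total); "low on
 * pin" means fewer than a quarter of the pin FIFO's slots are free.  A
 * hypothetical standalone form of the space predicate:
 */
static inline bool example_low_on_space(u64 clean, u64 total)
{
        /* e.g. 100 clean of 512 total sectors: 400 <= 512, so low */
        return clean * 4 <= total;
}
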
 77 static struct journal_space
 78 journal_dev_space_available(struct journal *j, struct bch_dev *ca,
 79                             enum journal_space_from from)
 80 {
 81         struct journal_device *ja = &ca->journal;
 82         unsigned sectors, buckets, unwritten;
 83         u64 seq;
 84 
 85         if (from == journal_space_total)
 86                 return (struct journal_space) {
 87                         .next_entry     = ca->mi.bucket_size,
 88                         .total          = ca->mi.bucket_size * ja->nr,
 89                 };
 90 
 91         buckets = bch2_journal_dev_buckets_available(j, ja, from);
 92         sectors = ja->sectors_free;
 93 
 94         /*
 95          * Note that we don't allocate the space for a journal entry
 96          * until we write it out - thus, account for it here:
 97          */
 98         for (seq = journal_last_unwritten_seq(j);
 99              seq <= journal_cur_seq(j);
100              seq++) {
101                 unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;
102 
103                 if (!unwritten)
104                         continue;
105 
106                 /* entry won't fit on this device, skip: */
107                 if (unwritten > ca->mi.bucket_size)
108                         continue;
109 
110                 if (unwritten >= sectors) {
111                         if (!buckets) {
112                                 sectors = 0;
113                                 break;
114                         }
115 
116                         buckets--;
117                         sectors = ca->mi.bucket_size;
118                 }
119 
120                 sectors -= unwritten;
121         }
122 
123         if (sectors < ca->mi.bucket_size && buckets) {
124                 buckets--;
125                 sectors = ca->mi.bucket_size;
126         }
127 
128         return (struct journal_space) {
129                 .next_entry     = sectors,
130                 .total          = sectors + buckets * ca->mi.bucket_size,
131         };
132 }
133 
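/*
 * Editor's note: a worked example of the unwritten-entry accounting above,
 * with assumed values bucket_size = 512, sectors_free = 100, buckets = 1
 * and a single unwritten entry of 200 sectors: 200 >= 100, so a fresh
 * bucket is consumed (buckets drops to 0, sectors = 512), then
 * sectors -= 200 leaves next_entry = 312 and total = 312.
 */
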
134 static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
135                             enum journal_space_from from)
136 {
137         struct bch_fs *c = container_of(j, struct bch_fs, journal);
138         unsigned pos, nr_devs = 0;
139         struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
140 
141         BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));
142 
143         rcu_read_lock();
144         for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
145                 if (!ca->journal.nr)
146                         continue;
147 
148                 space = journal_dev_space_available(j, ca, from);
149                 if (!space.next_entry)
150                         continue;
151 
152                 for (pos = 0; pos < nr_devs; pos++)
153                         if (space.total > dev_space[pos].total)
154                                 break;
155 
156                 array_insert_item(dev_space, nr_devs, pos, space);
157         }
158         rcu_read_unlock();
159 
160         if (nr_devs < nr_devs_want)
161                 return (struct journal_space) { 0, 0 };
162 
163         /*
164          * We sorted largest to smallest, and we want the smallest out of the
165          * @nr_devs_want largest devices:
166          */
167         return dev_space[nr_devs_want - 1];
168 }
169 
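/*
 * Editor's note: dev_space[] is kept sorted largest-total-first by the
 * insertion loop above, so dev_space[nr_devs_want - 1] is the smallest of
 * the nr_devs_want largest devices - i.e. how much we can write at the
 * requested replication level.  A hypothetical standalone sketch:
 */
static inline u64 example_space_at_replicas(const u64 *totals_sorted_desc,
                                            unsigned nr_devs,
                                            unsigned nr_devs_want)
{
        /* e.g. totals { 900, 700, 400 } and nr_devs_want = 2 -> 700 */
        return nr_devs < nr_devs_want ? 0
                : totals_sorted_desc[nr_devs_want - 1];
}
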
170 void bch2_journal_space_available(struct journal *j)
171 {
172         struct bch_fs *c = container_of(j, struct bch_fs, journal);
173         unsigned clean, clean_ondisk, total;
174         unsigned max_entry_size  = min(j->buf[0].buf_size >> 9,
175                                        j->buf[1].buf_size >> 9);
176         unsigned nr_online = 0, nr_devs_want;
177         bool can_discard = false;
178         int ret = 0;
179 
180         lockdep_assert_held(&j->lock);
181 
182         rcu_read_lock();
183         for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
184                 struct journal_device *ja = &ca->journal;
185 
186                 if (!ja->nr)
187                         continue;
188 
189                 while (ja->dirty_idx != ja->cur_idx &&
190                        ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
191                         ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
192 
193                 while (ja->dirty_idx_ondisk != ja->dirty_idx &&
194                        ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
195                         ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
196 
197                 if (ja->discard_idx != ja->dirty_idx_ondisk)
198                         can_discard = true;
199 
200                 max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
201                 nr_online++;
202         }
203         rcu_read_unlock();
204 
205         j->can_discard = can_discard;
206 
207         if (nr_online < metadata_replicas_required(c)) {
208                 struct printbuf buf = PRINTBUF;
209                 buf.atomic++;
210                 prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n"
211                            "rw journal devs:", nr_online, metadata_replicas_required(c));
212 
213                 rcu_read_lock();
214                 for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal])
215                         prt_printf(&buf, " %s", ca->name);
216                 rcu_read_unlock();
217 
218                 bch_err(c, "%s", buf.buf);
219                 printbuf_exit(&buf);
220                 ret = JOURNAL_ERR_insufficient_devices;
221                 goto out;
222         }
223 
224         nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);
225 
226         for (unsigned i = 0; i < journal_space_nr; i++)
227                 j->space[i] = __journal_space_available(j, nr_devs_want, i);
228 
229         clean_ondisk    = j->space[journal_space_clean_ondisk].total;
230         clean           = j->space[journal_space_clean].total;
231         total           = j->space[journal_space_total].total;
232 
233         if (!j->space[journal_space_discarded].next_entry)
234                 ret = JOURNAL_ERR_journal_full;
235 
236         if ((j->space[journal_space_clean_ondisk].next_entry <
237              j->space[journal_space_clean_ondisk].total) &&
238             (clean - clean_ondisk <= total / 8) &&
239             (clean_ondisk * 2 > clean))
240                 set_bit(JOURNAL_may_skip_flush, &j->flags);
241         else
242                 clear_bit(JOURNAL_may_skip_flush, &j->flags);
243 
244         bch2_journal_set_watermark(j);
245 out:
246         j->cur_entry_sectors    = !ret ? j->space[journal_space_discarded].next_entry : 0;
247         j->cur_entry_error      = ret;
248 
249         if (!ret)
250                 journal_wake(j);
251 }
252 
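/*
 * Editor's note: the JOURNAL_may_skip_flush heuristic above permits
 * noflush journal writes only while the on-disk picture is close to the
 * in-memory one: clean-on-disk space extends beyond the next entry, at
 * most total/8 of clean space is still waiting on a flush
 * (clean - clean_ondisk <= total / 8), and more than half of the clean
 * space is already clean on disk (clean_ondisk * 2 > clean).
 */
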
253 /* Discards - last part of journal reclaim: */
254 
255 static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
256 {
257         bool ret;
258 
259         spin_lock(&j->lock);
260         ret = ja->discard_idx != ja->dirty_idx_ondisk;
261         spin_unlock(&j->lock);
262 
263         return ret;
264 }
265 
266 /*
267  * Advance ja->discard_idx as long as it points to buckets that are no longer
268  * dirty, issuing discards if necessary:
269  */
270 void bch2_journal_do_discards(struct journal *j)
271 {
272         struct bch_fs *c = container_of(j, struct bch_fs, journal);
273 
274         mutex_lock(&j->discard_lock);
275 
276         for_each_rw_member(c, ca) {
277                 struct journal_device *ja = &ca->journal;
278 
279                 while (should_discard_bucket(j, ja)) {
280                         if (!c->opts.nochanges &&
281                             ca->mi.discard &&
282                             bdev_max_discard_sectors(ca->disk_sb.bdev))
283                                 blkdev_issue_discard(ca->disk_sb.bdev,
284                                         bucket_to_sector(ca,
285                                                 ja->buckets[ja->discard_idx]),
286                                         ca->mi.bucket_size, GFP_NOFS);
287 
288                         spin_lock(&j->lock);
289                         ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
290 
291                         bch2_journal_space_available(j);
292                         spin_unlock(&j->lock);
293                 }
294         }
295 
296         mutex_unlock(&j->discard_lock);
297 }
298 
299 /*
300  * Journal entry pinning - machinery for holding a reference on a given journal
301  * entry, holding it open to ensure it gets replayed during recovery:
302  */
303 
304 void bch2_journal_reclaim_fast(struct journal *j)
305 {
306         bool popped = false;
307 
308         lockdep_assert_held(&j->lock);
309 
310         /*
311          * Unpin journal entries whose reference counts reached zero, meaning
312          * all btree nodes got written out
313          */
314         while (!fifo_empty(&j->pin) &&
315                j->pin.front <= j->seq_ondisk &&
316                !atomic_read(&fifo_peek_front(&j->pin).count)) {
317                 j->pin.front++;
318                 popped = true;
319         }
320 
321         if (popped)
322                 bch2_journal_space_available(j);
323 }
324 
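/*
 * Editor's note: a minimal sketch of the same pattern - pop fully-written,
 * zero-refcount entries off the front of the pin FIFO (hypothetical types,
 * not the kernel fifo implementation; indices stand in for sequence
 * numbers):
 */
struct example_pin_list { int count; };

static unsigned example_reclaim_fast(const struct example_pin_list *fifo,
                                     unsigned front, unsigned back,
                                     unsigned seq_ondisk)
{
        while (front != back &&
               front <= seq_ondisk &&
               !fifo[front].count)
                front++;                /* entry fully flushed: unpin it */
        return front;                   /* new front of the FIFO */
}
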
325 bool __bch2_journal_pin_put(struct journal *j, u64 seq)
326 {
327         struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
328 
329         return atomic_dec_and_test(&pin_list->count);
330 }
331 
332 void bch2_journal_pin_put(struct journal *j, u64 seq)
333 {
334         if (__bch2_journal_pin_put(j, seq)) {
335                 spin_lock(&j->lock);
336                 bch2_journal_reclaim_fast(j);
337                 spin_unlock(&j->lock);
338         }
339 }
340 
341 static inline bool __journal_pin_drop(struct journal *j,
342                                       struct journal_entry_pin *pin)
343 {
344         struct journal_entry_pin_list *pin_list;
345 
346         if (!journal_pin_active(pin))
347                 return false;
348 
349         if (j->flush_in_progress == pin)
350                 j->flush_in_progress_dropped = true;
351 
352         pin_list = journal_seq_pin(j, pin->seq);
353         pin->seq = 0;
354         list_del_init(&pin->list);
355 
356         /*
357          * Unpinning a journal entry may make journal_next_bucket() succeed, if
358          * writing a new last_seq will now make another bucket available:
359          */
360         return atomic_dec_and_test(&pin_list->count) &&
361                 pin_list == &fifo_peek_front(&j->pin);
362 }
363 
364 void bch2_journal_pin_drop(struct journal *j,
365                            struct journal_entry_pin *pin)
366 {
367         spin_lock(&j->lock);
368         if (__journal_pin_drop(j, pin))
369                 bch2_journal_reclaim_fast(j);
370         spin_unlock(&j->lock);
371 }
372 
373 static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
374 {
375         if (fn == bch2_btree_node_flush0 ||
376             fn == bch2_btree_node_flush1)
377                 return JOURNAL_PIN_btree;
378         else if (fn == bch2_btree_key_cache_journal_flush)
379                 return JOURNAL_PIN_key_cache;
380         else
381                 return JOURNAL_PIN_other;
382 }
383 
384 static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
385                           struct journal_entry_pin *pin,
386                           journal_pin_flush_fn flush_fn,
387                           enum journal_pin_type type)
388 {
389         struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
390 
391         /*
392          * flush_fn is how we identify journal pins in debugfs, so must always
393          * exist, even if it doesn't do anything:
394          */
395         BUG_ON(!flush_fn);
396 
397         atomic_inc(&pin_list->count);
398         pin->seq        = seq;
399         pin->flush      = flush_fn;
400         list_add(&pin->list, &pin_list->list[type]);
401 }
402 
403 void bch2_journal_pin_copy(struct journal *j,
404                            struct journal_entry_pin *dst,
405                            struct journal_entry_pin *src,
406                            journal_pin_flush_fn flush_fn)
407 {
408         spin_lock(&j->lock);
409 
410         u64 seq = READ_ONCE(src->seq);
411 
412         if (seq < journal_last_seq(j)) {
413                 /*
414                  * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
415                  * the src pin - with the pin dropped, the entry to pin might no
 416          * longer exist, but that means there's no longer anything to
417                  * copy and we can bail out here:
418                  */
419                 spin_unlock(&j->lock);
420                 return;
421         }
422 
423         bool reclaim = __journal_pin_drop(j, dst);
424 
425         bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(flush_fn));
426 
427         if (reclaim)
428                 bch2_journal_reclaim_fast(j);
429 
430         /*
 431          * If the journal is currently full, we might want to call flush_fn
432          * immediately:
433          */
434         if (seq == journal_last_seq(j))
435                 journal_wake(j);
436         spin_unlock(&j->lock);
437 }
438 
439 void bch2_journal_pin_set(struct journal *j, u64 seq,
440                           struct journal_entry_pin *pin,
441                           journal_pin_flush_fn flush_fn)
442 {
443         spin_lock(&j->lock);
444 
445         BUG_ON(seq < journal_last_seq(j));
446 
447         bool reclaim = __journal_pin_drop(j, pin);
448 
449         bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn));
450 
451         if (reclaim)
452                 bch2_journal_reclaim_fast(j);
453         /*
 454          * If the journal is currently full, we might want to call flush_fn
455          * immediately:
456          */
457         if (seq == journal_last_seq(j))
458                 journal_wake(j);
459 
460         spin_unlock(&j->lock);
461 }
462 
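/*
 * Editor's note: a hypothetical flush callback showing how the pin API
 * above is typically wired up - the callback persists whatever state the
 * pin protects, then drops the pin so the journal entry can be reclaimed:
 */
static int example_pin_flush(struct journal *j,
                             struct journal_entry_pin *pin, u64 seq)
{
        /* ... write out the pinned state here ... */
        bch2_journal_pin_drop(j, pin);
        return 0;
}
/*
 * A caller arms the pin when it dirties a journal entry, e.g.:
 *      bch2_journal_pin_set(j, seq, &my_pin, example_pin_flush);
 */
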
463 /**
464  * bch2_journal_pin_flush: ensure journal pin callback is no longer running
465  * @j:          journal object
466  * @pin:        pin to flush
467  */
468 void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
469 {
470         BUG_ON(journal_pin_active(pin));
471 
472         wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
473 }
474 
475 /*
476  * Journal reclaim: flush references to open journal entries to reclaim space in
477  * the journal
478  *
479  * May be done by the journal code in the background as needed to free up space
480  * for more journal entries, or as part of doing a clean shutdown, or to migrate
481  * data off of a specific device:
482  */
483 
484 static struct journal_entry_pin *
485 journal_get_next_pin(struct journal *j,
486                      u64 seq_to_flush,
487                      unsigned allowed_below_seq,
488                      unsigned allowed_above_seq,
489                      u64 *seq)
490 {
491         struct journal_entry_pin_list *pin_list;
492         struct journal_entry_pin *ret = NULL;
493         unsigned i;
494 
495         fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
496                 if (*seq > seq_to_flush && !allowed_above_seq)
497                         break;
498 
499                 for (i = 0; i < JOURNAL_PIN_NR; i++)
500                         if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
501                             ((1U << i) & allowed_above_seq)) {
502                                 ret = list_first_entry_or_null(&pin_list->list[i],
503                                         struct journal_entry_pin, list);
504                                 if (ret)
505                                         return ret;
506                         }
507         }
508 
509         return NULL;
510 }
511 
 512 /* returns the number of pins flushed, i.e. nonzero if we did work */
513 static size_t journal_flush_pins(struct journal *j,
514                                  u64 seq_to_flush,
515                                  unsigned allowed_below_seq,
516                                  unsigned allowed_above_seq,
517                                  unsigned min_any,
518                                  unsigned min_key_cache)
519 {
520         struct journal_entry_pin *pin;
521         size_t nr_flushed = 0;
522         journal_pin_flush_fn flush_fn;
523         u64 seq;
524         int err;
525 
526         lockdep_assert_held(&j->reclaim_lock);
527 
528         while (1) {
529                 unsigned allowed_above = allowed_above_seq;
530                 unsigned allowed_below = allowed_below_seq;
531 
532                 if (min_any) {
533                         allowed_above |= ~0;
534                         allowed_below |= ~0;
535                 }
536 
537                 if (min_key_cache) {
538                         allowed_above |= 1U << JOURNAL_PIN_key_cache;
539                         allowed_below |= 1U << JOURNAL_PIN_key_cache;
540                 }
541 
542                 cond_resched();
543 
544                 j->last_flushed = jiffies;
545 
546                 spin_lock(&j->lock);
547                 pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
548                 if (pin) {
549                         BUG_ON(j->flush_in_progress);
550                         j->flush_in_progress = pin;
551                         j->flush_in_progress_dropped = false;
552                         flush_fn = pin->flush;
553                 }
554                 spin_unlock(&j->lock);
555 
556                 if (!pin)
557                         break;
558 
559                 if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
560                         min_key_cache--;
561 
562                 if (min_any)
563                         min_any--;
564 
565                 err = flush_fn(j, pin, seq);
566 
567                 spin_lock(&j->lock);
568                 /* Pin might have been dropped or rearmed: */
569                 if (likely(!err && !j->flush_in_progress_dropped))
570                         list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
571                 j->flush_in_progress = NULL;
572                 j->flush_in_progress_dropped = false;
573                 spin_unlock(&j->lock);
574 
575                 wake_up(&j->pin_flush_wait);
576 
577                 if (err)
578                         break;
579 
580                 nr_flushed++;
581         }
582 
583         return nr_flushed;
584 }
585 
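/*
 * Editor's note: allowed_below_seq/allowed_above_seq are bitmasks of
 * journal_pin_type selecting which pin classes may be flushed below and
 * above seq_to_flush, while min_any/min_key_cache temporarily widen the
 * masks until at least that many pins (of any type / key cache type) have
 * been flushed.  The reclaim path below calls it as:
 *
 *      journal_flush_pins(j, seq_to_flush, ~0, 0, min_nr, min_key_cache);
 */
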
586 static u64 journal_seq_to_flush(struct journal *j)
587 {
588         struct bch_fs *c = container_of(j, struct bch_fs, journal);
589         u64 seq_to_flush = 0;
590 
591         spin_lock(&j->lock);
592 
593         for_each_rw_member(c, ca) {
594                 struct journal_device *ja = &ca->journal;
595                 unsigned nr_buckets, bucket_to_flush;
596 
597                 if (!ja->nr)
598                         continue;
599 
600                 /* Try to keep the journal at most half full: */
601                 nr_buckets = ja->nr / 2;
602 
603                 nr_buckets = min(nr_buckets, ja->nr);
604 
605                 bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
606                 seq_to_flush = max(seq_to_flush,
607                                    ja->bucket_seq[bucket_to_flush]);
608         }
609 
610         /* Also flush if the pin fifo is more than half full */
611         seq_to_flush = max_t(s64, seq_to_flush,
612                              (s64) journal_cur_seq(j) -
613                              (j->pin.size >> 1));
614         spin_unlock(&j->lock);
615 
616         return seq_to_flush;
617 }
618 
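/*
 * Editor's note: a worked example of the half-full target above - with
 * ja->nr = 8 and ja->cur_idx = 3, nr_buckets = 4 and bucket_to_flush =
 * (3 + 4) % 8 = 7, so reclaim is asked to flush everything pinning
 * ja->bucket_seq[7], leaving at most half of the buckets dirty.
 */
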
619 /**
620  * __bch2_journal_reclaim - free up journal buckets
621  * @j:          journal object
622  * @direct:     direct or background reclaim?
623  * @kicked:     requested to run since we last ran?
 624  * Returns:     0 on success, or -EIO if the journal has been shut down
625  *
626  * Background journal reclaim writes out btree nodes. It should be run
627  * early enough so that we never completely run out of journal buckets.
628  *
629  * High watermarks for triggering background reclaim:
630  * - FIFO has fewer than 512 entries left
631  * - fewer than 25% journal buckets free
632  *
633  * Background reclaim runs until low watermarks are reached:
634  * - FIFO has more than 1024 entries left
635  * - more than 50% journal buckets free
636  *
637  * As long as a reclaim can complete in the time it takes to fill up
 638  * 512 journal entries or 25% of all journal buckets,
639  * journal_next_bucket() should not stall.
640  */
641 static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
642 {
643         struct bch_fs *c = container_of(j, struct bch_fs, journal);
644         bool kthread = (current->flags & PF_KTHREAD) != 0;
645         u64 seq_to_flush;
646         size_t min_nr, min_key_cache, nr_flushed;
647         unsigned flags;
648         int ret = 0;
649 
650         /*
651          * We can't invoke memory reclaim while holding the reclaim_lock -
652          * journal reclaim is required to make progress for memory reclaim
653          * (cleaning the caches), so we can't get stuck in memory reclaim while
654          * we're holding the reclaim lock:
655          */
656         lockdep_assert_held(&j->reclaim_lock);
657         flags = memalloc_noreclaim_save();
658 
659         do {
660                 if (kthread && kthread_should_stop())
661                         break;
662 
663                 if (bch2_journal_error(j)) {
664                         ret = -EIO;
665                         break;
666                 }
667 
668                 bch2_journal_do_discards(j);
669 
670                 seq_to_flush = journal_seq_to_flush(j);
671                 min_nr = 0;
672 
673                 /*
 674          * If it's been longer than journal_reclaim_delay since we last flushed,
675                  * make sure to flush at least one journal pin:
676                  */
677                 if (time_after(jiffies, j->last_flushed +
678                                msecs_to_jiffies(c->opts.journal_reclaim_delay)))
679                         min_nr = 1;
680 
681                 if (j->watermark != BCH_WATERMARK_stripe)
682                         min_nr = 1;
683 
684                 if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
685                         min_nr = 1;
686 
687                 min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
688 
689                 trace_and_count(c, journal_reclaim_start, c,
690                                 direct, kicked,
691                                 min_nr, min_key_cache,
692                                 atomic_read(&c->btree_cache.dirty),
693                                 c->btree_cache.used,
694                                 atomic_long_read(&c->btree_key_cache.nr_dirty),
695                                 atomic_long_read(&c->btree_key_cache.nr_keys));
696 
697                 nr_flushed = journal_flush_pins(j, seq_to_flush,
698                                                 ~0, 0,
699                                                 min_nr, min_key_cache);
700 
701                 if (direct)
702                         j->nr_direct_reclaim += nr_flushed;
703                 else
704                         j->nr_background_reclaim += nr_flushed;
705                 trace_and_count(c, journal_reclaim_finish, c, nr_flushed);
706 
707                 if (nr_flushed)
708                         wake_up(&j->reclaim_wait);
709         } while ((min_nr || min_key_cache) && nr_flushed && !direct);
710 
711         memalloc_noreclaim_restore(flags);
712 
713         return ret;
714 }
715 
716 int bch2_journal_reclaim(struct journal *j)
717 {
718         return __bch2_journal_reclaim(j, true, true);
719 }
720 
721 static int bch2_journal_reclaim_thread(void *arg)
722 {
723         struct journal *j = arg;
724         struct bch_fs *c = container_of(j, struct bch_fs, journal);
725         unsigned long delay, now;
726         bool journal_empty;
727         int ret = 0;
728 
729         set_freezable();
730 
731         j->last_flushed = jiffies;
732 
733         while (!ret && !kthread_should_stop()) {
734                 bool kicked = j->reclaim_kicked;
735 
736                 j->reclaim_kicked = false;
737 
738                 mutex_lock(&j->reclaim_lock);
739                 ret = __bch2_journal_reclaim(j, false, kicked);
740                 mutex_unlock(&j->reclaim_lock);
741 
742                 now = jiffies;
743                 delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
744                 j->next_reclaim = j->last_flushed + delay;
745 
746                 if (!time_in_range(j->next_reclaim, now, now + delay))
747                         j->next_reclaim = now + delay;
748 
749                 while (1) {
750                         set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
751                         if (kthread_should_stop())
752                                 break;
753                         if (j->reclaim_kicked)
754                                 break;
755 
756                         spin_lock(&j->lock);
757                         journal_empty = fifo_empty(&j->pin);
758                         spin_unlock(&j->lock);
759 
760                         if (journal_empty)
761                                 schedule();
762                         else if (time_after(j->next_reclaim, jiffies))
763                                 schedule_timeout(j->next_reclaim - jiffies);
764                         else
765                                 break;
766                 }
767                 __set_current_state(TASK_RUNNING);
768         }
769 
770         return 0;
771 }
772 
773 void bch2_journal_reclaim_stop(struct journal *j)
774 {
775         struct task_struct *p = j->reclaim_thread;
776 
777         j->reclaim_thread = NULL;
778 
779         if (p) {
780                 kthread_stop(p);
781                 put_task_struct(p);
782         }
783 }
784 
785 int bch2_journal_reclaim_start(struct journal *j)
786 {
787         struct bch_fs *c = container_of(j, struct bch_fs, journal);
788         struct task_struct *p;
789         int ret;
790 
791         if (j->reclaim_thread)
792                 return 0;
793 
794         p = kthread_create(bch2_journal_reclaim_thread, j,
795                            "bch-reclaim/%s", c->name);
796         ret = PTR_ERR_OR_ZERO(p);
797         bch_err_msg(c, ret, "creating journal reclaim thread");
798         if (ret)
799                 return ret;
800 
801         get_task_struct(p);
802         j->reclaim_thread = p;
803         wake_up_process(p);
804         return 0;
805 }
806 
807 static int journal_flush_done(struct journal *j, u64 seq_to_flush,
808                               bool *did_work)
809 {
810         int ret;
811 
812         ret = bch2_journal_error(j);
813         if (ret)
814                 return ret;
815 
816         mutex_lock(&j->reclaim_lock);
817 
818         if (journal_flush_pins(j, seq_to_flush,
819                                (1U << JOURNAL_PIN_key_cache)|
820                                (1U << JOURNAL_PIN_other), 0, 0, 0) ||
821             journal_flush_pins(j, seq_to_flush,
822                                (1U << JOURNAL_PIN_btree), 0, 0, 0))
823                 *did_work = true;
824 
825         if (seq_to_flush > journal_cur_seq(j))
826                 bch2_journal_entry_close(j);
827 
828         spin_lock(&j->lock);
829         /*
830          * If journal replay hasn't completed, the unreplayed journal entries
831          * hold refs on their corresponding sequence numbers
832          */
833         ret = !test_bit(JOURNAL_replay_done, &j->flags) ||
834                 journal_last_seq(j) > seq_to_flush ||
835                 !fifo_used(&j->pin);
836 
837         spin_unlock(&j->lock);
838         mutex_unlock(&j->reclaim_lock);
839 
840         return ret;
841 }
842 
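/*
 * Editor's note: journal_flush_done() flushes key cache and "other" pins
 * before btree node pins - presumably because flushing the former dirties
 * btree nodes, whose own pins can then be flushed in the second pass.
 */
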
843 bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
844 {
 845         /* TODO: track this with time_stats */
846         bool did_work = false;
847 
848         if (!test_bit(JOURNAL_running, &j->flags))
849                 return false;
850 
851         closure_wait_event(&j->async_wait,
852                 journal_flush_done(j, seq_to_flush, &did_work));
853 
854         return did_work;
855 }
856 
857 int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
858 {
859         struct bch_fs *c = container_of(j, struct bch_fs, journal);
860         struct journal_entry_pin_list *p;
861         u64 iter, seq = 0;
862         int ret = 0;
863 
864         spin_lock(&j->lock);
865         fifo_for_each_entry_ptr(p, &j->pin, iter)
866                 if (dev_idx >= 0
867                     ? bch2_dev_list_has_dev(p->devs, dev_idx)
868                     : p->devs.nr < c->opts.metadata_replicas)
869                         seq = iter;
870         spin_unlock(&j->lock);
871 
872         bch2_journal_flush_pins(j, seq);
873 
874         ret = bch2_journal_error(j);
875         if (ret)
876                 return ret;
877 
878         mutex_lock(&c->replicas_gc_lock);
879         bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);
880 
881         /*
882          * Now that we've populated replicas_gc, write to the journal to mark
883          * active journal devices. This handles the case where the journal might
884          * be empty. Otherwise we could clear all journal replicas and
885          * temporarily put the fs into an unrecoverable state. Journal recovery
886          * expects to find devices marked for journal data on unclean mount.
887          */
888         ret = bch2_journal_meta(&c->journal);
889         if (ret)
890                 goto err;
891 
892         seq = 0;
893         spin_lock(&j->lock);
894         while (!ret) {
895                 struct bch_replicas_padded replicas;
896 
897                 seq = max(seq, journal_last_seq(j));
898                 if (seq >= j->pin.back)
899                         break;
900                 bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
901                                          journal_seq_pin(j, seq)->devs);
902                 seq++;
903 
904                 if (replicas.e.nr_devs) {
905                         spin_unlock(&j->lock);
906                         ret = bch2_mark_replicas(c, &replicas.e);
907                         spin_lock(&j->lock);
908                 }
909         }
910         spin_unlock(&j->lock);
911 err:
912         ret = bch2_replicas_gc_end(c, ret);
913         mutex_unlock(&c->replicas_gc_lock);
914 
915         return ret;
916 }
917 
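/*
 * Editor's note: hypothetical call sites for the function above - flush
 * every pin referencing a device being evacuated, or (with a negative
 * dev_idx) every pin on entries that are under-replicated:
 *
 *      bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
 *      bch2_journal_flush_device_pins(&c->journal, -1);
 */
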
