TOMOYO Linux Cross Reference
Linux/fs/bcachefs/bcachefs.h

  1 /* SPDX-License-Identifier: GPL-2.0 */
  2 #ifndef _BCACHEFS_H
  3 #define _BCACHEFS_H
  4 
  5 /*
  6  * SOME HIGH LEVEL CODE DOCUMENTATION:
  7  *
  8  * Bcache mostly works with cache sets, cache devices, and backing devices.
  9  *
 10  * Support for multiple cache devices hasn't quite been finished off yet, but
  11  * it's about 95% plumbed through. A cache set and its cache devices are sort of
  12  * like an md raid array and its component devices. Most of the code doesn't care
 13  * about individual cache devices, the main abstraction is the cache set.
 14  *
  15  * Multiple cache devices are intended to give us the ability to mirror dirty
 16  * cached data and metadata, without mirroring clean cached data.
 17  *
 18  * Backing devices are different, in that they have a lifetime independent of a
 19  * cache set. When you register a newly formatted backing device it'll come up
 20  * in passthrough mode, and then you can attach and detach a backing device from
 21  * a cache set at runtime - while it's mounted and in use. Detaching implicitly
 22  * invalidates any cached data for that backing device.
 23  *
 24  * A cache set can have multiple (many) backing devices attached to it.
 25  *
  26  * There are also flash only volumes - this is the reason for the distinction
 27  * between struct cached_dev and struct bcache_device. A flash only volume
 28  * works much like a bcache device that has a backing device, except the
 29  * "cached" data is always dirty. The end result is that we get thin
 30  * provisioning with very little additional code.
 31  *
 32  * Flash only volumes work but they're not production ready because the moving
 33  * garbage collector needs more work. More on that later.
 34  *
 35  * BUCKETS/ALLOCATION:
 36  *
 37  * Bcache is primarily designed for caching, which means that in normal
 38  * operation all of our available space will be allocated. Thus, we need an
 39  * efficient way of deleting things from the cache so we can write new things to
 40  * it.
 41  *
 42  * To do this, we first divide the cache device up into buckets. A bucket is the
  43  * unit of allocation; they're typically around 1 MB - anywhere from 128k to 2M+
 44  * works efficiently.
 45  *
 46  * Each bucket has a 16 bit priority, and an 8 bit generation associated with
 47  * it. The gens and priorities for all the buckets are stored contiguously and
 48  * packed on disk (in a linked list of buckets - aside from the superblock, all
 49  * of bcache's metadata is stored in buckets).
 50  *
 51  * The priority is used to implement an LRU. We reset a bucket's priority when
  52  * we allocate it or on a cache hit, and every so often we decrement the priority
 53  * of each bucket. It could be used to implement something more sophisticated,
 54  * if anyone ever gets around to it.
 55  *
 56  * The generation is used for invalidating buckets. Each pointer also has an 8
 57  * bit generation embedded in it; for a pointer to be considered valid, its gen
 58  * must match the gen of the bucket it points into.  Thus, to reuse a bucket all
 59  * we have to do is increment its gen (and write its new gen to disk; we batch
 60  * this up).
 61  *
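      * (A hedged sketch of the gen check described above - the real structs
      * live in bcachefs_format.h, and these names are illustrative only:
      *
      *	struct bucket      { u8 gen; u16 prio; };
      *	struct example_ptr { u64 offset; u8 gen; };
      *
      *	bool stale = bucket.gen != ptr.gen;	// pointer no longer valid
      *
      * so "emptying" a bucket is just one increment of its gen.)
      *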
 62  * Bcache is entirely COW - we never write twice to a bucket, even buckets that
 63  * contain metadata (including btree nodes).
 64  *
 65  * THE BTREE:
 66  *
  67  * Bcache is in large part designed around the btree.
 68  *
 69  * At a high level, the btree is just an index of key -> ptr tuples.
 70  *
 71  * Keys represent extents, and thus have a size field. Keys also have a variable
 72  * number of pointers attached to them (potentially zero, which is handy for
 73  * invalidating the cache).
 74  *
 75  * The key itself is an inode:offset pair. The inode number corresponds to a
 76  * backing device or a flash only volume. The offset is the ending offset of the
 77  * extent within the inode - not the starting offset; this makes lookups
 78  * slightly more convenient.
 79  *
 80  * Pointers contain the cache device id, the offset on that device, and an 8 bit
 81  * generation number. More on the gen later.
 82  *
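      * (Sketch of the addressing scheme, with illustrative field names - the
      * real bkey layout is in bcachefs_format.h:
      *
      *	struct example_key { u64 inode; u64 offset; u32 size; };
      *
      *	u64 start = k.offset - k.size;	// offset stores the *end*
      *
      * so a lookup for position p just finds the first key with offset > p,
      * which is why storing the end offset is slightly more convenient.)
      *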
 83  * Index lookups are not fully abstracted - cache lookups in particular are
 84  * still somewhat mixed in with the btree code, but things are headed in that
 85  * direction.
 86  *
 87  * Updates are fairly well abstracted, though. There are two different ways of
  88  * updating the btree: insert and replace.
 89  *
 90  * BTREE_INSERT will just take a list of keys and insert them into the btree -
 91  * overwriting (possibly only partially) any extents they overlap with. This is
 92  * used to update the index after a write.
 93  *
 94  * BTREE_REPLACE is really cmpxchg(); it inserts a key into the btree iff it is
 95  * overwriting a key that matches another given key. This is used for inserting
  96  * data into the cache after a cache miss, for background writeback, and for
 97  * the moving garbage collector.
 98  *
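      * (Rough sketch of the BTREE_REPLACE semantics - names are illustrative,
      * not the actual update path:
      *
      *	if (index_matches(k->k.p, old))	// still points at what we read?
      *		do_insert(k);
      *	else
      *		drop(k);		// raced with another write; give up
      *
      * so if the data moved or was overwritten while we were working, the
      * stale insertion is simply dropped instead of corrupting the index.)
      *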
 99  * There is no "delete" operation; deleting things from the index is
 100  * accomplished either by invalidating pointers (by incrementing a bucket's
101  * gen) or by inserting a key with 0 pointers - which will overwrite anything
102  * previously present at that location in the index.
103  *
104  * This means that there are always stale/invalid keys in the btree. They're
105  * filtered out by the code that iterates through a btree node, and removed when
106  * a btree node is rewritten.
107  *
108  * BTREE NODES:
109  *
110  * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
111  * free smaller than a bucket - so, that's how big our btree nodes are.
112  *
113  * (If buckets are really big we'll only use part of the bucket for a btree node
114  * - no less than 1/4th - but a bucket still contains no more than a single
115  * btree node. I'd actually like to change this, but for now we rely on the
116  * bucket's gen for deleting btree nodes when we rewrite/split a node.)
117  *
118  * Anyways, btree nodes are big - big enough to be inefficient with a textbook
119  * btree implementation.
120  *
121  * The way this is solved is that btree nodes are internally log structured; we
122  * can append new keys to an existing btree node without rewriting it. This
123  * means each set of keys we write is sorted, but the node is not.
124  *
 125  * We maintain this log structure in memory - keeping 1MB of keys sorted would
126  * be expensive, and we have to distinguish between the keys we have written and
127  * the keys we haven't. So to do a lookup in a btree node, we have to search
128  * each sorted set. But we do merge written sets together lazily, so the cost of
129  * these extra searches is quite low (normally most of the keys in a btree node
130  * will be in one big set, and then there'll be one or two sets that are much
131  * smaller).
132  *
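      * (Lookup sketch, with illustrative names - one binary search per sorted
      * set, keeping the best match:
      *
      *	for (i = 0; i < node->nr_sets; i++) {
      *		k = bset_search(node->set[i], search_pos);
      *		best = key_min(best, k);
      *	}
      *
      * cheap in practice, since all but one or two sets are small.)
      *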
133  * This log structure makes bcache's btree more of a hybrid between a
134  * conventional btree and a compacting data structure, with some of the
135  * advantages of both.
136  *
137  * GARBAGE COLLECTION:
138  *
139  * We can't just invalidate any bucket - it might contain dirty data or
140  * metadata. If it once contained dirty data, other writes might overwrite it
141  * later, leaving no valid pointers into that bucket in the index.
142  *
143  * Thus, the primary purpose of garbage collection is to find buckets to reuse.
 144  * It also counts how much valid data each bucket currently contains, so that
145  * allocation can reuse buckets sooner when they've been mostly overwritten.
146  *
147  * It also does some things that are really internal to the btree
148  * implementation. If a btree node contains pointers that are stale by more than
149  * some threshold, it rewrites the btree node to avoid the bucket's generation
150  * wrapping around. It also merges adjacent btree nodes if they're empty enough.
151  *
152  * THE JOURNAL:
153  *
154  * Bcache's journal is not necessary for consistency; we always strictly
155  * order metadata writes so that the btree and everything else is consistent on
156  * disk in the event of an unclean shutdown, and in fact bcache had writeback
157  * caching (with recovery from unclean shutdown) before journalling was
158  * implemented.
159  *
160  * Rather, the journal is purely a performance optimization; we can't complete a
161  * write until we've updated the index on disk, otherwise the cache would be
162  * inconsistent in the event of an unclean shutdown. This means that without the
163  * journal, on random write workloads we constantly have to update all the leaf
164  * nodes in the btree, and those writes will be mostly empty (appending at most
 165  * a few keys each) - highly inefficient in terms of the amount of metadata written,
166  * and it puts more strain on the various btree resorting/compacting code.
167  *
168  * The journal is just a log of keys we've inserted; on startup we just reinsert
169  * all the keys in the open journal entries. That means that when we're updating
170  * a node in the btree, we can wait until a 4k block of keys fills up before
171  * writing them out.
172  *
173  * For simplicity, we only journal updates to leaf nodes; updates to parent
174  * nodes are rare enough (since our leaf nodes are huge) that it wasn't worth
175  * the complexity to deal with journalling them (in particular, journal replay)
176  * - updates to non leaf nodes just happen synchronously (see btree_split()).
177  */
178 
179 #undef pr_fmt
180 #ifdef __KERNEL__
181 #define pr_fmt(fmt) "bcachefs: %s() " fmt "\n", __func__
182 #else
183 #define pr_fmt(fmt) "%s() " fmt "\n", __func__
184 #endif
185 
186 #include <linux/backing-dev-defs.h>
187 #include <linux/bug.h>
188 #include <linux/bio.h>
189 #include <linux/closure.h>
190 #include <linux/kobject.h>
191 #include <linux/list.h>
192 #include <linux/math64.h>
193 #include <linux/mutex.h>
194 #include <linux/percpu-refcount.h>
195 #include <linux/percpu-rwsem.h>
196 #include <linux/refcount.h>
197 #include <linux/rhashtable.h>
198 #include <linux/rwsem.h>
199 #include <linux/semaphore.h>
200 #include <linux/seqlock.h>
201 #include <linux/shrinker.h>
202 #include <linux/srcu.h>
203 #include <linux/types.h>
204 #include <linux/workqueue.h>
205 #include <linux/zstd.h>
206 
207 #include "bcachefs_format.h"
208 #include "disk_accounting_types.h"
209 #include "errcode.h"
210 #include "fifo.h"
211 #include "nocow_locking_types.h"
212 #include "opts.h"
213 #include "recovery_passes_types.h"
214 #include "sb-errors_types.h"
215 #include "seqmutex.h"
216 #include "time_stats.h"
217 #include "util.h"
218 
219 #ifdef CONFIG_BCACHEFS_DEBUG
220 #define BCH_WRITE_REF_DEBUG
221 #endif
222 
223 #ifndef dynamic_fault
224 #define dynamic_fault(...)              0
225 #endif
226 
227 #define race_fault(...)                 dynamic_fault("bcachefs:race")
228 
229 #define count_event(_c, _name)  this_cpu_inc((_c)->counters[BCH_COUNTER_##_name])
230 
231 #define trace_and_count(_c, _name, ...)                                 \
232 do {                                                                    \
233         count_event(_c, _name);                                         \
234         trace_##_name(__VA_ARGS__);                                     \
235 } while (0)
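
/*
 * Usage sketch for trace_and_count(): the counter and the tracepoint share a
 * name, so one invocation bumps the persistent counter and emits the trace
 * event. The arguments here are illustrative - they must match the particular
 * trace event's prototype, not count_event()'s:
 *
 *	trace_and_count(c, bucket_alloc, ca, "user");
 */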
236 
237 #define bch2_fs_init_fault(name)                                        \
238         dynamic_fault("bcachefs:bch_fs_init:" name)
239 #define bch2_meta_read_fault(name)                                      \
240          dynamic_fault("bcachefs:meta:read:" name)
241 #define bch2_meta_write_fault(name)                                     \
242          dynamic_fault("bcachefs:meta:write:" name)
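
/*
 * These expand to 0 when the dynamic fault injection framework isn't built
 * in. A hedged usage sketch (the fault name is hypothetical):
 *
 *	if (bch2_fs_init_fault("alloc_read"))
 *		return -EIO;
 *
 * letting error paths be exercised at runtime without real I/O failures.
 */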
243 
244 #ifdef __KERNEL__
245 #define BCACHEFS_LOG_PREFIX
246 #endif
247 
248 #ifdef BCACHEFS_LOG_PREFIX
249 
250 #define bch2_log_msg(_c, fmt)                   "bcachefs (%s): " fmt, ((_c)->name)
251 #define bch2_fmt_dev(_ca, fmt)                  "bcachefs (%s): " fmt "\n", ((_ca)->name)
252 #define bch2_fmt_dev_offset(_ca, _offset, fmt)  "bcachefs (%s sector %llu): " fmt "\n", ((_ca)->name), (_offset)
253 #define bch2_fmt_inum(_c, _inum, fmt)           "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
254 #define bch2_fmt_inum_offset(_c, _inum, _offset, fmt)                   \
255          "bcachefs (%s inum %llu offset %llu): " fmt "\n", ((_c)->name), (_inum), (_offset)
256 
257 #else
258 
259 #define bch2_log_msg(_c, fmt)                   fmt
260 #define bch2_fmt_dev(_ca, fmt)                  "%s: " fmt "\n", ((_ca)->name)
261 #define bch2_fmt_dev_offset(_ca, _offset, fmt)  "%s sector %llu: " fmt "\n", ((_ca)->name), (_offset)
262 #define bch2_fmt_inum(_c, _inum, fmt)           "inum %llu: " fmt "\n", (_inum)
263 #define bch2_fmt_inum_offset(_c, _inum, _offset, fmt)                           \
264          "inum %llu offset %llu: " fmt "\n", (_inum), (_offset)
265 
266 #endif
267 
268 #define bch2_fmt(_c, fmt)               bch2_log_msg(_c, fmt "\n")
269 
270 void bch2_print_str(struct bch_fs *, const char *);
271 
272 __printf(2, 3)
273 void bch2_print_opts(struct bch_opts *, const char *, ...);
274 
275 __printf(2, 3)
276 void __bch2_print(struct bch_fs *c, const char *fmt, ...);
277 
278 #define maybe_dev_to_fs(_c)     _Generic((_c),                          \
279         struct bch_dev *:       ((struct bch_dev *) (_c))->fs,          \
280         struct bch_fs *:        (_c))
281 
282 #define bch2_print(_c, ...) __bch2_print(maybe_dev_to_fs(_c), __VA_ARGS__)
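
/*
 * maybe_dev_to_fs() uses C11 _Generic to dispatch on the argument's type, so
 * bch2_print() accepts either a filesystem or a device. Illustrative sketch:
 *
 *	bch2_print(c,  "...");	// struct bch_fs *: passed through unchanged
 *	bch2_print(ca, "...");	// struct bch_dev *: resolves to ca->fs
 */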
283 
284 #define bch2_print_ratelimited(_c, ...)                                 \
285 do {                                                                    \
286         static DEFINE_RATELIMIT_STATE(_rs,                              \
287                                       DEFAULT_RATELIMIT_INTERVAL,       \
288                                       DEFAULT_RATELIMIT_BURST);         \
289                                                                         \
290         if (__ratelimit(&_rs))                                          \
291                 bch2_print(_c, __VA_ARGS__);                            \
292 } while (0)
293 
294 #define bch_info(c, fmt, ...) \
295         bch2_print(c, KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
296 #define bch_notice(c, fmt, ...) \
297         bch2_print(c, KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__)
298 #define bch_warn(c, fmt, ...) \
299         bch2_print(c, KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
300 #define bch_warn_ratelimited(c, fmt, ...) \
301         bch2_print_ratelimited(c, KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
302 
303 #define bch_err(c, fmt, ...) \
304         bch2_print(c, KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
305 #define bch_err_dev(ca, fmt, ...) \
306         bch2_print(c, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
307 #define bch_err_dev_offset(ca, _offset, fmt, ...) \
308         bch2_print(c, KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
309 #define bch_err_inum(c, _inum, fmt, ...) \
310         bch2_print(c, KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
311 #define bch_err_inum_offset(c, _inum, _offset, fmt, ...) \
312         bch2_print(c, KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
313 
314 #define bch_err_ratelimited(c, fmt, ...) \
315         bch2_print_ratelimited(c, KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
316 #define bch_err_dev_ratelimited(ca, fmt, ...) \
317         bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
318 #define bch_err_dev_offset_ratelimited(ca, _offset, fmt, ...) \
319         bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev_offset(ca, _offset, fmt), ##__VA_ARGS__)
320 #define bch_err_inum_ratelimited(c, _inum, fmt, ...) \
321         bch2_print_ratelimited(c, KERN_ERR bch2_fmt_inum(c, _inum, fmt), ##__VA_ARGS__)
322 #define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
323         bch2_print_ratelimited(c, KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
324 
325 static inline bool should_print_err(int err)
326 {
327         return err && !bch2_err_matches(err, BCH_ERR_transaction_restart);
328 }
329 
330 #define bch_err_fn(_c, _ret)                                            \
331 do {                                                                    \
332         if (should_print_err(_ret))                                     \
333                 bch_err(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
334 } while (0)
335 
336 #define bch_err_fn_ratelimited(_c, _ret)                                \
337 do {                                                                    \
338         if (should_print_err(_ret))                                     \
339                 bch_err_ratelimited(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
340 } while (0)
341 
342 #define bch_err_msg(_c, _ret, _msg, ...)                                \
343 do {                                                                    \
344         if (should_print_err(_ret))                                     \
345                 bch_err(_c, "%s(): error " _msg " %s", __func__,        \
346                         ##__VA_ARGS__, bch2_err_str(_ret));             \
347 } while (0)
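
/*
 * Typical error-path usage of the helpers above (the function called is
 * hypothetical). should_print_err() filters out transaction restarts, which
 * are expected control flow rather than real errors:
 *
 *	ret = some_btree_update(trans);
 *	bch_err_msg(c, ret, "updating example btree");
 */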
348 
349 #define bch_verbose(c, fmt, ...)                                        \
350 do {                                                                    \
351         if ((c)->opts.verbose)                                          \
352                 bch_info(c, fmt, ##__VA_ARGS__);                        \
353 } while (0)
354 
355 #define pr_verbose_init(opts, fmt, ...)                                 \
356 do {                                                                    \
357         if (opt_get(opts, verbose))                                     \
358                 pr_info(fmt, ##__VA_ARGS__);                            \
359 } while (0)
360 
361 /* Parameters that are useful for debugging, but should always be compiled in: */
362 #define BCH_DEBUG_PARAMS_ALWAYS()                                       \
363         BCH_DEBUG_PARAM(key_merging_disabled,                           \
364                 "Disables merging of extents")                          \
365         BCH_DEBUG_PARAM(btree_node_merging_disabled,                    \
366                 "Disables merging of btree nodes")                      \
367         BCH_DEBUG_PARAM(btree_gc_always_rewrite,                        \
368                 "Causes mark and sweep to compact and rewrite every "   \
369                 "btree node it traverses")                              \
370         BCH_DEBUG_PARAM(btree_gc_rewrite_disabled,                      \
371                 "Disables rewriting of btree nodes during mark and sweep")\
372         BCH_DEBUG_PARAM(btree_shrinker_disabled,                        \
373                 "Disables the shrinker callback for the btree node cache")\
374         BCH_DEBUG_PARAM(verify_btree_ondisk,                            \
375                 "Reread btree nodes at various points to verify the "   \
376                 "mergesort in the read path against modifications "     \
377                 "done in memory")                                       \
378         BCH_DEBUG_PARAM(verify_all_btree_replicas,                      \
379                 "When reading btree nodes, read all replicas and "      \
380                 "compare them")                                         \
381         BCH_DEBUG_PARAM(backpointers_no_use_write_buffer,               \
382                 "Don't use the write buffer for backpointers, enabling "\
383                 "extra runtime checks")
384 
385 /* Parameters that should only be compiled in debug mode: */
386 #define BCH_DEBUG_PARAMS_DEBUG()                                        \
387         BCH_DEBUG_PARAM(expensive_debug_checks,                         \
388                 "Enables various runtime debugging checks that "        \
389                 "significantly affect performance")                     \
390         BCH_DEBUG_PARAM(debug_check_iterators,                          \
391                 "Enables extra verification for btree iterators")       \
392         BCH_DEBUG_PARAM(debug_check_btree_accounting,                   \
393                 "Verify btree accounting for keys within a node")       \
394         BCH_DEBUG_PARAM(journal_seq_verify,                             \
395                 "Store the journal sequence number in the version "     \
396                 "number of every btree key, and verify that btree "     \
397                 "update ordering is preserved during recovery")         \
398         BCH_DEBUG_PARAM(inject_invalid_keys,                            \
 399                 "Inject invalid keys into the btree, to test "          \
 400                 "the validation, error handling and repair "            \
 401                 "paths")                                                \
402         BCH_DEBUG_PARAM(test_alloc_startup,                             \
 403                 "Force allocator startup to use the slowpath where it " \
 404                 "can't find enough free buckets without invalidating "  \
405                 "cached data")                                          \
406         BCH_DEBUG_PARAM(force_reconstruct_read,                         \
 407                 "Force reads to use the reconstruct path when reading " \
408                 "from erasure coded extents")                           \
409         BCH_DEBUG_PARAM(test_restart_gc,                                \
410                 "Test restarting mark and sweep gc when bucket gens change")
411 
412 #define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()
413 
414 #ifdef CONFIG_BCACHEFS_DEBUG
415 #define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALL()
416 #else
417 #define BCH_DEBUG_PARAMS() BCH_DEBUG_PARAMS_ALWAYS()
418 #endif
419 
420 #define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name;
421 BCH_DEBUG_PARAMS()
422 #undef BCH_DEBUG_PARAM
423 
424 #ifndef CONFIG_BCACHEFS_DEBUG
425 #define BCH_DEBUG_PARAM(name, description) static const __maybe_unused bool bch2_##name;
426 BCH_DEBUG_PARAMS_DEBUG()
427 #undef BCH_DEBUG_PARAM
428 #endif
429 
430 #define BCH_TIME_STATS()                        \
431         x(btree_node_mem_alloc)                 \
432         x(btree_node_split)                     \
433         x(btree_node_compact)                   \
434         x(btree_node_merge)                     \
435         x(btree_node_sort)                      \
436         x(btree_node_read)                      \
437         x(btree_node_read_done)                 \
438         x(btree_interior_update_foreground)     \
439         x(btree_interior_update_total)          \
440         x(btree_gc)                             \
441         x(data_write)                           \
442         x(data_read)                            \
443         x(data_promote)                         \
444         x(journal_flush_write)                  \
445         x(journal_noflush_write)                \
446         x(journal_flush_seq)                    \
447         x(blocked_journal_low_on_space)         \
448         x(blocked_journal_low_on_pin)           \
449         x(blocked_journal_max_in_flight)        \
450         x(blocked_key_cache_flush)              \
451         x(blocked_allocate)                     \
452         x(blocked_allocate_open_bucket)         \
453         x(blocked_write_buffer_full)            \
454         x(nocow_lock_contended)
455 
456 enum bch_time_stats {
457 #define x(name) BCH_TIME_##name,
458         BCH_TIME_STATS()
459 #undef x
460         BCH_TIME_STAT_NR
461 };
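
/*
 * BCH_TIME_STATS() is an x-macro: each user defines x() locally, expands the
 * list, then undefines it. The enum above expands to
 * BCH_TIME_btree_node_mem_alloc, BCH_TIME_btree_node_split, ..., ending with
 * BCH_TIME_STAT_NR. The same list can generate matching name strings - a
 * sketch of the pattern, not necessarily a table defined in this file:
 *
 *	static const char * const time_stat_names[] = {
 *	#define x(name) #name,
 *		BCH_TIME_STATS()
 *	#undef x
 *	};
 */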
462 
463 #include "alloc_types.h"
464 #include "btree_gc_types.h"
465 #include "btree_types.h"
466 #include "btree_node_scan_types.h"
467 #include "btree_write_buffer_types.h"
468 #include "buckets_types.h"
469 #include "buckets_waiting_for_journal_types.h"
470 #include "clock_types.h"
471 #include "disk_groups_types.h"
472 #include "ec_types.h"
473 #include "journal_types.h"
474 #include "keylist_types.h"
475 #include "quota_types.h"
476 #include "rebalance_types.h"
477 #include "replicas_types.h"
478 #include "sb-members_types.h"
479 #include "subvolume_types.h"
480 #include "super_types.h"
481 #include "thread_with_file_types.h"
482 
483 /* Number of nodes btree coalesce will try to coalesce at once */
484 #define GC_MERGE_NODES          4U
485 
486 /* Maximum number of nodes we might need to allocate atomically: */
487 #define BTREE_RESERVE_MAX       (BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
488 
489 /* Size of the freelist we allocate btree nodes from: */
490 #define BTREE_NODE_RESERVE      (BTREE_RESERVE_MAX * 4)
491 
492 #define BTREE_NODE_OPEN_BUCKET_RESERVE  (BTREE_RESERVE_MAX * BCH_REPLICAS_MAX)
493 
494 struct btree;
495 
496 struct io_count {
497         u64                     sectors[2][BCH_DATA_NR];
498 };
499 
500 struct discard_in_flight {
501         bool                    in_progress:1;
502         u64                     bucket:63;
503 };
504 
505 struct bch_dev {
506         struct kobject          kobj;
507 #ifdef CONFIG_BCACHEFS_DEBUG
508         atomic_long_t           ref;
509         bool                    dying;
510         unsigned long           last_put;
511 #else
512         struct percpu_ref       ref;
513 #endif
514         struct completion       ref_completion;
515         struct percpu_ref       io_ref;
516         struct completion       io_ref_completion;
517 
518         struct bch_fs           *fs;
519 
520         u8                      dev_idx;
521         /*
522          * Cached version of this device's member info from superblock
523          * Committed by bch2_write_super() -> bch_fs_mi_update()
524          */
525         struct bch_member_cpu   mi;
526         atomic64_t              errors[BCH_MEMBER_ERROR_NR];
527 
528         __uuid_t                uuid;
529         char                    name[BDEVNAME_SIZE];
530 
531         struct bch_sb_handle    disk_sb;
532         struct bch_sb           *sb_read_scratch;
533         int                     sb_write_error;
534         dev_t                   dev;
535         atomic_t                flush_seq;
536 
537         struct bch_devs_mask    self;
538 
539         /*
540          * Buckets:
541          * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
 542          * gc_gens_lock (for device resize); holding any one of them is
 543          * sufficient for access - or rcu_read_lock(), but only for dev_ptr_stale().
544          */
545         struct bucket_array __rcu *buckets_gc;
546         struct bucket_gens __rcu *bucket_gens;
547         u8                      *oldest_gen;
548         unsigned long           *buckets_nouse;
549         struct rw_semaphore     bucket_lock;
550 
551         struct bch_dev_usage __percpu   *usage;
552 
553         /* Allocator: */
554         u64                     new_fs_bucket_idx;
555         u64                     alloc_cursor[3];
556 
557         unsigned                nr_open_buckets;
558         unsigned                nr_btree_reserve;
559 
560         size_t                  inc_gen_needs_gc;
561         size_t                  inc_gen_really_needs_gc;
562         size_t                  buckets_waiting_on_journal;
563 
564         struct work_struct      invalidate_work;
565         struct work_struct      discard_work;
566         struct mutex            discard_buckets_in_flight_lock;
567         DARRAY(struct discard_in_flight)        discard_buckets_in_flight;
568         struct work_struct      discard_fast_work;
569 
570         atomic64_t              rebalance_work;
571 
572         struct journal_device   journal;
573         u64                     prev_journal_sector;
574 
575         struct work_struct      io_error_work;
576 
577         /* The rest of this all shows up in sysfs */
578         atomic64_t              cur_latency[2];
579         struct bch2_time_stats_quantiles io_latency[2];
580 
581 #define CONGESTED_MAX           1024
582         atomic_t                congested;
583         u64                     congested_last;
584 
585         struct io_count __percpu *io_done;
586 };
587 
588 /*
589  * initial_gc_unfixed
590  * error
591  * topology error
592  */
593 
594 #define BCH_FS_FLAGS()                  \
595         x(new_fs)                       \
596         x(started)                      \
597         x(btree_running)                \
598         x(accounting_replay_done)       \
599         x(may_go_rw)                    \
600         x(rw)                           \
601         x(was_rw)                       \
602         x(stopping)                     \
603         x(emergency_ro)                 \
604         x(going_ro)                     \
605         x(write_disable_complete)       \
606         x(clean_shutdown)               \
607         x(fsck_running)                 \
608         x(initial_gc_unfixed)           \
609         x(need_delete_dead_snapshots)   \
610         x(error)                        \
611         x(topology_error)               \
612         x(errors_fixed)                 \
613         x(errors_not_fixed)             \
614         x(no_invalid_checks)
615 
616 enum bch_fs_flags {
617 #define x(n)            BCH_FS_##n,
618         BCH_FS_FLAGS()
619 #undef x
620 };
621 
622 struct btree_debug {
623         unsigned                id;
624 };
625 
626 #define BCH_TRANSACTIONS_NR 128
627 
628 struct btree_transaction_stats {
629         struct bch2_time_stats  duration;
630         struct bch2_time_stats  lock_hold_times;
631         struct mutex            lock;
632         unsigned                nr_max_paths;
633         unsigned                journal_entries_size;
634         unsigned                max_mem;
635         char                    *max_paths_text;
636 };
637 
638 struct bch_fs_pcpu {
639         u64                     sectors_available;
640 };
641 
642 struct journal_seq_blacklist_table {
643         size_t                  nr;
644         struct journal_seq_blacklist_table_entry {
645                 u64             start;
646                 u64             end;
647                 bool            dirty;
648         }                       entries[];
649 };
650 
651 struct journal_keys {
652         /* must match layout in darray_types.h */
653         size_t                  nr, size;
654         struct journal_key {
655                 u64             journal_seq;
656                 u32             journal_offset;
657                 enum btree_id   btree_id:8;
658                 unsigned        level:8;
659                 bool            allocated;
660                 bool            overwritten;
661                 struct bkey_i   *k;
662         }                       *data;
663         /*
664          * Gap buffer: instead of all the empty space in the array being at the
665          * end of the buffer - from @nr to @size - the empty space is at @gap.
666          * This means that sequential insertions are O(n) instead of O(n^2).
667          */
668         size_t                  gap;
669         atomic_t                ref;
670         bool                    initial_ref_held;
671 };
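
/*
 * Gap-buffer insertion sketch for struct journal_keys (illustrative, not the
 * actual helpers): the free space sits at @gap, so inserting at @i means
 * moving the gap there first, after which the insert itself is O(1):
 *
 *	size_t gap_len = keys->size - keys->nr;
 *
 *	if (i < keys->gap)	// shift [i, gap) right, across the gap
 *		memmove(keys->data + i + gap_len, keys->data + i,
 *			(keys->gap - i) * sizeof(*keys->data));
 *	else			// symmetric shift left for i > gap
 *		memmove(keys->data + keys->gap, keys->data + keys->gap + gap_len,
 *			(i - keys->gap) * sizeof(*keys->data));
 *	keys->gap = i;
 *
 *	keys->data[keys->gap++] = new_key;
 *	keys->nr++;
 *
 * Sequential insertions leave the gap where the next insert lands, which is
 * what makes them O(n) overall instead of O(n^2).
 */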
672 
673 struct btree_trans_buf {
674         struct btree_trans      *trans;
675 };
676 
677 #define BCACHEFS_ROOT_SUBVOL_INUM                                       \
678         ((subvol_inum) { BCACHEFS_ROOT_SUBVOL,  BCACHEFS_ROOT_INO })
679 
680 #define BCH_WRITE_REFS()                                                \
681         x(trans)                                                        \
682         x(write)                                                        \
683         x(promote)                                                      \
684         x(node_rewrite)                                                 \
685         x(stripe_create)                                                \
686         x(stripe_delete)                                                \
687         x(reflink)                                                      \
688         x(fallocate)                                                    \
689         x(fsync)                                                        \
690         x(dio_write)                                                    \
691         x(discard)                                                      \
692         x(discard_fast)                                                 \
693         x(invalidate)                                                   \
694         x(delete_dead_snapshots)                                        \
695         x(gc_gens)                                                      \
696         x(snapshot_delete_pagecache)                                    \
697         x(sysfs)                                                        \
698         x(btree_write_buffer)
699 
700 enum bch_write_ref {
701 #define x(n) BCH_WRITE_REF_##n,
702         BCH_WRITE_REFS()
703 #undef x
704         BCH_WRITE_REF_NR,
705 };
706 
707 struct bch_fs {
708         struct closure          cl;
709 
710         struct list_head        list;
711         struct kobject          kobj;
712         struct kobject          counters_kobj;
713         struct kobject          internal;
714         struct kobject          opts_dir;
715         struct kobject          time_stats;
716         unsigned long           flags;
717 
718         int                     minor;
719         struct device           *chardev;
720         struct super_block      *vfs_sb;
721         dev_t                   dev;
722         char                    name[40];
723         struct stdio_redirect   *stdio;
724         struct task_struct      *stdio_filter;
725 
726         /* ro/rw, add/remove/resize devices: */
727         struct rw_semaphore     state_lock;
728 
729         /* Counts outstanding writes, for clean transition to read-only */
730 #ifdef BCH_WRITE_REF_DEBUG
731         atomic_long_t           writes[BCH_WRITE_REF_NR];
732 #else
733         struct percpu_ref       writes;
734 #endif
735         /*
 736          * Analogous to c->writes, for asynchronous ops that don't necessarily
737          * need fs to be read-write
738          */
739         refcount_t              ro_ref;
740         wait_queue_head_t       ro_ref_wait;
741 
742         struct work_struct      read_only_work;
743 
744         struct bch_dev __rcu    *devs[BCH_SB_MEMBERS_MAX];
745 
746         struct bch_accounting_mem accounting;
747 
748         struct bch_replicas_cpu replicas;
749         struct bch_replicas_cpu replicas_gc;
750         struct mutex            replicas_gc_lock;
751 
752         struct journal_entry_res btree_root_journal_res;
753         struct journal_entry_res clock_journal_res;
754 
755         struct bch_disk_groups_cpu __rcu *disk_groups;
756 
757         struct bch_opts         opts;
758 
 759         /* Updated by bch2_sb_update(): */
760         struct {
761                 __uuid_t        uuid;
762                 __uuid_t        user_uuid;
763 
764                 u16             version;
765                 u16             version_min;
766                 u16             version_upgrade_complete;
767 
768                 u8              nr_devices;
769                 u8              clean;
770 
771                 u8              encryption_type;
772 
773                 u64             time_base_lo;
774                 u32             time_base_hi;
775                 unsigned        time_units_per_sec;
776                 unsigned        nsec_per_time_unit;
777                 u64             features;
778                 u64             compat;
779                 unsigned long   errors_silent[BITS_TO_LONGS(BCH_SB_ERR_MAX)];
780                 u64             btrees_lost_data;
781         }                       sb;
782 
783 
784         struct bch_sb_handle    disk_sb;
785 
786         unsigned short          block_bits;     /* ilog2(block_size) */
787 
788         u16                     btree_foreground_merge_threshold;
789 
790         struct closure          sb_write;
791         struct mutex            sb_lock;
792 
793         /* snapshot.c: */
794         struct snapshot_table __rcu *snapshots;
795         struct mutex            snapshot_table_lock;
796         struct rw_semaphore     snapshot_create_lock;
797 
798         struct work_struct      snapshot_delete_work;
799         struct work_struct      snapshot_wait_for_pagecache_and_delete_work;
800         snapshot_id_list        snapshots_unlinked;
801         struct mutex            snapshots_unlinked_lock;
802 
803         /* BTREE CACHE */
804         struct bio_set          btree_bio;
805         struct workqueue_struct *btree_read_complete_wq;
806         struct workqueue_struct *btree_write_submit_wq;
807 
808         struct btree_root       btree_roots_known[BTREE_ID_NR];
809         DARRAY(struct btree_root) btree_roots_extra;
810         struct mutex            btree_root_lock;
811 
812         struct btree_cache      btree_cache;
813 
814         /*
815          * Cache of allocated btree nodes - if we allocate a btree node and
 816          * don't use it, then when we free it that space can't be reused until
 817          * going _all_ the way through the allocator (which exposes us to a
 818          * livelock when allocating btree reserves fails halfway through) -
 819          * instead, we can stick them here:
820          */
821         struct btree_alloc      btree_reserve_cache[BTREE_NODE_RESERVE * 2];
822         unsigned                btree_reserve_cache_nr;
823         struct mutex            btree_reserve_cache_lock;
824 
825         mempool_t               btree_interior_update_pool;
826         struct list_head        btree_interior_update_list;
827         struct list_head        btree_interior_updates_unwritten;
828         struct mutex            btree_interior_update_lock;
829         struct closure_waitlist btree_interior_update_wait;
830 
831         struct workqueue_struct *btree_interior_update_worker;
832         struct work_struct      btree_interior_update_work;
833 
834         struct workqueue_struct *btree_node_rewrite_worker;
835 
836         struct list_head        pending_node_rewrites;
837         struct mutex            pending_node_rewrites_lock;
838 
839         /* btree_io.c: */
840         spinlock_t              btree_write_error_lock;
841         struct btree_write_stats {
842                 atomic64_t      nr;
843                 atomic64_t      bytes;
844         }                       btree_write_stats[BTREE_WRITE_TYPE_NR];
845 
846         /* btree_iter.c: */
847         struct seqmutex         btree_trans_lock;
848         struct list_head        btree_trans_list;
849         mempool_t               btree_trans_pool;
850         mempool_t               btree_trans_mem_pool;
851         struct btree_trans_buf  __percpu        *btree_trans_bufs;
852 
853         struct srcu_struct      btree_trans_barrier;
854         bool                    btree_trans_barrier_initialized;
855 
856         struct btree_key_cache  btree_key_cache;
857         unsigned                btree_key_cache_btrees;
858 
859         struct btree_write_buffer btree_write_buffer;
860 
861         struct workqueue_struct *btree_update_wq;
862         struct workqueue_struct *btree_io_complete_wq;
 863         /* copygc needs its own workqueue for index updates */
864         struct workqueue_struct *copygc_wq;
865         /*
866          * Use a dedicated wq for write ref holder tasks. Required to avoid
867          * dependency problems with other wq tasks that can block on ref
868          * draining, such as read-only transition.
869          */
870         struct workqueue_struct *write_ref_wq;
871 
872         /* ALLOCATION */
873         struct bch_devs_mask    rw_devs[BCH_DATA_NR];
874 
875         u64                     capacity; /* sectors */
876         u64                     reserved; /* sectors */
877 
878         /*
879          * When capacity _decreases_ (due to a disk being removed), we
880          * increment capacity_gen - this invalidates outstanding reservations
881          * and forces them to be revalidated
882          */
883         u32                     capacity_gen;
884         unsigned                bucket_size_max;
885 
886         atomic64_t              sectors_available;
887         struct mutex            sectors_available_lock;
888 
889         struct bch_fs_pcpu __percpu     *pcpu;
890 
891         struct percpu_rw_semaphore      mark_lock;
892 
893         seqcount_t                      usage_lock;
894         struct bch_fs_usage_base __percpu *usage;
895         u64 __percpu            *online_reserved;
896 
897         unsigned long           allocator_last_stuck;
898 
899         struct io_clock         io_clock[2];
900 
901         /* JOURNAL SEQ BLACKLIST */
902         struct journal_seq_blacklist_table *
903                                 journal_seq_blacklist_table;
904 
905         /* ALLOCATOR */
906         spinlock_t              freelist_lock;
907         struct closure_waitlist freelist_wait;
908 
909         open_bucket_idx_t       open_buckets_freelist;
910         open_bucket_idx_t       open_buckets_nr_free;
911         struct closure_waitlist open_buckets_wait;
912         struct open_bucket      open_buckets[OPEN_BUCKETS_COUNT];
913         open_bucket_idx_t       open_buckets_hash[OPEN_BUCKETS_COUNT];
914 
915         open_bucket_idx_t       open_buckets_partial[OPEN_BUCKETS_COUNT];
916         open_bucket_idx_t       open_buckets_partial_nr;
917 
918         struct write_point      btree_write_point;
919         struct write_point      rebalance_write_point;
920 
921         struct write_point      write_points[WRITE_POINT_MAX];
922         struct hlist_head       write_points_hash[WRITE_POINT_HASH_NR];
923         struct mutex            write_points_hash_lock;
924         unsigned                write_points_nr;
925 
926         struct buckets_waiting_for_journal buckets_waiting_for_journal;
927 
928         /* GARBAGE COLLECTION */
929         struct work_struct      gc_gens_work;
930         unsigned long           gc_count;
931 
932         enum btree_id           gc_gens_btree;
933         struct bpos             gc_gens_pos;
934 
935         /*
936          * Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
937          * has been marked by GC.
938          *
939          * gc_cur_phase is a superset of btree_ids (BTREE_ID_extents etc.)
940          *
941          * Protected by gc_pos_lock. Only written to by GC thread, so GC thread
942          * can read without a lock.
943          */
944         seqcount_t              gc_pos_lock;
945         struct gc_pos           gc_pos;
946 
947         /*
948          * The allocation code needs gc_mark in struct bucket to be correct, but
949          * it's not while a gc is in progress.
950          */
951         struct rw_semaphore     gc_lock;
952         struct mutex            gc_gens_lock;
953 
954         /* IO PATH */
955         struct semaphore        io_in_flight;
956         struct bio_set          bio_read;
957         struct bio_set          bio_read_split;
958         struct bio_set          bio_write;
959         struct bio_set          replica_set;
960         struct mutex            bio_bounce_pages_lock;
961         mempool_t               bio_bounce_pages;
962         struct bucket_nocow_lock_table
963                                 nocow_locks;
964         struct rhashtable       promote_table;
965 
966         mempool_t               compression_bounce[2];
967         mempool_t               compress_workspace[BCH_COMPRESSION_TYPE_NR];
968         mempool_t               decompress_workspace;
969         size_t                  zstd_workspace_size;
970 
971         struct crypto_shash     *sha256;
972         struct crypto_sync_skcipher *chacha20;
973         struct crypto_shash     *poly1305;
974 
975         atomic64_t              key_version;
976 
977         mempool_t               large_bkey_pool;
978 
979         /* MOVE.C */
980         struct list_head        moving_context_list;
981         struct mutex            moving_context_lock;
982 
983         /* REBALANCE */
984         struct bch_fs_rebalance rebalance;
985 
986         /* COPYGC */
987         struct task_struct      *copygc_thread;
988         struct write_point      copygc_write_point;
989         s64                     copygc_wait_at;
990         s64                     copygc_wait;
991         bool                    copygc_running;
992         wait_queue_head_t       copygc_running_wq;
993 
994         /* STRIPES: */
995         GENRADIX(struct stripe) stripes;
996         GENRADIX(struct gc_stripe) gc_stripes;
997 
998         struct hlist_head       ec_stripes_new[32];
999         spinlock_t              ec_stripes_new_lock;
1000 
1001         ec_stripes_heap         ec_stripes_heap;
1002         struct mutex            ec_stripes_heap_lock;
1003 
1004         /* ERASURE CODING */
1005         struct list_head        ec_stripe_head_list;
1006         struct mutex            ec_stripe_head_lock;
1007 
1008         struct list_head        ec_stripe_new_list;
1009         struct mutex            ec_stripe_new_lock;
1010         wait_queue_head_t       ec_stripe_new_wait;
1011 
1012         struct work_struct      ec_stripe_create_work;
1013         u64                     ec_stripe_hint;
1014 
1015         struct work_struct      ec_stripe_delete_work;
1016 
1017         struct bio_set          ec_bioset;
1018 
1019         /* REFLINK */
1020         reflink_gc_table        reflink_gc_table;
1021         size_t                  reflink_gc_nr;
1022 
1023         /* fs.c */
1024         struct list_head        vfs_inodes_list;
1025         struct mutex            vfs_inodes_lock;
1026 
1027         /* VFS IO PATH - fs-io.c */
1028         struct bio_set          writepage_bioset;
1029         struct bio_set          dio_write_bioset;
1030         struct bio_set          dio_read_bioset;
1031         struct bio_set          nocow_flush_bioset;
1032 
1033         /* QUOTAS */
1034         struct bch_memquota_type quotas[QTYP_NR];
1035 
1036         /* RECOVERY */
1037         u64                     journal_replay_seq_start;
1038         u64                     journal_replay_seq_end;
1039         /*
1040          * Two different uses:
1041          * "Has this fsck pass run?" - i.e. should this type of error cause an
1042          * emergency read-only?
1043          * And, in certain situations fsck will rewind to an earlier pass: used
1044          * for signaling to the toplevel code which pass we want to run now.
1045          */
1046         enum bch_recovery_pass  curr_recovery_pass;
1047         /* bitmap of explicitly enabled recovery passes: */
1048         u64                     recovery_passes_explicit;
1049         /* bitmask of recovery passes that we actually ran */
1050         u64                     recovery_passes_complete;
1051         /* never rewinds version of curr_recovery_pass */
1052         enum bch_recovery_pass  recovery_pass_done;
1053         struct semaphore        online_fsck_mutex;
1054 
1055         /* DEBUG JUNK */
1056         struct dentry           *fs_debug_dir;
1057         struct dentry           *btree_debug_dir;
1058         struct btree_debug      btree_debug[BTREE_ID_NR];
1059         struct btree            *verify_data;
1060         struct btree_node       *verify_ondisk;
1061         struct mutex            verify_lock;
1062 
1063         u64                     *unused_inode_hints;
1064         unsigned                inode_shard_bits;
1065 
1066         /*
1067          * A btree node on disk could have too many bsets for an iterator to fit
1068          * on the stack - have to dynamically allocate them
1069          */
1070         mempool_t               fill_iter;
1071 
1072         mempool_t               btree_bounce_pool;
1073 
1074         struct journal          journal;
1075         GENRADIX(struct journal_replay *) journal_entries;
1076         u64                     journal_entries_base_seq;
1077         struct journal_keys     journal_keys;
1078         struct list_head        journal_iters;
1079 
1080         struct find_btree_nodes found_btree_nodes;
1081 
1082         u64                     last_bucket_seq_cleanup;
1083 
1084         u64                     counters_on_mount[BCH_COUNTER_NR];
1085         u64 __percpu            *counters;
1086 
1087         unsigned                copy_gc_enabled:1;
1088         bool                    promote_whole_extents;
1089 
1090         struct bch2_time_stats  times[BCH_TIME_STAT_NR];
1091 
1092         struct btree_transaction_stats btree_transaction_stats[BCH_TRANSACTIONS_NR];
1093 
1094         /* ERRORS */
1095         struct list_head        fsck_error_msgs;
1096         struct mutex            fsck_error_msgs_lock;
1097         bool                    fsck_alloc_msgs_err;
1098 
1099         bch_sb_errors_cpu       fsck_error_counts;
1100         struct mutex            fsck_error_counts_lock;
1101 };
1102 
1103 extern struct wait_queue_head bch2_read_only_wait;
1104 
1105 static inline void bch2_write_ref_get(struct bch_fs *c, enum bch_write_ref ref)
1106 {
1107 #ifdef BCH_WRITE_REF_DEBUG
1108         atomic_long_inc(&c->writes[ref]);
1109 #else
1110         percpu_ref_get(&c->writes);
1111 #endif
1112 }
1113 
1114 static inline bool __bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref)
1115 {
1116 #ifdef BCH_WRITE_REF_DEBUG
1117         return !test_bit(BCH_FS_going_ro, &c->flags) &&
1118                 atomic_long_inc_not_zero(&c->writes[ref]);
1119 #else
1120         return percpu_ref_tryget(&c->writes);
1121 #endif
1122 }
1123 
1124 static inline bool bch2_write_ref_tryget(struct bch_fs *c, enum bch_write_ref ref)
1125 {
1126 #ifdef BCH_WRITE_REF_DEBUG
1127         return !test_bit(BCH_FS_going_ro, &c->flags) &&
1128                 atomic_long_inc_not_zero(&c->writes[ref]);
1129 #else
1130         return percpu_ref_tryget_live(&c->writes);
1131 #endif
1132 }
1133 
1134 static inline void bch2_write_ref_put(struct bch_fs *c, enum bch_write_ref ref)
1135 {
1136 #ifdef BCH_WRITE_REF_DEBUG
1137         long v = atomic_long_dec_return(&c->writes[ref]);
1138 
1139         BUG_ON(v < 0);
1140         if (v)
1141                 return;
1142         for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
1143                 if (atomic_long_read(&c->writes[i]))
1144                         return;
1145 
1146         set_bit(BCH_FS_write_disable_complete, &c->flags);
1147         wake_up(&bch2_read_only_wait);
1148 #else
1149         percpu_ref_put(&c->writes);
1150 #endif
1151 }
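
/*
 * Typical pattern for the write ref helpers above (the work function is
 * hypothetical): take a named ref before writing, drop it afterwards so a
 * read-only transition can wait for outstanding writers:
 *
 *	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
 *		return;	// going (or gone) read-only, skip the work
 *
 *	do_discard_work(c);
 *	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
 *
 * With BCH_WRITE_REF_DEBUG each enum value gets its own counter, so a leaked
 * ref can be attributed to a specific subsystem.
 */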
1152 
1153 static inline bool bch2_ro_ref_tryget(struct bch_fs *c)
1154 {
1155         if (test_bit(BCH_FS_stopping, &c->flags))
1156                 return false;
1157 
1158         return refcount_inc_not_zero(&c->ro_ref);
1159 }
1160 
1161 static inline void bch2_ro_ref_put(struct bch_fs *c)
1162 {
1163         if (refcount_dec_and_test(&c->ro_ref))
1164                 wake_up(&c->ro_ref_wait);
1165 }
1166 
1167 static inline void bch2_set_ra_pages(struct bch_fs *c, unsigned ra_pages)
1168 {
1169 #ifndef NO_BCACHEFS_FS
1170         if (c->vfs_sb)
1171                 c->vfs_sb->s_bdi->ra_pages = ra_pages;
1172 #endif
1173 }
1174 
1175 static inline unsigned bucket_bytes(const struct bch_dev *ca)
1176 {
1177         return ca->mi.bucket_size << 9;
1178 }
1179 
1180 static inline unsigned block_bytes(const struct bch_fs *c)
1181 {
1182         return c->opts.block_size;
1183 }
1184 
1185 static inline unsigned block_sectors(const struct bch_fs *c)
1186 {
1187         return c->opts.block_size >> 9;
1188 }
1189 
1190 static inline bool btree_id_cached(const struct bch_fs *c, enum btree_id btree)
1191 {
1192         return c->btree_key_cache_btrees & (1U << btree);
1193 }
1194 
1195 static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
1196 {
1197         struct timespec64 t;
1198         s32 rem;
1199 
1200         time += c->sb.time_base_lo;
1201 
1202         t.tv_sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
1203         t.tv_nsec = rem * c->sb.nsec_per_time_unit;
1204         return t;
1205 }
1206 
1207 static inline s64 timespec_to_bch2_time(const struct bch_fs *c, struct timespec64 ts)
1208 {
1209         return (ts.tv_sec * c->sb.time_units_per_sec +
1210                 (int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo;
1211 }
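
/*
 * Worked example of the conversion above: with time_units_per_sec = 1e9 and
 * nsec_per_time_unit = 1 (nanosecond granularity), { .tv_sec = 2,
 * .tv_nsec = 500 } maps to 2 * 1000000000 + 500 - time_base_lo, and
 * bch2_time_to_timespec() inverts this exactly via div_s64_rem().
 */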
1212 
1213 static inline s64 bch2_current_time(const struct bch_fs *c)
1214 {
1215         struct timespec64 now;
1216 
1217         ktime_get_coarse_real_ts64(&now);
1218         return timespec_to_bch2_time(c, now);
1219 }
1220 
1221 static inline u64 bch2_current_io_time(const struct bch_fs *c, int rw)
1222 {
1223         return max(1ULL, (u64) atomic64_read(&c->io_clock[rw].now) & LRU_TIME_MAX);
1224 }
1225 
1226 static inline struct stdio_redirect *bch2_fs_stdio_redirect(struct bch_fs *c)
1227 {
1228         struct stdio_redirect *stdio = c->stdio;
1229 
1230         if (c->stdio_filter && c->stdio_filter != current)
1231                 stdio = NULL;
1232         return stdio;
1233 }
1234 
1235 static inline unsigned metadata_replicas_required(struct bch_fs *c)
1236 {
1237         return min(c->opts.metadata_replicas,
1238                    c->opts.metadata_replicas_required);
1239 }
1240 
1241 static inline unsigned data_replicas_required(struct bch_fs *c)
1242 {
1243         return min(c->opts.data_replicas,
1244                    c->opts.data_replicas_required);
1245 }
1246 
1247 #define BKEY_PADDED_ONSTACK(key, pad)                           \
1248         struct { struct bkey_i key; __u64 key ## _pad[pad]; }
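
/*
 * Usage sketch: bkeys are variable length, so stack copies need trailing pad
 * space for the value. The pad size here is illustrative:
 *
 *	BKEY_PADDED_ONSTACK(k, 8) tmp;
 *
 *	bkey_init(&tmp.k.k);	// tmp.k is the bkey_i; tmp.k_pad follows it
 */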
1249 
1250 #endif /* _BCACHEFS_H */
1251 
