~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/fs/bcachefs/bcachefs_format.h

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* SPDX-License-Identifier: GPL-2.0 */
  2 #ifndef _BCACHEFS_FORMAT_H
  3 #define _BCACHEFS_FORMAT_H
  4 
  5 /*
  6  * bcachefs on disk data structures
  7  *
  8  * OVERVIEW:
  9  *
 10  * There are three main types of on disk data structures in bcachefs (this is
 11  * reduced from 5 in bcache)
 12  *
 13  *  - superblock
 14  *  - journal
 15  *  - btree
 16  *
 17  * The btree is the primary structure; most metadata exists as keys in the
 18  * various btrees. There are only a small number of btrees, they're not
 19  * sharded - we have one btree for extents, another for inodes, et cetera.
 20  *
 21  * SUPERBLOCK:
 22  *
 23  * The superblock contains the location of the journal, the list of devices in
 24  * the filesystem, and in general any metadata we need in order to decide
 25  * whether we can start a filesystem or prior to reading the journal/btree
 26  * roots.
 27  *
 28  * The superblock is extensible, and most of the contents of the superblock are
 29  * in variable length, type tagged fields; see struct bch_sb_field.
 30  *
 31  * Backup superblocks do not reside in a fixed location; also, superblocks do
 32  * not have a fixed size. To locate backup superblocks we have struct
 33  * bch_sb_layout; we store a copy of this inside every superblock, and also
 34  * before the first superblock.
 35  *
 36  * JOURNAL:
 37  *
 38  * The journal primarily records btree updates in the order they occurred;
 39  * journal replay consists of just iterating over all the keys in the open
 40  * journal entries and re-inserting them into the btrees.
 41  *
 42  * The journal also contains entry types for the btree roots, and blacklisted
 43  * journal sequence numbers (see journal_seq_blacklist.c).
 44  *
 45  * BTREE:
 46  *
 47  * bcachefs btrees are copy on write b+ trees, where nodes are big (typically
 48  * 128k-256k) and log structured. We use struct btree_node for writing the first
 49  * entry in a given node (offset 0), and struct btree_node_entry for all
 50  * subsequent writes.
 51  *
 52  * After the header, btree node entries contain a list of keys in sorted order.
 53  * Values are stored inline with the keys; since values are variable length (and
 54  * keys effectively are variable length too, due to packing) we can't do random
 55  * access without building up additional in memory tables in the btree node read
 56  * path.
 57  *
 58  * BTREE KEYS (struct bkey):
 59  *
 60  * The various btrees share a common format for the key - so as to avoid
 61  * switching in fastpath lookup/comparison code - but define their own
 62  * structures for the key values.
 63  *
 64  * The size of a key/value pair is stored as a u8 in units of u64s, so the max
 65  * size is just under 2k. The common part also contains a type tag for the
 66  * value, and a format field indicating whether the key is packed or not (and
 67  * also meant to allow adding new key fields in the future, if desired).
 68  *
 69  * bkeys, when stored within a btree node, may also be packed. In that case, the
 70  * bkey_format in that node is used to unpack it. Packed bkeys mean that we can
 71  * be generous with field sizes in the common part of the key format (64 bit
 72  * inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
 73  */
 74 
 75 #include <asm/types.h>
 76 #include <asm/byteorder.h>
 77 #include <linux/kernel.h>
 78 #include <linux/uuid.h>
 79 #include <uapi/linux/magic.h>
 80 #include "vstructs.h"
 81 
/*
 * The kernel only has uuid_t; userspace presumably gets __uuid_t from
 * <linux/uuid.h>'s userspace counterpart - alias it so the on-disk structs
 * below read identically in both builds.  (TODO confirm userspace side.)
 */
#ifdef __KERNEL__
typedef uuid_t __uuid_t;
#endif
 85 
/*
 * BITMASK() - generate accessors for a sub-field of a native-endian integer
 * member.
 *
 * Expands to the constants name##_OFFSET and name##_BITS, a getter name(k)
 * returning bits [offset, end) of k->field, and a setter SET_##name(k, v)
 * that replaces those bits with the low (end - offset) bits of v.
 * @end is one past the last bit, so the field width is (end - offset).
 */
#define BITMASK(name, type, field, offset, end)                         \
static const __maybe_unused unsigned    name##_OFFSET = offset;         \
static const __maybe_unused unsigned    name##_BITS = (end - offset);   \
                                                                        \
static inline __u64 name(const type *k)                                 \
{                                                                       \
        return (k->field >> offset) & ~(~0ULL << (end - offset));       \
}                                                                       \
                                                                        \
static inline void SET_##name(type *k, __u64 v)                         \
{                                                                       \
        k->field &= ~(~(~0ULL << (end - offset)) << offset);            \
        k->field |= (v & ~(~0ULL << (end - offset))) << offset;         \
}

/*
 * LE_BITMASK() - same idea as BITMASK(), but for members stored as
 * little-endian __le16/__le32/__le64 on disk: the accessors convert to/from
 * CPU byte order around the shift/mask.  Additionally emits name##_MAX,
 * the largest value the field can hold.
 *
 * Note: the local variable "new" keeps this header C-only ("new" is a C++
 * keyword).
 */
#define LE_BITMASK(_bits, name, type, field, offset, end)               \
static const __maybe_unused unsigned    name##_OFFSET = offset;         \
static const __maybe_unused unsigned    name##_BITS = (end - offset);   \
static const __maybe_unused __u##_bits  name##_MAX = (1ULL << (end - offset)) - 1;\
                                                                        \
static inline __u64 name(const type *k)                                 \
{                                                                       \
        return (__le##_bits##_to_cpu(k->field) >> offset) &             \
                ~(~0ULL << (end - offset));                             \
}                                                                       \
                                                                        \
static inline void SET_##name(type *k, __u64 v)                         \
{                                                                       \
        __u##_bits new = __le##_bits##_to_cpu(k->field);                \
                                                                        \
        new &= ~(~(~0ULL << (end - offset)) << offset);                 \
        new |= (v & ~(~0ULL << (end - offset))) << offset;              \
        k->field = __cpu_to_le##_bits(new);                             \
}

/* Width-specific conveniences: */
#define LE16_BITMASK(n, t, f, o, e)     LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e)     LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e)     LE_BITMASK(64, n, t, f, o, e)
124 
/*
 * Describes how bkeys are packed within a btree node: total size of a packed
 * key in u64s, plus per-field bit widths and offsets (indexed by
 * enum bch_bkey_fields, defined below).
 */
struct bkey_format {
        __u8            key_u64s;       /* size of a packed key, in u64s */
        __u8            nr_fields;
        /* One unused slot for now: */
        __u8            bits_per_field[6];
        __le64          field_offset[6];
};
132 
/* Btree keys - all units are in sectors */

/*
 * A key's position in the keyspace: (inode, offset, snapshot), compared
 * lexicographically in that order.
 */
struct bpos {
        /*
         * Word order matches machine byte order - btree code treats a bpos as a
         * single large integer, for search/comparison purposes
         *
         * Note that wherever a bpos is embedded in another on disk data
         * structure, it has to be byte swabbed when reading in metadata that
         * wasn't written in native endian order:
         */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        __u32           snapshot;
        __u64           offset;
        __u64           inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        __u64           inode;
        __u64           offset;         /* Points to end of extent - sectors */
        __u32           snapshot;
#else
#error edit for your odd byteorder.
#endif
} __packed
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
/* __packed alone would drop to 1-byte alignment; keep 4 on LE: */
__aligned(4)
#endif
;
160 
/* Largest representable value of each bpos field / of an extent size: */
#define KEY_INODE_MAX                   ((__u64)~0ULL)
#define KEY_OFFSET_MAX                  ((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX                ((__u32)~0U)
#define KEY_SIZE_MAX                    ((__u32)~0U)
165 
166 static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
167 {
168         return (struct bpos) {
169                 .inode          = inode,
170                 .offset         = offset,
171                 .snapshot       = snapshot,
172         };
173 }
174 
/* Canonical positions; POS() is the snapshot == 0 shorthand: */
#define POS_MIN                         SPOS(0, 0, 0)
#define POS_MAX                         SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
#define SPOS_MAX                        SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
#define POS(_inode, _offset)            SPOS(_inode, _offset, 0)
179 
/* Empty placeholder struct, for container_of() */
struct bch_val {
        __u64           __nothing[0];
};

/*
 * The 96 bit version field of a bkey (see file header comment), split into
 * hi/lo words; like struct bpos, word order matches machine byte order.
 */
struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        __u64           lo;
        __u32           hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        __u32           hi;
        __u64           lo;
#endif
} __packed
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__aligned(4)
#endif
;
198 
/*
 * The common, unpacked key header + fields shared by every btree: size,
 * format/whiteout flags, value type, version, extent size, and position.
 */
struct bkey {
        /* Size of combined key and value, in u64s */
        __u8            u64s;

        /* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
        __u8            format:7,
                        needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
        __u8            needs_whiteout:1,
                        format:7;
#else
#error edit for your odd byteorder.
#endif

        /* Type of the value */
        __u8            type;

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        __u8            pad[1];

        struct bversion version;
        __u32           size;           /* extent size, in sectors */
        struct bpos     p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        struct bpos     p;
        __u32           size;           /* extent size, in sectors */
        struct bversion version;

        __u8            pad[1];
#endif
} __packed
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
/*
 * The big-endian version of bkey can't be compiled by rustc with the "aligned"
 * attr since it doesn't allow types to have both "packed" and "aligned" attrs.
 * So for Rust compatibility, don't include this. It can be included in the LE
 * version because the "packed" attr is redundant in that case.
 *
 * History: (quoting Kent)
 *
 * Specifically, when i was designing bkey, I wanted the header to be no
 * bigger than necessary so that bkey_packed could use the rest. That means that
 * decently often extent keys will fit into only 8 bytes, instead of spilling over
 * to 16.
 *
 * But packed_bkey treats the part after the header - the packed section -
 * as a single multi word, variable length integer. And bkey, the unpacked
 * version, is just a special case version of a bkey_packed; all the packed
 * bkey code will work on keys in any packed format, the in-memory
 * representation of an unpacked key also is just one type of packed key...
 *
 * So that constrains the key part of a big endian bkey to start right
 * after the header.
 *
 * If we ever do a bkey_v2 and need to expand the header by another byte for
 * some reason - that will clean up this wart.
 */
__aligned(8)
#endif
;
260 
/*
 * A key as stored in a btree node: same 3-byte header as struct bkey, then
 * the fields bit-packed per that node's bkey_format, starting at key_start.
 */
struct bkey_packed {
        __u64           _data[0];

        /* Size of combined key and value, in u64s */
        __u8            u64s;

        /* Format of key (0 for format local to btree node) */

        /*
         * XXX: next incompat on disk format change, switch format and
         * needs_whiteout - bkey_packed() will be cheaper if format is the high
         * bits of the bitfield
         */
#if defined(__LITTLE_ENDIAN_BITFIELD)
        __u8            format:7,
                        needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
        __u8            needs_whiteout:1,
                        format:7;
#endif

        /* Type of the value */
        __u8            type;
        __u8            key_start[0];

        /*
         * We copy bkeys with struct assignment in various places, and while
         * that shouldn't be done with packed bkeys we can't disallow it in C,
         * and it's legal to cast a bkey to a bkey_packed  - so padding it out
         * to the same size as struct bkey should hopefully be safest.
         */
        __u8            pad[sizeof(struct bkey) - 3];
} __packed __aligned(8);
294 
/* Little-endian 128 bit integer: */
typedef struct {
        __le64                  lo;
        __le64                  hi;
} bch_le128;

#define BKEY_U64s                       (sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX                   U8_MAX
#define BKEY_VAL_U64s_MAX               (BKEY_U64s_MAX - BKEY_U64s)

/* Bit offset of the packed key body - i.e. the 3 header bytes: */
#define KEY_PACKED_BITS_START           24

/* Values of the bkey format field: */
#define KEY_FORMAT_LOCAL_BTREE          0
#define KEY_FORMAT_CURRENT              1
308 
/* Order of fields in a packed key; indexes bkey_format arrays: */
enum bch_bkey_fields {
        BKEY_FIELD_INODE,
        BKEY_FIELD_OFFSET,
        BKEY_FIELD_SNAPSHOT,
        BKEY_FIELD_SIZE,
        BKEY_FIELD_VERSION_HI,
        BKEY_FIELD_VERSION_LO,
        BKEY_NR_FIELDS,
};

/* Width, in bits, of a struct bkey member - for the table below: */
#define bkey_format_field(name, field)                                  \
        [BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)

/*
 * The "identity" format: every field at full width - an unpacked bkey is
 * just one particular packed format (see struct bkey comment above).
 */
#define BKEY_FORMAT_CURRENT                                             \
((struct bkey_format) {                                                 \
        .key_u64s       = BKEY_U64s,                                    \
        .nr_fields      = BKEY_NR_FIELDS,                               \
        .bits_per_field = {                                             \
                bkey_format_field(INODE,        p.inode),               \
                bkey_format_field(OFFSET,       p.offset),              \
                bkey_format_field(SNAPSHOT,     p.snapshot),            \
                bkey_format_field(SIZE,         size),                  \
                bkey_format_field(VERSION_HI,   version.hi),            \
                bkey_format_field(VERSION_LO,   version.lo),            \
        },                                                              \
})
335 
/* bkey with inline value */
struct bkey_i {
        __u64                   _data[0];

        struct bkey     k;
        struct bch_val  v;      /* value bytes immediately follow the key */
};
343 
/* A bkey naming a position only (no extent size): */
#define POS_KEY(_pos)                                                   \
((struct bkey) {                                                        \
        .u64s           = BKEY_U64s,                                    \
        .format         = KEY_FORMAT_CURRENT,                           \
        .p              = _pos,                                         \
})

/* A bkey for an extent of _size sectors ending at (_inode, _offset): */
#define KEY(_inode, _offset, _size)                                     \
((struct bkey) {                                                        \
        .u64s           = BKEY_U64s,                                    \
        .format         = KEY_FORMAT_CURRENT,                           \
        .p              = POS(_inode, _offset),                         \
        .size           = _size,                                        \
})

/* Reset *k to an empty key at position 0 (type 0 == KEY_TYPE_deleted): */
static inline void bkey_init(struct bkey *k)
{
        *k = KEY(0, 0, 0);
}

/* Total bytes of key + value (u64s counts both, in units of u64): */
#define bkey_bytes(_k)          ((_k)->u64s * sizeof(__u64))

/* Declare a bkey_i followed by pad u64s of value space: */
#define __BKEY_PADDED(key, pad)                                 \
        struct bkey_i key; __u64 key ## _pad[pad]
368 
/*
 * - DELETED keys are used internally to mark keys that should be ignored but
 *   override keys in composition order.  Their version number is ignored.
 *
 * - DISCARDED keys indicate that the data is all 0s because it has been
 *   discarded. DISCARDs may have a version; if the version is nonzero the key
 *   will be persistent, otherwise the key will be dropped whenever the btree
 *   node is rewritten (like DELETED keys).
 *
 * - ERROR: any read of the data returns a read error, as the data was lost due
 *   to a failing device. Like DISCARDED keys, they can be removed (overridden)
 *   by new writes or cluster-wide GC. Node repair can also overwrite them with
 *   the same or a more recent version number, but not with an older version
 *   number.
 *
 * - WHITEOUT: for hash table btrees
 *
 * NOTE(review): there is no DISCARDED entry in the table below - the text
 * above appears historical; confirm against current key type semantics.
 */
/* Value type name -> on disk type tag; tags are persistent, never renumber: */
#define BCH_BKEY_TYPES()                                \
        x(deleted,              0)                      \
        x(whiteout,             1)                      \
        x(error,                2)                      \
        x(cookie,               3)                      \
        x(hash_whiteout,        4)                      \
        x(btree_ptr,            5)                      \
        x(extent,               6)                      \
        x(reservation,          7)                      \
        x(inode,                8)                      \
        x(inode_generation,     9)                      \
        x(dirent,               10)                     \
        x(xattr,                11)                     \
        x(alloc,                12)                     \
        x(quota,                13)                     \
        x(stripe,               14)                     \
        x(reflink_p,            15)                     \
        x(reflink_v,            16)                     \
        x(inline_data,          17)                     \
        x(btree_ptr_v2,         18)                     \
        x(indirect_inline_data, 19)                     \
        x(alloc_v2,             20)                     \
        x(subvolume,            21)                     \
        x(snapshot,             22)                     \
        x(inode_v2,             23)                     \
        x(alloc_v3,             24)                     \
        x(set,                  25)                     \
        x(lru,                  26)                     \
        x(alloc_v4,             27)                     \
        x(backpointer,          28)                     \
        x(inode_v3,             29)                     \
        x(bucket_gens,          30)                     \
        x(snapshot_tree,        31)                     \
        x(logged_op_truncate,   32)                     \
        x(logged_op_finsert,    33)                     \
        x(accounting,           34)

/* KEY_TYPE_* constants, generated from the table above: */
enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name     = nr,
        BCH_BKEY_TYPES()
#undef x
        KEY_TYPE_MAX,
};
429 
/*
 * Value types that carry little or no payload - for these the key itself
 * (type + position) is most or all of the datum:
 */

struct bch_deleted {
        struct bch_val          v;
};

struct bch_whiteout {
        struct bch_val          v;
};

struct bch_error {
        struct bch_val          v;
};

/* Opaque 64 bit value; used e.g. as a generic marker: */
struct bch_cookie {
        struct bch_val          v;
        __le64                  cookie;
};

struct bch_hash_whiteout {
        struct bch_val          v;
};

struct bch_set {
        struct bch_val          v;
};

/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
        __le64                  lo;
        __le64                  hi;
} __packed __aligned(8);

/*
 * KEY_TYPE_backpointer: back reference from a bucket to the btree key that
 * points into it.  bucket_offset is a 40 bit bitfield within a __u64.
 */
struct bch_backpointer {
        struct bch_val          v;
        __u8                    btree_id;
        __u8                    level;
        __u8                    data_type;
        __u64                   bucket_offset:40;
        __u32                   bucket_len;
        struct bpos             pos;
} __packed __aligned(8);
470 
/* Optional/variable size superblock sections: */

/* Common header of every superblock field; type is an enum bch_sb_field_type: */
struct bch_sb_field {
        __u64                   _data[0];
        __le32                  u64s;   /* field size, in u64s - see vstructs.h */
        __le32                  type;
};

/* Field name -> on disk type tag; tags are persistent, never renumber: */
#define BCH_SB_FIELDS()                         \
        x(journal,                      0)      \
        x(members_v1,                   1)      \
        x(crypt,                        2)      \
        x(replicas_v0,                  3)      \
        x(quota,                        4)      \
        x(disk_groups,                  5)      \
        x(clean,                        6)      \
        x(replicas,                     7)      \
        x(journal_seq_blacklist,        8)      \
        x(journal_v2,                   9)      \
        x(counters,                     10)     \
        x(members_v2,                   11)     \
        x(errors,                       12)     \
        x(ext,                          13)     \
        x(downgrade,                    14)
495 
#include "alloc_background_format.h"
#include "dirent_format.h"
#include "disk_accounting_format.h"
#include "disk_groups_format.h"
#include "extents_format.h"
#include "ec_format.h"
#include "inode_format.h"
#include "journal_seq_blacklist_format.h"
#include "logged_ops_format.h"
#include "lru_format.h"
#include "quota_format.h"
#include "reflink_format.h"
#include "replicas_format.h"
#include "snapshot_format.h"
#include "subvolume_format.h"
#include "sb-counters_format.h"
#include "sb-downgrade_format.h"
#include "sb-errors_format.h"
#include "sb-members_format.h"
#include "xattr_format.h"
518 
/* BCH_SB_FIELD_* constants, generated from BCH_SB_FIELDS() above: */
enum bch_sb_field_type {
#define x(f, nr)        BCH_SB_FIELD_##f = nr,
        BCH_SB_FIELDS()
#undef x
        BCH_SB_FIELD_NR
};
525 
/*
 * Most superblock fields are replicated in all device's superblocks - a few are
 * not:
 */
#define BCH_SINGLE_DEVICE_SB_FIELDS             \
        ((1U << BCH_SB_FIELD_journal)|          \
         (1U << BCH_SB_FIELD_journal_v2))

/* BCH_SB_FIELD_journal: */

/* v1: flat list of this device's journal buckets: */
struct bch_sb_field_journal {
        struct bch_sb_field     field;
        __le64                  buckets[];
};

/* v2: journal buckets as (start, nr) runs instead of one entry per bucket: */
struct bch_sb_field_journal_v2 {
        struct bch_sb_field     field;

        struct bch_sb_field_journal_v2_entry {
                __le64          start;
                __le64          nr;
        }                       d[];
};
549 
/* BCH_SB_FIELD_crypt: */

struct nonce {
        __le32                  d[4];
};

/* 256 bit key: */
struct bch_key {
        __le64                  key[4];
};

/* "bch**key", stored in bch_encrypted_key.magic (checked after decrypt - confirm): */
#define BCH_KEY_MAGIC                                   \
        (((__u64) 'b' <<  0)|((__u64) 'c' <<  8)|               \
         ((__u64) 'h' << 16)|((__u64) '*' << 24)|               \
         ((__u64) '*' << 32)|((__u64) 'k' << 40)|               \
         ((__u64) 'e' << 48)|((__u64) 'y' << 56))

struct bch_encrypted_key {
        __le64                  magic;
        struct bch_key          key;
};

/*
 * If this field is present in the superblock, it stores an encryption key which
 * is used to encrypt all other data/metadata. The key will normally be
 * encrypted with the key userspace provides, but if encryption has been turned
 * off we'll just store the master key unencrypted in the superblock so we can
 * access the previously encrypted data.
 */
struct bch_sb_field_crypt {
        struct bch_sb_field     field;

        __le64                  flags;
        __le64                  kdf_flags;
        struct bch_encrypted_key key;
};

LE64_BITMASK(BCH_CRYPT_KDF_TYPE,        struct bch_sb_field_crypt, flags, 0, 4);

enum bch_kdf_types {
        BCH_KDF_SCRYPT          = 0,
        BCH_KDF_NR              = 1,
};

/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N,  struct bch_sb_field_crypt, kdf_flags,  0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R,  struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P,  struct bch_sb_field_crypt, kdf_flags, 32, 48);
597 
/*
 * On clean shutdown, store btree roots and current journal sequence number in
 * the superblock:
 */
struct jset_entry {
        __le16                  u64s;
        __u8                    btree_id;
        __u8                    level;
        __u8                    type; /* designates what this jset holds */
        __u8                    pad[3];

        struct bkey_i           start[0];
        __u64                   _data[];
};

struct bch_sb_field_clean {
        struct bch_sb_field     field;

        __le32                  flags;
        __le16                  _read_clock; /* no longer used */
        __le16                  _write_clock;
        __le64                  journal_seq;

        struct jset_entry       start[0];
        __u64                   _data[];
};

/* Pending recovery work and error state carried in the superblock: */
struct bch_sb_field_ext {
        struct bch_sb_field     field;
        __le64                  recovery_passes_required[2];
        __le64                  errors_silent[8];
        __le64                  btrees_lost_data;
};
631 
/* Superblock: */

/*
 * New versioning scheme:
 * One common version number for all on disk data structures - superblock, btree
 * nodes, journal entries
 *
 * Encoded with the major version in the high bits, minor in the low 10 bits:
 */
#define BCH_VERSION_MAJOR(_v)           ((__u16) ((_v) >> 10))
#define BCH_VERSION_MINOR(_v)           ((__u16) ((_v) & ~(~0U << 10)))
#define BCH_VERSION(_major, _minor)     (((_major) << 10)|(_minor) << 0)
642 
/*
 * field 1:             version name
 * field 2:             BCH_VERSION(major, minor)
 * field 3:             recovery passes required on upgrade
 */
/* Version numbers are part of the on disk format - never renumber: */
#define BCH_METADATA_VERSIONS()                                         \
        x(bkey_renumber,                BCH_VERSION(0, 10))             \
        x(inode_btree_change,           BCH_VERSION(0, 11))             \
        x(snapshot,                     BCH_VERSION(0, 12))             \
        x(inode_backpointers,           BCH_VERSION(0, 13))             \
        x(btree_ptr_sectors_written,    BCH_VERSION(0, 14))             \
        x(snapshot_2,                   BCH_VERSION(0, 15))             \
        x(reflink_p_fix,                BCH_VERSION(0, 16))             \
        x(subvol_dirent,                BCH_VERSION(0, 17))             \
        x(inode_v2,                     BCH_VERSION(0, 18))             \
        x(freespace,                    BCH_VERSION(0, 19))             \
        x(alloc_v4,                     BCH_VERSION(0, 20))             \
        x(new_data_types,               BCH_VERSION(0, 21))             \
        x(backpointers,                 BCH_VERSION(0, 22))             \
        x(inode_v3,                     BCH_VERSION(0, 23))             \
        x(unwritten_extents,            BCH_VERSION(0, 24))             \
        x(bucket_gens,                  BCH_VERSION(0, 25))             \
        x(lru_v2,                       BCH_VERSION(0, 26))             \
        x(fragmentation_lru,            BCH_VERSION(0, 27))             \
        x(no_bps_in_alloc_keys,         BCH_VERSION(0, 28))             \
        x(snapshot_trees,               BCH_VERSION(0, 29))             \
        x(major_minor,                  BCH_VERSION(1,  0))             \
        x(snapshot_skiplists,           BCH_VERSION(1,  1))             \
        x(deleted_inodes,               BCH_VERSION(1,  2))             \
        x(rebalance_work,               BCH_VERSION(1,  3))             \
        x(member_seq,                   BCH_VERSION(1,  4))             \
        x(subvolume_fs_parent,          BCH_VERSION(1,  5))             \
        x(btree_subvolume_children,     BCH_VERSION(1,  6))             \
        x(mi_btree_bitmap,              BCH_VERSION(1,  7))             \
        x(bucket_stripe_sectors,        BCH_VERSION(1,  8))             \
        x(disk_accounting_v2,           BCH_VERSION(1,  9))             \
        x(disk_accounting_v3,           BCH_VERSION(1, 10))             \
        x(disk_accounting_inum,         BCH_VERSION(1, 11))             \
        x(rebalance_work_acct_fix,      BCH_VERSION(1, 12))

enum bcachefs_metadata_version {
        bcachefs_metadata_version_min = 9,
#define x(t, n) bcachefs_metadata_version_##t = n,
        BCH_METADATA_VERSIONS()
#undef x
        bcachefs_metadata_version_max
};

/* Filesystems below this version get upgraded on mount (presumably - see recovery code): */
static const __maybe_unused
unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_rebalance_work;

/* Newest real version (max is one past the end of the table): */
#define bcachefs_metadata_version_current       (bcachefs_metadata_version_max - 1)
695 
/* Default sector of the primary superblock: */
#define BCH_SB_SECTOR                   8

#define BCH_SB_LAYOUT_SIZE_BITS_MAX     16 /* 32 MB */

/*
 * Locates every superblock on a device; per the file header comment, a copy
 * lives inside each superblock and before the first one (BCH_SB_LAYOUT_SECTOR).
 */
struct bch_sb_layout {
        __uuid_t                magic;  /* bcachefs superblock UUID */
        __u8                    layout_type;
        __u8                    sb_max_size_bits; /* base 2 of 512 byte sectors */
        __u8                    nr_superblocks;
        __u8                    pad[5];
        __le64                  sb_offset[61];
} __packed __aligned(8);

#define BCH_SB_LAYOUT_SECTOR    7
710 
711 /*
712  * @offset      - sector where this sb was written
713  * @version     - on disk format version
714  * @version_min - Oldest metadata version this filesystem contains; so we can
715  *                safely drop compatibility code and refuse to mount filesystems
716  *                we'd need it for
717  * @magic       - identifies as a bcachefs superblock (BCHFS_MAGIC)
718  * @seq         - incremented each time superblock is written
719  * @uuid        - used for generating various magic numbers and identifying
720  *                member devices, never changes
721  * @user_uuid   - user visible UUID, may be changed
722  * @label       - filesystem label
723  * @seq         - identifies most recent superblock, incremented each time
724  *                superblock is written
725  * @features    - enabled incompatible features
726  */
/* On disk superblock; see the block comment above for field documentation */
struct bch_sb {
	struct bch_csum		csum;
	__le16			version;	/* on disk format version */
	__le16			version_min;	/* oldest metadata version in this fs */
	__le16			pad[2];
	__uuid_t		magic;		/* BCHFS_MAGIC */
	__uuid_t		uuid;		/* internal uuid, never changes */
	__uuid_t		user_uuid;	/* user visible uuid, may be changed */
	__u8			label[BCH_SB_LABEL_SIZE];
	__le64			offset;		/* sector this sb copy was written at */
	__le64			seq;		/* incremented each time sb is written */

	__le16			block_size;
	__u8			dev_idx;
	__u8			nr_devices;
	__le32			u64s;		/* presumably size of the variable length tail, in u64s — TODO confirm */

	__le64			time_base_lo;
	__le32			time_base_hi;
	__le32			time_precision;

	__le64			flags[7];	/* accessed via the LE64_BITMASK()s below */
	__le64			write_time;
	__le64			features[2];	/* enabled incompatible features */
	__le64			compat[2];

	struct bch_sb_layout	layout;

	struct bch_sb_field	start[0];	/* variable length fields start here */
	__le64			_data[];
} __packed __aligned(8);
758 
759 /*
760  * Flags:
761  * BCH_SB_INITALIZED    - set on first mount
762  * BCH_SB_CLEAN         - did we shut down cleanly? Just a hint, doesn't affect
763  *                        behaviour of mount/recovery path:
764  * BCH_SB_INODE_32BIT   - limit inode numbers to 32 bits
765  * BCH_SB_128_BIT_MACS  - 128 bit macs instead of 80
766  * BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
767  *                         DATA/META_CSUM_TYPE. Also indicates encryption
768  *                         algorithm in use, if/when we get more than one
769  */
770 
/*
 * Bitfield accessors for struct bch_sb fields: each LE64_BITMASK(NAME, type,
 * field, lo, hi) invocation covers bits [lo, hi) of the given little-endian
 * word and (as used by the helpers below) provides NAME()/SET_NAME().
 */
LE16_BITMASK(BCH_SB_BLOCK_SIZE,		struct bch_sb, block_size, 0, 16);

/* flags[0]: */
LE64_BITMASK(BCH_SB_INITIALIZED,	struct bch_sb, flags[0],  0,  1);
LE64_BITMASK(BCH_SB_CLEAN,		struct bch_sb, flags[0],  1,  2);
LE64_BITMASK(BCH_SB_CSUM_TYPE,		struct bch_sb, flags[0],  2,  8);
LE64_BITMASK(BCH_SB_ERROR_ACTION,	struct bch_sb, flags[0],  8, 12);

LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE,	struct bch_sb, flags[0], 12, 28);

LE64_BITMASK(BCH_SB_GC_RESERVE,		struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE,	struct bch_sb, flags[0], 33, 40);

LE64_BITMASK(BCH_SB_META_CSUM_TYPE,	struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE,	struct bch_sb, flags[0], 44, 48);

LE64_BITMASK(BCH_SB_META_REPLICAS_WANT,	struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT,	struct bch_sb, flags[0], 52, 56);

LE64_BITMASK(BCH_SB_POSIX_ACL,		struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA,		struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA,		struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA,		struct bch_sb, flags[0], 59, 60);

LE64_BITMASK(BCH_SB_HAS_ERRORS,		struct bch_sb, flags[0], 60, 61);
LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);

LE64_BITMASK(BCH_SB_BIG_ENDIAN,		struct bch_sb, flags[0], 62, 63);

/* flags[1]: */
LE64_BITMASK(BCH_SB_STR_HASH_TYPE,	struct bch_sb, flags[1],  0,  4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1],  4,  8);
LE64_BITMASK(BCH_SB_INODE_32BIT,	struct bch_sb, flags[1],  8,  9);

LE64_BITMASK(BCH_SB_128_BIT_MACS,	struct bch_sb, flags[1],  9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE,	struct bch_sb, flags[1], 10, 14);

/*
 * Max size of an extent that may require bouncing to read or write
 * (checksummed, compressed): 64k
 */
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
					struct bch_sb, flags[1], 14, 20);

LE64_BITMASK(BCH_SB_META_REPLICAS_REQ,	struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ,	struct bch_sb, flags[1], 24, 28);

LE64_BITMASK(BCH_SB_PROMOTE_TARGET,	struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET,	struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET,	struct bch_sb, flags[1], 52, 64);

/* flags[2]: */
LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
					struct bch_sb, flags[2],  0,  4);
LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES,	struct bch_sb, flags[2],  4, 64);

/* flags[3] and flags[4]: */
LE64_BITMASK(BCH_SB_ERASURE_CODE,	struct bch_sb, flags[3],  0, 16);
LE64_BITMASK(BCH_SB_METADATA_TARGET,	struct bch_sb, flags[3], 16, 28);
LE64_BITMASK(BCH_SB_SHARD_INUMS,	struct bch_sb, flags[3], 28, 29);
LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
LE64_BITMASK(BCH_SB_NOCOW,		struct bch_sb, flags[4], 33, 34);
LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE,	struct bch_sb, flags[4], 34, 54);
LE64_BITMASK(BCH_SB_VERSION_UPGRADE,	struct bch_sb, flags[4], 54, 56);

/* high bits of compression types; combined by the helpers below */
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
					struct bch_sb, flags[4], 60, 64);

/* flags[5]: */
LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
					struct bch_sb, flags[5],  0, 16);
LE64_BITMASK(BCH_SB_ALLOCATOR_STUCK_TIMEOUT,
					struct bch_sb, flags[5], 16, 32);
844 
845 static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
846 {
847         return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
848 }
849 
850 static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
851 {
852         SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
853         SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
854 }
855 
856 static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
857 {
858         return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
859                 (BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
860 }
861 
862 static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
863 {
864         SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
865         SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
866 }
867 
868 /*
869  * Features:
870  *
871  * journal_seq_blacklist_v3:    gates BCH_SB_FIELD_journal_seq_blacklist
872  * reflink:                     gates KEY_TYPE_reflink
873  * inline_data:                 gates KEY_TYPE_inline_data
874  * new_siphash:                 gates BCH_STR_HASH_siphash
875  * new_extent_overwrite:        gates BTREE_NODE_NEW_EXTENT_OVERWRITE
876  */
877 #define BCH_SB_FEATURES()                       \
878         x(lz4,                          0)      \
879         x(gzip,                         1)      \
880         x(zstd,                         2)      \
881         x(atomic_nlink,                 3)      \
882         x(ec,                           4)      \
883         x(journal_seq_blacklist_v3,     5)      \
884         x(reflink,                      6)      \
885         x(new_siphash,                  7)      \
886         x(inline_data,                  8)      \
887         x(new_extent_overwrite,         9)      \
888         x(incompressible,               10)     \
889         x(btree_ptr_v2,                 11)     \
890         x(extents_above_btree_updates,  12)     \
891         x(btree_updates_journalled,     13)     \
892         x(reflink_inline_data,          14)     \
893         x(new_varint,                   15)     \
894         x(journal_no_flush,             16)     \
895         x(alloc_v2,                     17)     \
896         x(extents_across_btree_nodes,   18)
897 
898 #define BCH_SB_FEATURES_ALWAYS                          \
899         ((1ULL << BCH_FEATURE_new_extent_overwrite)|    \
900          (1ULL << BCH_FEATURE_extents_above_btree_updates)|\
901          (1ULL << BCH_FEATURE_btree_updates_journalled)|\
902          (1ULL << BCH_FEATURE_alloc_v2)|\
903          (1ULL << BCH_FEATURE_extents_across_btree_nodes))
904 
905 #define BCH_SB_FEATURES_ALL                             \
906         (BCH_SB_FEATURES_ALWAYS|                        \
907          (1ULL << BCH_FEATURE_new_siphash)|             \
908          (1ULL << BCH_FEATURE_btree_ptr_v2)|            \
909          (1ULL << BCH_FEATURE_new_varint)|              \
910          (1ULL << BCH_FEATURE_journal_no_flush))
911 
912 enum bch_sb_feature {
913 #define x(f, n) BCH_FEATURE_##f,
914         BCH_SB_FEATURES()
915 #undef x
916         BCH_FEATURE_NR,
917 };
918 
919 #define BCH_SB_COMPAT()                                 \
920         x(alloc_info,                           0)      \
921         x(alloc_metadata,                       1)      \
922         x(extents_above_btree_updates_done,     2)      \
923         x(bformat_overflow_done,                3)
924 
925 enum bch_sb_compat {
926 #define x(f, n) BCH_COMPAT_##f,
927         BCH_SB_COMPAT()
928 #undef x
929         BCH_COMPAT_NR,
930 };
931 
932 /* options: */
933 
/* Values for the BCH_SB_VERSION_UPGRADE superblock field */
#define BCH_VERSION_UPGRADE_OPTS()	\
	x(compatible,		0)	\
	x(incompatible,		1)	\
	x(none,			2)

enum bch_version_upgrade_opts {
#define x(t, n) BCH_VERSION_UPGRADE_##t = n,
	BCH_VERSION_UPGRADE_OPTS()
#undef x
};

#define BCH_REPLICAS_MAX		4U

#define BCH_BKEY_PTRS_MAX		16U
948 
/* What to do on filesystem error; values for BCH_SB_ERROR_ACTION */
#define BCH_ERROR_ACTIONS()		\
	x(continue,		0)	\
	x(fix_safe,		1)	\
	x(panic,		2)	\
	x(ro,			3)

enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
	BCH_ERROR_ACTIONS()
#undef x
	BCH_ON_ERROR_NR
};
961 
/*
 * On disk string hash types (dirents/xattrs); numbering is part of the
 * on disk format.
 */
#define BCH_STR_HASH_TYPES()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash_old,		2)	\
	x(siphash,		3)

enum bch_str_hash_type {
#define x(t, n) BCH_STR_HASH_##t = n,
	BCH_STR_HASH_TYPES()
#undef x
	BCH_STR_HASH_NR
};

/* User-selectable string hash options (note: no siphash_old here) */
#define BCH_STR_HASH_OPTS()		\
	x(crc32c,		0)	\
	x(crc64,		1)	\
	x(siphash,		2)

enum bch_str_hash_opts {
#define x(t, n) BCH_STR_HASH_OPT_##t = n,
	BCH_STR_HASH_OPTS()
#undef x
	BCH_STR_HASH_OPT_NR
};
986 
/* On disk checksum types; the chacha20_poly1305 variants are encryption */
#define BCH_CSUM_TYPES()			\
	x(none,				0)	\
	x(crc32c_nonzero,		1)	\
	x(crc64_nonzero,		2)	\
	x(chacha20_poly1305_80,		3)	\
	x(chacha20_poly1305_128,	4)	\
	x(crc32c,			5)	\
	x(crc64,			6)	\
	x(xxhash,			7)

enum bch_csum_type {
#define x(t, n) BCH_CSUM_##t = n,
	BCH_CSUM_TYPES()
#undef x
	BCH_CSUM_NR
};

/* Bytes of checksum/MAC stored on disk for each checksum type */
static const __maybe_unused unsigned bch_crc_bytes[] = {
	[BCH_CSUM_none]				= 0,
	[BCH_CSUM_crc32c_nonzero]		= 4,
	[BCH_CSUM_crc32c]			= 4,
	[BCH_CSUM_crc64_nonzero]		= 8,
	[BCH_CSUM_crc64]			= 8,
	[BCH_CSUM_xxhash]			= 8,
	[BCH_CSUM_chacha20_poly1305_80]		= 10,	/* 80 bit MAC */
	[BCH_CSUM_chacha20_poly1305_128]	= 16,	/* 128 bit MAC */
};
1014 
1015 static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
1016 {
1017         switch (type) {
1018         case BCH_CSUM_chacha20_poly1305_80:
1019         case BCH_CSUM_chacha20_poly1305_128:
1020                 return true;
1021         default:
1022                 return false;
1023         }
1024 }
1025 
/* User-selectable checksum options (a subset of the on disk types) */
#define BCH_CSUM_OPTS()			\
	x(none,			0)	\
	x(crc32c,		1)	\
	x(crc64,		2)	\
	x(xxhash,		3)

enum bch_csum_opts {
#define x(t, n) BCH_CSUM_OPT_##t = n,
	BCH_CSUM_OPTS()
#undef x
	BCH_CSUM_OPT_NR
};
1038 
/* On disk compression types; numbering is part of the on disk format */
#define BCH_COMPRESSION_TYPES()		\
	x(none,			0)	\
	x(lz4_old,		1)	\
	x(gzip,			2)	\
	x(lz4,			3)	\
	x(zstd,			4)	\
	x(incompressible,	5)

enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
	BCH_COMPRESSION_TYPES()
#undef x
	BCH_COMPRESSION_TYPE_NR
};

/* User-selectable compression options (no lz4_old/incompressible) */
#define BCH_COMPRESSION_OPTS()		\
	x(none,		0)		\
	x(lz4,		1)		\
	x(gzip,		2)		\
	x(zstd,		3)

enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
	BCH_COMPRESSION_OPTS()
#undef x
	BCH_COMPRESSION_OPT_NR
};
1066 
1067 /*
1068  * Magic numbers
1069  *
1070  * The various other data structures have their own magic numbers, which are
1071  * xored with the first part of the cache set's UUID
1072  */
1073 
/* Superblock magic UUIDs for bcache and bcachefs */
#define BCACHE_MAGIC							\
	UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca,				\
		  0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
#define BCHFS_MAGIC							\
	UUID_INIT(0xc68573f6, 0x66ce, 0x90a9,				\
		  0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)

#define BCACHEFS_STATFS_MAGIC		BCACHEFS_SUPER_MAGIC

/* Base magics for journal sets and btree node sets, xored with the fs uuid */
#define JSET_MAGIC		__cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC		__cpu_to_le64(0x90135c78b99e07f5ULL)
1085 
1086 static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
1087 {
1088         __le64 ret;
1089 
1090         memcpy(&ret, &sb->uuid, sizeof(ret));
1091         return ret;
1092 }
1093 
1094 static inline __u64 __jset_magic(struct bch_sb *sb)
1095 {
1096         return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
1097 }
1098 
1099 static inline __u64 __bset_magic(struct bch_sb *sb)
1100 {
1101         return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
1102 }
1103 
1104 /* Journal */
1105 
/* Size of a struct jset_entry header, in u64s */
#define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))

/* Journal entry payload types; numbering is part of the on disk format */
#define BCH_JSET_ENTRY_TYPES()			\
	x(btree_keys,		0)		\
	x(btree_root,		1)		\
	x(prio_ptrs,		2)		\
	x(blacklist,		3)		\
	x(blacklist_v2,		4)		\
	x(usage,		5)		\
	x(data_usage,		6)		\
	x(clock,		7)		\
	x(dev_usage,		8)		\
	x(log,			9)		\
	x(overwrite,		10)		\
	x(write_buffer_keys,	11)		\
	x(datetime,		12)

enum bch_jset_entry_type {
#define x(f, nr)	BCH_JSET_ENTRY_##f	= nr,
	BCH_JSET_ENTRY_TYPES()
#undef x
	BCH_JSET_ENTRY_NR
};
1129 
1130 static inline bool jset_entry_is_key(struct jset_entry *e)
1131 {
1132         switch (e->type) {
1133         case BCH_JSET_ENTRY_btree_keys:
1134         case BCH_JSET_ENTRY_btree_root:
1135         case BCH_JSET_ENTRY_write_buffer_keys:
1136                 return true;
1137         }
1138 
1139         return false;
1140 }
1141 
1142 /*
1143  * Journal sequence numbers can be blacklisted: bsets record the max sequence
1144  * number of all the journal entries they contain updates for, so that on
1145  * recovery we can ignore those bsets that contain index updates newer that what
1146  * made it into the journal.
1147  *
1148  * This means that we can't reuse that journal_seq - we have to skip it, and
1149  * then record that we skipped it so that the next time we crash and recover we
1150  * don't think there was a missing journal entry.
1151  */
/* Blacklists a single journal sequence number (see comment above) */
struct jset_entry_blacklist {
	struct jset_entry	entry;
	__le64			seq;
};

/* Blacklists an inclusive range of journal sequence numbers */
struct jset_entry_blacklist_v2 {
	struct jset_entry	entry;
	__le64			start;
	__le64			end;
};

/* Accounting counters carried in BCH_JSET_ENTRY_usage entries */
#define BCH_FS_USAGE_TYPES()			\
	x(reserved,		0)		\
	x(inodes,		1)		\
	x(key_version,		2)

enum bch_fs_usage_type {
#define x(f, nr)	BCH_FS_USAGE_##f	= nr,
	BCH_FS_USAGE_TYPES()
#undef x
	BCH_FS_USAGE_NR
};
1174 
/* Payload for BCH_JSET_ENTRY_usage: a single counter value */
struct jset_entry_usage {
	struct jset_entry	entry;
	__le64			v;
} __packed;

/* Payload for BCH_JSET_ENTRY_data_usage: counter for a replicas entry */
struct jset_entry_data_usage {
	struct jset_entry	entry;
	__le64			v;
	struct bch_replicas_entry_v1 r;
} __packed;

/* Payload for BCH_JSET_ENTRY_clock: one of the rw clocks */
struct jset_entry_clock {
	struct jset_entry	entry;
	__u8			rw;	/* read or write clock */
	__u8			pad[7];
	__le64			time;
} __packed;

/* Per-data-type usage counters within a jset_entry_dev_usage */
struct jset_entry_dev_usage_type {
	__le64			buckets;
	__le64			sectors;
	__le64			fragmented;
} __packed;

/* Payload for BCH_JSET_ENTRY_dev_usage: per-device usage, one d[] per type */
struct jset_entry_dev_usage {
	struct jset_entry	entry;
	__le32			dev;	/* device index */
	__u32			pad;

	__le64			_buckets_ec;		/* No longer used */
	__le64			_buckets_unavailable;	/* No longer used */

	struct jset_entry_dev_usage_type d[];
};

/* Number of elements in d[], derived from the entry's total size */
static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
	return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
		sizeof(struct jset_entry_dev_usage_type);
}

/* Payload for BCH_JSET_ENTRY_log: free-form message bytes */
struct jset_entry_log {
	struct jset_entry	entry;
	u8			d[];
} __packed __aligned(8);

/* Payload for BCH_JSET_ENTRY_datetime: wall clock time of the entry */
struct jset_entry_datetime {
	struct jset_entry	entry;
	__le64			seconds;
} __packed __aligned(8);
1225 
1226 /*
1227  * On disk format for a journal entry:
1228  * seq is monotonically increasing; every journal entry has its own unique
1229  * sequence number.
1230  *
1231  * last_seq is the oldest journal entry that still has keys the btree hasn't
1232  * flushed to disk yet.
1233  *
1234  * version is for on disk format changes.
1235  */
/* On disk journal entry; see the block comment above */
struct jset {
	struct bch_csum		csum;

	__le64			magic;	/* __jset_magic() for this fs */
	__le64			seq;	/* unique, monotonically increasing */
	__le32			version;
	__le32			flags;	/* JSET_* bitfields below */

	__le32			u64s; /* size of d[] in u64s */

	/* NOTE(review): name suggests fields past here are encrypted when
	 * encryption is enabled — confirm in journal IO path */
	__u8			encrypted_start[0];

	__le16			_read_clock; /* no longer used */
	__le16			_write_clock;

	/* Sequence number of oldest dirty journal entry */
	__le64			last_seq;


	struct jset_entry	start[0];
	__u64			_data[];
} __packed __aligned(8);

LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
LE32_BITMASK(JSET_NO_FLUSH,	struct jset, flags, 5, 6);

#define BCH_JOURNAL_BUCKETS_MIN		8
1264 
1265 /* Btree: */
1266 
/* Per-btree flags, used in the BCH_BTREE_IDS() table below */
enum btree_id_flags {
	BTREE_ID_EXTENTS	= BIT(0),	/* keys span ranges, not points */
	BTREE_ID_SNAPSHOTS	= BIT(1),
	BTREE_ID_SNAPSHOT_FIELD	= BIT(2),
	BTREE_ID_DATA		= BIT(3),
};
1273 
/*
 * Table of all btrees: x(name, id, flags, allowed_key_types). ids are part
 * of the on disk format and must never be renumbered; the last argument is
 * a bitmask of the KEY_TYPEs valid in that btree.
 */
#define BCH_BTREE_IDS()								\
	x(extents,		0,	BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_error)|						\
	  BIT_ULL(KEY_TYPE_cookie)|						\
	  BIT_ULL(KEY_TYPE_extent)|						\
	  BIT_ULL(KEY_TYPE_reservation)|					\
	  BIT_ULL(KEY_TYPE_reflink_p)|						\
	  BIT_ULL(KEY_TYPE_inline_data))					\
	x(inodes,		1,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_inode)|						\
	  BIT_ULL(KEY_TYPE_inode_v2)|						\
	  BIT_ULL(KEY_TYPE_inode_v3)|						\
	  BIT_ULL(KEY_TYPE_inode_generation))					\
	x(dirents,		2,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|					\
	  BIT_ULL(KEY_TYPE_dirent))						\
	x(xattrs,		3,	BTREE_ID_SNAPSHOTS,			\
	  BIT_ULL(KEY_TYPE_whiteout)|						\
	  BIT_ULL(KEY_TYPE_cookie)|						\
	  BIT_ULL(KEY_TYPE_hash_whiteout)|					\
	  BIT_ULL(KEY_TYPE_xattr))						\
	x(alloc,		4,	0,					\
	  BIT_ULL(KEY_TYPE_alloc)|						\
	  BIT_ULL(KEY_TYPE_alloc_v2)|						\
	  BIT_ULL(KEY_TYPE_alloc_v3)|						\
	  BIT_ULL(KEY_TYPE_alloc_v4))						\
	x(quotas,		5,	0,					\
	  BIT_ULL(KEY_TYPE_quota))						\
	x(stripes,		6,	0,					\
	  BIT_ULL(KEY_TYPE_stripe))						\
	x(reflink,		7,	BTREE_ID_EXTENTS|BTREE_ID_DATA,		\
	  BIT_ULL(KEY_TYPE_reflink_v)|						\
	  BIT_ULL(KEY_TYPE_indirect_inline_data)|				\
	  BIT_ULL(KEY_TYPE_error))						\
	x(subvolumes,		8,	0,					\
	  BIT_ULL(KEY_TYPE_subvolume))						\
	x(snapshots,		9,	0,					\
	  BIT_ULL(KEY_TYPE_snapshot))						\
	x(lru,			10,	0,					\
	  BIT_ULL(KEY_TYPE_set))						\
	x(freespace,		11,	BTREE_ID_EXTENTS,			\
	  BIT_ULL(KEY_TYPE_set))						\
	x(need_discard,		12,	0,					\
	  BIT_ULL(KEY_TYPE_set))						\
	x(backpointers,		13,	0,					\
	  BIT_ULL(KEY_TYPE_backpointer))					\
	x(bucket_gens,		14,	0,					\
	  BIT_ULL(KEY_TYPE_bucket_gens))					\
	x(snapshot_trees,	15,	0,					\
	  BIT_ULL(KEY_TYPE_snapshot_tree))					\
	x(deleted_inodes,	16,	BTREE_ID_SNAPSHOT_FIELD,		\
	  BIT_ULL(KEY_TYPE_set))						\
	x(logged_ops,		17,	0,					\
	  BIT_ULL(KEY_TYPE_logged_op_truncate)|					\
	  BIT_ULL(KEY_TYPE_logged_op_finsert))					\
	x(rebalance_work,	18,	BTREE_ID_SNAPSHOT_FIELD,		\
	  BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie))			\
	x(subvolume_children,	19,	0,					\
	  BIT_ULL(KEY_TYPE_set))						\
	x(accounting,		20,	BTREE_ID_SNAPSHOT_FIELD,		\
	  BIT_ULL(KEY_TYPE_accounting))						\
1338 
/* Btree ids, generated from the BCH_BTREE_IDS() table above */
enum btree_id {
#define x(name, nr, ...) BTREE_ID_##name = nr,
	BCH_BTREE_IDS()
#undef x
	BTREE_ID_NR
};

/*
 * Maximum number of btrees that we will _ever_ have under the current scheme,
 * where we refer to them with 64 bit bitfields - and we also need a bit for
 * the interior btree node type:
 */
#define BTREE_ID_NR_MAX		63
1352 
1353 static inline bool btree_id_is_alloc(enum btree_id id)
1354 {
1355         switch (id) {
1356         case BTREE_ID_alloc:
1357         case BTREE_ID_backpointers:
1358         case BTREE_ID_need_discard:
1359         case BTREE_ID_freespace:
1360         case BTREE_ID_bucket_gens:
1361                 return true;
1362         default:
1363                 return false;
1364         }
1365 }
1366 
#define BTREE_MAX_DEPTH		4U

/* Btree nodes */

/*
 * Btree nodes
 *
 * On disk a btree node is a list/log of these; within each set the keys are
 * sorted
 */
struct bset {
	__le64			seq;

	/*
	 * Highest journal entry this bset contains keys for.
	 * If on recovery we don't see that journal entry, this bset is ignored:
	 * this allows us to preserve the order of all index updates after a
	 * crash, since the journal records a total order of all index updates
	 * and anything that didn't make it to the journal doesn't get used.
	 */
	__le64			journal_seq;

	__le32			flags;	/* BSET_* bitfields below */
	__le16			version;
	__le16			u64s; /* count of d[] in u64s */

	struct bkey_packed	start[0];
	__u64			_data[];
} __packed __aligned(8);

LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);

LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
				struct bset, flags, 5, 6);

/* Sector offset within the btree node: */
LE32_BITMASK(BSET_OFFSET,	struct bset, flags, 16, 32);
1405 
/* On disk btree node header, followed by the first bset */
struct btree_node {
	struct bch_csum		csum;
	__le64			magic;

	/* this flags field is encrypted, unlike bset->flags: */
	__le64			flags;

	/* Closed interval: */
	struct bpos		min_key;
	struct bpos		max_key;
	struct bch_extent_ptr	_ptr; /* not used anymore */
	struct bkey_format	format;

	union {
	struct bset		keys;
	struct {
		/* pad[22] lines u64s up with bset->u64s: 8+8+4+2 bytes precede it */
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];

	};
	};
} __packed __aligned(8);

/* Bitfields within btree_node->flags: */
LE64_BITMASK(BTREE_NODE_ID_LO,	struct btree_node, flags,  0,  4);
LE64_BITMASK(BTREE_NODE_LEVEL,	struct btree_node, flags,  4,  8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
				struct btree_node, flags,  8,  9);
LE64_BITMASK(BTREE_NODE_ID_HI,	struct btree_node, flags,  9, 25);
/* 25-32 unused */
LE64_BITMASK(BTREE_NODE_SEQ,	struct btree_node, flags, 32, 64);
1437 
1438 static inline __u64 BTREE_NODE_ID(struct btree_node *n)
1439 {
1440         return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
1441 }
1442 
1443 static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
1444 {
1445         SET_BTREE_NODE_ID_LO(n, v);
1446         SET_BTREE_NODE_ID_HI(n, v >> 4);
1447 }
1448 
/* Subsequent bsets within a btree node: a checksum plus the bset itself */
struct btree_node_entry {
	struct bch_csum		csum;

	union {
	struct bset		keys;
	struct {
		/* pad[22] lines u64s up with bset->u64s, as in struct btree_node */
		__u8		pad[22];
		__le16		u64s;
		__u64		_data[0];
	};
	};
} __packed __aligned(8);
1461 
1462 #endif /* _BCACHEFS_FORMAT_H */
1463 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php