/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H

#include "linux/cleanup.h"
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <linux/refcount.h>
#include <uapi/linux/btrfs_tree.h>
#include "locking.h"
#include "fs.h"
#include "accessors.h"
#include "extent-io-tree.h"

struct extent_buffer;
struct btrfs_block_rsv;
struct btrfs_trans_handle;
struct btrfs_block_group;

/* Read ahead values for struct btrfs_path.reada */
enum {
	READA_NONE,
	READA_BACK,
	READA_FORWARD,
	/*
	 * Similar to READA_FORWARD but unlike it:
	 *
	 * 1) It will trigger readahead even for leaves that are not close to
	 *    each other on disk;
	 * 2) It also triggers readahead for nodes;
	 * 3) During a search, even when a node or leaf is already in memory, it
	 *    will still trigger readahead for other nodes and leaves that follow
	 *    it.
	 *
	 * This is meant to be used only when we know we are iterating over the
	 * entire tree or a very large part of it.
	 */
	READA_FORWARD_ALWAYS,
};
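
/*
 * Illustrative, hedged sketch of the READA_FORWARD_ALWAYS case: a caller
 * that walks a whole tree opts in by setting the reada field before the
 * first search.  The zeroed key, the simplified loop and the omission of
 * the not-found (ret == 1) and error handling that real callers do are all
 * assumptions made only for this example:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = { 0 };
 *	int ret;
 *
 *	path->reada = READA_FORWARD_ALWAYS;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	while (ret == 0) {
 *		// process the item at path->nodes[0] / path->slots[0]
 *		ret = btrfs_next_item(root, path);
 *	}
 *	btrfs_free_path(path);
 */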

/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	u8 locks[BTRFS_MAX_LEVEL];
	u8 reada;
	/* keep some upper locks as we walk down */
	u8 lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int search_commit_root:1;
	unsigned int need_commit_sem:1;
	unsigned int skip_release_on_error:1;
	/*
	 * Indicate that new item (btrfs_search_slot) is extending already
	 * existing item and ins_len contains only the data size and not item
	 * header (ie. sizeof(struct btrfs_item) is not included).
	 */
	unsigned int search_for_extension:1;
	/* Stop search if any locks need to be taken (for read) */
	unsigned int nowait:1;
};
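
/*
 * A hedged sketch of how a path is typically used; the key looked up and
 * the surrounding function are assumptions made only for this example,
 * while btrfs_alloc_path(), btrfs_search_slot() and btrfs_free_path() are
 * the existing btrfs helpers:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		// the item, if found, is at path->nodes[0], slot path->slots[0]
 *	}
 *	btrfs_free_path(path);
 *	return ret < 0 ? ret : 0;
 */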

#define BTRFS_PATH_AUTO_FREE(path_name)					\
	struct btrfs_path *path_name __free(btrfs_free_path) = NULL
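
/*
 * BTRFS_PATH_AUTO_FREE() relies on the cleanup attribute: the path is
 * released via btrfs_free_path() automatically when the variable goes out
 * of scope.  A minimal hedged sketch (the search itself is as in the
 * example above):
 *
 *	BTRFS_PATH_AUTO_FREE(path);
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	// no explicit btrfs_free_path() is needed on any return path
 */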

/*
 * The state of btrfs root
 */
enum {
	/*
	 * btrfs_record_root_in_trans is a multi-step process, and it can race
	 * with the balancing code.  But the race is very small, and only the
	 * first time the root is added to each transaction.  So IN_TRANS_SETUP
	 * is used to tell us when more checks are required
	 */
	BTRFS_ROOT_IN_TRANS_SETUP,

	/*
	 * Set if tree blocks of this root can be shared by other roots.
	 * Only subvolume trees and their reloc trees have this bit set.
	 * Conflicts with TRACK_DIRTY bit.
	 *
	 * This affects two things:
	 *
	 * - How balance works
	 *   For shareable roots, we need to use reloc tree and do path
	 *   replacement for balance, and need various pre/post hooks for
	 *   snapshot creation to handle them.
	 *
	 *   While for non-shareable trees, we just simply do a tree search
	 *   with COW.
	 *
	 * - How dirty roots are tracked
	 *   For shareable roots, btrfs_record_root_in_trans() is needed to
	 *   track them, while non-subvolume roots have TRACK_DIRTY bit, they
	 *   don't need to set this manually.
	 */
	BTRFS_ROOT_SHAREABLE,
	BTRFS_ROOT_TRACK_DIRTY,
	BTRFS_ROOT_IN_RADIX,
	BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
	BTRFS_ROOT_DEFRAG_RUNNING,
	BTRFS_ROOT_FORCE_COW,
	BTRFS_ROOT_MULTI_LOG_TASKS,
	BTRFS_ROOT_DIRTY,
	BTRFS_ROOT_DELETING,

	/*
	 * Reloc tree is orphan, only kept here for qgroup delayed subtree scan
	 *
	 * Set for the subvolume tree owning the reloc tree.
	 */
	BTRFS_ROOT_DEAD_RELOC_TREE,
	/* Mark dead root stored on device whose cleanup needs to be resumed */
	BTRFS_ROOT_DEAD_TREE,
	/* The root has a log tree. Used for subvolume roots and the tree root. */
	BTRFS_ROOT_HAS_LOG_TREE,
	/* Qgroup flushing is in progress */
	BTRFS_ROOT_QGROUP_FLUSHING,
	/* We started the orphan cleanup for this root. */
	BTRFS_ROOT_ORPHAN_CLEANUP,
	/* This root has a drop operation that was started previously. */
	BTRFS_ROOT_UNFINISHED_DROP,
	/* This reloc root needs to have its buffers lockdep class reset. */
	BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};
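
/*
 * The enum values above are bit numbers into btrfs_root::state (an unsigned
 * long) and are manipulated with the regular atomic bitops.  A brief, hedged
 * sketch of the usual pattern; the surrounding logic is invented only for
 * illustration:
 *
 *	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
 *		// balance must go through a reloc tree for this root
 *	} else {
 *		// non-shareable trees are simply COWed in place
 *	}
 *	set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state);
 */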

/*
 * Record swapped tree blocks of a subvolume tree for delayed subtree trace
 * code. For detail check comment in fs/btrfs/qgroup.c.
 */
struct btrfs_qgroup_swapped_blocks {
	spinlock_t lock;
	/* RM_EMPTY_ROOT() of above blocks[] */
	bool swapped;
	struct rb_root blocks[BTRFS_MAX_LEVEL];
};

/*
 * in ram representation of the tree.  extent_root is used for all allocations
 * and for the extent tree extent_root root.
 */
struct btrfs_root {
	struct rb_node rb_node;

	struct extent_buffer *node;

	struct extent_buffer *commit_root;
	struct btrfs_root *log_root;
	struct btrfs_root *reloc_root;

	unsigned long state;
	struct btrfs_root_item root_item;
	struct btrfs_key root_key;
	struct btrfs_fs_info *fs_info;
	struct extent_io_tree dirty_log_pages;

	struct mutex objectid_mutex;

	spinlock_t accounting_lock;
	struct btrfs_block_rsv *block_rsv;

	struct mutex log_mutex;
	wait_queue_head_t log_writer_wait;
	wait_queue_head_t log_commit_wait[2];
	struct list_head log_ctxs[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_writers;
	atomic_t log_commit[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_batch;
	/*
	 * Protected by the 'log_mutex' lock but can be read without holding
	 * that lock to avoid unnecessary lock contention, in which case it
	 * should be read using btrfs_get_root_log_transid() except if it's a
	 * log tree in which case it can be directly accessed. Updates to this
	 * field should always use btrfs_set_root_log_transid(), except for log
	 * trees where the field can be updated directly.
	 */
	int log_transid;
	/* No matter the commit succeeds or not */
	int log_transid_committed;
	/*
	 * Just be updated when the commit succeeds. Use
	 * btrfs_get_root_last_log_commit() and btrfs_set_root_last_log_commit()
	 * to access this field.
	 */
	int last_log_commit;
	pid_t log_start_pid;

	u64 last_trans;

	u64 free_objectid;

	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;

	/* The dirty list is only used by non-shareable roots */
	struct list_head dirty_list;

	struct list_head root_list;

	/*
	 * Xarray that keeps track of in-memory inodes, protected by the lock
	 * @inode_lock.
	 */
	struct xarray inodes;

	/*
	 * Xarray that keeps track of delayed nodes of every inode, protected
	 * by @inode_lock.
	 */
	struct xarray delayed_nodes;
	/*
	 * right now this just gets used so that a root has its own devid
	 * for stat.  It may be used for more later
	 */
	dev_t anon_dev;

	spinlock_t root_item_lock;
	refcount_t refs;

	struct mutex delalloc_mutex;
	spinlock_t delalloc_lock;
	/*
	 * all of the inodes that have delalloc bytes.  It is possible for
	 * this list to be empty even when there is still dirty data=ordered
	 * extents waiting to finish IO.
	 */
	struct list_head delalloc_inodes;
	struct list_head delalloc_root;
	u64 nr_delalloc_inodes;

	struct mutex ordered_extent_mutex;
	/*
	 * this is used by the balancing code to wait for all the pending
	 * ordered extents
	 */
	spinlock_t ordered_extent_lock;

	/*
	 * all of the data=ordered extents pending writeback
	 * these can span multiple transactions and basically include
	 * every dirty data page that isn't from nodatacow
	 */
	struct list_head ordered_extents;
	struct list_head ordered_root;
	u64 nr_ordered_extents;

	/*
	 * Not empty if this subvolume root has gone through tree block swap
	 * (relocation)
	 *
	 * Will be used by reloc_control::dirty_subvol_roots.
	 */
	struct list_head reloc_dirty_list;

	/*
	 * Number of currently running SEND ioctls to prevent
	 * manipulation with the read-only status via SUBVOL_SETFLAGS
	 */
	int send_in_progress;
	/*
	 * Number of currently running deduplication operations that have a
	 * destination inode belonging to this root. Protected by the lock
	 * root_item_lock.
	 */
	int dedupe_in_progress;
	/* For exclusion of snapshot creation and nocow writes */
	struct btrfs_drew_lock snapshot_lock;

	atomic_t snapshot_force_cow;

	/* For qgroup metadata reserved space */
	spinlock_t qgroup_meta_rsv_lock;
	u64 qgroup_meta_rsv_pertrans;
	u64 qgroup_meta_rsv_prealloc;
	wait_queue_head_t qgroup_flush_wait;

	/* Number of active swapfiles */
	atomic_t nr_swapfiles;

	/* Record pairs of swapped blocks for qgroup */
	struct btrfs_qgroup_swapped_blocks swapped_blocks;

	/* Used only by log trees, when logging csum items */
	struct extent_io_tree log_csum_range;

	/* Used in simple quotas, track root during relocation. */
	u64 relocation_src_root;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	u64 alloc_bytenr;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

static inline bool btrfs_root_readonly(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}

static inline bool btrfs_root_dead(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}

static inline u64 btrfs_root_id(const struct btrfs_root *root)
{
	return root->root_key.objectid;
}

static inline int btrfs_get_root_log_transid(const struct btrfs_root *root)
{
	return READ_ONCE(root->log_transid);
}

static inline void btrfs_set_root_log_transid(struct btrfs_root *root, int log_transid)
{
	WRITE_ONCE(root->log_transid, log_transid);
}

static inline int btrfs_get_root_last_log_commit(const struct btrfs_root *root)
{
	return READ_ONCE(root->last_log_commit);
}

static inline void btrfs_set_root_last_log_commit(struct btrfs_root *root, int commit_id)
{
	WRITE_ONCE(root->last_log_commit, commit_id);
}
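
/*
 * Hedged usage sketch for the inline accessors above; the EROFS check and
 * the debug message are invented purely for illustration:
 *
 *	if (btrfs_root_readonly(root))
 *		return -EROFS;
 *	btrfs_debug(root->fs_info, "root %llu log transid %d",
 *		    btrfs_root_id(root), btrfs_get_root_log_transid(root));
 */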
1201 return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item); 356 } 1202 } 357 1203 358 static inline void btrfs_set_root_last_log_com !! 1204 /* >> 1205 * Flags for mount options. >> 1206 * >> 1207 * Note: don't forget to add new options to btrfs_show_options() >> 1208 */ >> 1209 #define BTRFS_MOUNT_NODATASUM (1 << 0) >> 1210 #define BTRFS_MOUNT_NODATACOW (1 << 1) >> 1211 #define BTRFS_MOUNT_NOBARRIER (1 << 2) >> 1212 #define BTRFS_MOUNT_SSD (1 << 3) >> 1213 #define BTRFS_MOUNT_DEGRADED (1 << 4) >> 1214 #define BTRFS_MOUNT_COMPRESS (1 << 5) >> 1215 #define BTRFS_MOUNT_NOTREELOG (1 << 6) >> 1216 #define BTRFS_MOUNT_FLUSHONCOMMIT (1 << 7) >> 1217 #define BTRFS_MOUNT_SSD_SPREAD (1 << 8) >> 1218 #define BTRFS_MOUNT_NOSSD (1 << 9) >> 1219 #define BTRFS_MOUNT_DISCARD_SYNC (1 << 10) >> 1220 #define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11) >> 1221 #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) >> 1222 #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) >> 1223 #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) >> 1224 #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) >> 1225 #define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) >> 1226 #define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17) >> 1227 #define BTRFS_MOUNT_USEBACKUPROOT (1 << 18) >> 1228 #define BTRFS_MOUNT_SKIP_BALANCE (1 << 19) >> 1229 #define BTRFS_MOUNT_CHECK_INTEGRITY (1 << 20) >> 1230 #define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21) >> 1231 #define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR (1 << 22) >> 1232 #define BTRFS_MOUNT_RESCAN_UUID_TREE (1 << 23) >> 1233 #define BTRFS_MOUNT_FRAGMENT_DATA (1 << 24) >> 1234 #define BTRFS_MOUNT_FRAGMENT_METADATA (1 << 25) >> 1235 #define BTRFS_MOUNT_FREE_SPACE_TREE (1 << 26) >> 1236 #define BTRFS_MOUNT_NOLOGREPLAY (1 << 27) >> 1237 #define BTRFS_MOUNT_REF_VERIFY (1 << 28) >> 1238 #define BTRFS_MOUNT_DISCARD_ASYNC (1 << 29) >> 1239 >> 1240 #define BTRFS_DEFAULT_COMMIT_INTERVAL (30) >> 1241 #define BTRFS_DEFAULT_MAX_INLINE (2048) >> 1242 >> 1243 #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) >> 1244 #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) >> 1245 #define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt) >> 1246 #define btrfs_test_opt(fs_info, opt) ((fs_info)->mount_opt & \ >> 1247 BTRFS_MOUNT_##opt) >> 1248 >> 1249 #define btrfs_set_and_info(fs_info, opt, fmt, args...) \ >> 1250 { \ >> 1251 if (!btrfs_test_opt(fs_info, opt)) \ >> 1252 btrfs_info(fs_info, fmt, ##args); \ >> 1253 btrfs_set_opt(fs_info->mount_opt, opt); \ >> 1254 } >> 1255 >> 1256 #define btrfs_clear_and_info(fs_info, opt, fmt, args...) \ >> 1257 { \ >> 1258 if (btrfs_test_opt(fs_info, opt)) \ >> 1259 btrfs_info(fs_info, fmt, ##args); \ >> 1260 btrfs_clear_opt(fs_info->mount_opt, opt); \ >> 1261 } >> 1262 >> 1263 /* >> 1264 * Requests for changes that need to be done during transaction commit. >> 1265 * >> 1266 * Internal mount options that are used for special handling of the real >> 1267 * mount options (eg. 
cannot be set during remount and have to be set during >> 1268 * transaction commit) >> 1269 */ >> 1270 >> 1271 #define BTRFS_PENDING_SET_INODE_MAP_CACHE (0) >> 1272 #define BTRFS_PENDING_CLEAR_INODE_MAP_CACHE (1) >> 1273 #define BTRFS_PENDING_COMMIT (2) >> 1274 >> 1275 #define btrfs_test_pending(info, opt) \ >> 1276 test_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) >> 1277 #define btrfs_set_pending(info, opt) \ >> 1278 set_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) >> 1279 #define btrfs_clear_pending(info, opt) \ >> 1280 clear_bit(BTRFS_PENDING_##opt, &(info)->pending_changes) >> 1281 >> 1282 /* >> 1283 * Helpers for setting pending mount option changes. >> 1284 * >> 1285 * Expects corresponding macros >> 1286 * BTRFS_PENDING_SET_ and CLEAR_ + short mount option name >> 1287 */ >> 1288 #define btrfs_set_pending_and_info(info, opt, fmt, args...) \ >> 1289 do { \ >> 1290 if (!btrfs_raw_test_opt((info)->mount_opt, opt)) { \ >> 1291 btrfs_info((info), fmt, ##args); \ >> 1292 btrfs_set_pending((info), SET_##opt); \ >> 1293 btrfs_clear_pending((info), CLEAR_##opt); \ >> 1294 } \ >> 1295 } while(0) >> 1296 >> 1297 #define btrfs_clear_pending_and_info(info, opt, fmt, args...) \ >> 1298 do { \ >> 1299 if (btrfs_raw_test_opt((info)->mount_opt, opt)) { \ >> 1300 btrfs_info((info), fmt, ##args); \ >> 1301 btrfs_set_pending((info), CLEAR_##opt); \ >> 1302 btrfs_clear_pending((info), SET_##opt); \ >> 1303 } \ >> 1304 } while(0) >> 1305 >> 1306 /* >> 1307 * Inode flags >> 1308 */ >> 1309 #define BTRFS_INODE_NODATASUM (1 << 0) >> 1310 #define BTRFS_INODE_NODATACOW (1 << 1) >> 1311 #define BTRFS_INODE_READONLY (1 << 2) >> 1312 #define BTRFS_INODE_NOCOMPRESS (1 << 3) >> 1313 #define BTRFS_INODE_PREALLOC (1 << 4) >> 1314 #define BTRFS_INODE_SYNC (1 << 5) >> 1315 #define BTRFS_INODE_IMMUTABLE (1 << 6) >> 1316 #define BTRFS_INODE_APPEND (1 << 7) >> 1317 #define BTRFS_INODE_NODUMP (1 << 8) >> 1318 #define BTRFS_INODE_NOATIME (1 << 9) >> 1319 #define BTRFS_INODE_DIRSYNC (1 << 10) >> 1320 #define BTRFS_INODE_COMPRESS (1 << 11) >> 1321 >> 1322 #define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) >> 1323 >> 1324 #define BTRFS_INODE_FLAG_MASK \ >> 1325 (BTRFS_INODE_NODATASUM | \ >> 1326 BTRFS_INODE_NODATACOW | \ >> 1327 BTRFS_INODE_READONLY | \ >> 1328 BTRFS_INODE_NOCOMPRESS | \ >> 1329 BTRFS_INODE_PREALLOC | \ >> 1330 BTRFS_INODE_SYNC | \ >> 1331 BTRFS_INODE_IMMUTABLE | \ >> 1332 BTRFS_INODE_APPEND | \ >> 1333 BTRFS_INODE_NODUMP | \ >> 1334 BTRFS_INODE_NOATIME | \ >> 1335 BTRFS_INODE_DIRSYNC | \ >> 1336 BTRFS_INODE_COMPRESS | \ >> 1337 BTRFS_INODE_ROOT_ITEM_INIT) >> 1338 >> 1339 struct btrfs_map_token { >> 1340 const struct extent_buffer *eb; >> 1341 char *kaddr; >> 1342 unsigned long offset; >> 1343 }; >> 1344 >> 1345 #define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \ >> 1346 ((bytes) >> (fs_info)->sb->s_blocksize_bits) >> 1347 >> 1348 static inline void btrfs_init_map_token(struct btrfs_map_token *token, >> 1349 struct extent_buffer *eb) >> 1350 { >> 1351 token->eb = eb; >> 1352 token->kaddr = NULL; >> 1353 } >> 1354 >> 1355 /* some macros to generate set/get functions for the struct fields. 
This >> 1356 * assumes there is a lefoo_to_cpu for every type, so lets make a simple >> 1357 * one for u8: >> 1358 */ >> 1359 #define le8_to_cpu(v) (v) >> 1360 #define cpu_to_le8(v) (v) >> 1361 #define __le8 u8 >> 1362 >> 1363 #define read_eb_member(eb, ptr, type, member, result) (\ >> 1364 read_extent_buffer(eb, (char *)(result), \ >> 1365 ((unsigned long)(ptr)) + \ >> 1366 offsetof(type, member), \ >> 1367 sizeof(((type *)0)->member))) >> 1368 >> 1369 #define write_eb_member(eb, ptr, type, member, result) (\ >> 1370 write_extent_buffer(eb, (char *)(result), \ >> 1371 ((unsigned long)(ptr)) + \ >> 1372 offsetof(type, member), \ >> 1373 sizeof(((type *)0)->member))) >> 1374 >> 1375 #define DECLARE_BTRFS_SETGET_BITS(bits) \ >> 1376 u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \ >> 1377 const void *ptr, unsigned long off, \ >> 1378 struct btrfs_map_token *token); \ >> 1379 void btrfs_set_token_##bits(struct extent_buffer *eb, const void *ptr, \ >> 1380 unsigned long off, u##bits val, \ >> 1381 struct btrfs_map_token *token); \ >> 1382 u##bits btrfs_get_##bits(const struct extent_buffer *eb, \ >> 1383 const void *ptr, unsigned long off); \ >> 1384 void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \ >> 1385 unsigned long off, u##bits val); >> 1386 >> 1387 DECLARE_BTRFS_SETGET_BITS(8) >> 1388 DECLARE_BTRFS_SETGET_BITS(16) >> 1389 DECLARE_BTRFS_SETGET_BITS(32) >> 1390 DECLARE_BTRFS_SETGET_BITS(64) >> 1391 >> 1392 #define BTRFS_SETGET_FUNCS(name, type, member, bits) \ >> 1393 static inline u##bits btrfs_##name(const struct extent_buffer *eb, \ >> 1394 const type *s) \ >> 1395 { \ >> 1396 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ >> 1397 return btrfs_get_##bits(eb, s, offsetof(type, member)); \ >> 1398 } \ >> 1399 static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \ >> 1400 u##bits val) \ >> 1401 { \ >> 1402 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ >> 1403 btrfs_set_##bits(eb, s, offsetof(type, member), val); \ >> 1404 } \ >> 1405 static inline u##bits btrfs_token_##name(const struct extent_buffer *eb,\ >> 1406 const type *s, \ >> 1407 struct btrfs_map_token *token) \ >> 1408 { \ >> 1409 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ >> 1410 return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \ >> 1411 } \ >> 1412 static inline void btrfs_set_token_##name(struct extent_buffer *eb, \ >> 1413 type *s, u##bits val, \ >> 1414 struct btrfs_map_token *token) \ >> 1415 { \ >> 1416 BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \ >> 1417 btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \ >> 1418 } >> 1419 >> 1420 #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \ >> 1421 static inline u##bits btrfs_##name(const struct extent_buffer *eb) \ >> 1422 { \ >> 1423 const type *p = page_address(eb->pages[0]); \ >> 1424 u##bits res = le##bits##_to_cpu(p->member); \ >> 1425 return res; \ >> 1426 } \ >> 1427 static inline void btrfs_set_##name(struct extent_buffer *eb, \ >> 1428 u##bits val) \ >> 1429 { \ >> 1430 type *p = page_address(eb->pages[0]); \ >> 1431 p->member = cpu_to_le##bits(val); \ >> 1432 } >> 1433 >> 1434 #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \ >> 1435 static inline u##bits btrfs_##name(const type *s) \ >> 1436 { \ >> 1437 return le##bits##_to_cpu(s->member); \ >> 1438 } \ >> 1439 static inline void btrfs_set_##name(type *s, u##bits val) \ >> 1440 { \ >> 1441 s->member = cpu_to_le##bits(val); \ >> 1442 } >> 1443 
>> 1444 >> 1445 static inline u64 btrfs_device_total_bytes(struct extent_buffer *eb, >> 1446 struct btrfs_dev_item *s) >> 1447 { >> 1448 BUILD_BUG_ON(sizeof(u64) != >> 1449 sizeof(((struct btrfs_dev_item *)0))->total_bytes); >> 1450 return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item, >> 1451 total_bytes)); >> 1452 } >> 1453 static inline void btrfs_set_device_total_bytes(struct extent_buffer *eb, >> 1454 struct btrfs_dev_item *s, >> 1455 u64 val) >> 1456 { >> 1457 BUILD_BUG_ON(sizeof(u64) != >> 1458 sizeof(((struct btrfs_dev_item *)0))->total_bytes); >> 1459 WARN_ON(!IS_ALIGNED(val, eb->fs_info->sectorsize)); >> 1460 btrfs_set_64(eb, s, offsetof(struct btrfs_dev_item, total_bytes), val); >> 1461 } >> 1462 >> 1463 >> 1464 BTRFS_SETGET_FUNCS(device_type, struct btrfs_dev_item, type, 64); >> 1465 BTRFS_SETGET_FUNCS(device_bytes_used, struct btrfs_dev_item, bytes_used, 64); >> 1466 BTRFS_SETGET_FUNCS(device_io_align, struct btrfs_dev_item, io_align, 32); >> 1467 BTRFS_SETGET_FUNCS(device_io_width, struct btrfs_dev_item, io_width, 32); >> 1468 BTRFS_SETGET_FUNCS(device_start_offset, struct btrfs_dev_item, >> 1469 start_offset, 64); >> 1470 BTRFS_SETGET_FUNCS(device_sector_size, struct btrfs_dev_item, sector_size, 32); >> 1471 BTRFS_SETGET_FUNCS(device_id, struct btrfs_dev_item, devid, 64); >> 1472 BTRFS_SETGET_FUNCS(device_group, struct btrfs_dev_item, dev_group, 32); >> 1473 BTRFS_SETGET_FUNCS(device_seek_speed, struct btrfs_dev_item, seek_speed, 8); >> 1474 BTRFS_SETGET_FUNCS(device_bandwidth, struct btrfs_dev_item, bandwidth, 8); >> 1475 BTRFS_SETGET_FUNCS(device_generation, struct btrfs_dev_item, generation, 64); >> 1476 >> 1477 BTRFS_SETGET_STACK_FUNCS(stack_device_type, struct btrfs_dev_item, type, 64); >> 1478 BTRFS_SETGET_STACK_FUNCS(stack_device_total_bytes, struct btrfs_dev_item, >> 1479 total_bytes, 64); >> 1480 BTRFS_SETGET_STACK_FUNCS(stack_device_bytes_used, struct btrfs_dev_item, >> 1481 bytes_used, 64); >> 1482 BTRFS_SETGET_STACK_FUNCS(stack_device_io_align, struct btrfs_dev_item, >> 1483 io_align, 32); >> 1484 BTRFS_SETGET_STACK_FUNCS(stack_device_io_width, struct btrfs_dev_item, >> 1485 io_width, 32); >> 1486 BTRFS_SETGET_STACK_FUNCS(stack_device_sector_size, struct btrfs_dev_item, >> 1487 sector_size, 32); >> 1488 BTRFS_SETGET_STACK_FUNCS(stack_device_id, struct btrfs_dev_item, devid, 64); >> 1489 BTRFS_SETGET_STACK_FUNCS(stack_device_group, struct btrfs_dev_item, >> 1490 dev_group, 32); >> 1491 BTRFS_SETGET_STACK_FUNCS(stack_device_seek_speed, struct btrfs_dev_item, >> 1492 seek_speed, 8); >> 1493 BTRFS_SETGET_STACK_FUNCS(stack_device_bandwidth, struct btrfs_dev_item, >> 1494 bandwidth, 8); >> 1495 BTRFS_SETGET_STACK_FUNCS(stack_device_generation, struct btrfs_dev_item, >> 1496 generation, 64); >> 1497 >> 1498 static inline unsigned long btrfs_device_uuid(struct btrfs_dev_item *d) >> 1499 { >> 1500 return (unsigned long)d + offsetof(struct btrfs_dev_item, uuid); >> 1501 } >> 1502 >> 1503 static inline unsigned long btrfs_device_fsid(struct btrfs_dev_item *d) >> 1504 { >> 1505 return (unsigned long)d + offsetof(struct btrfs_dev_item, fsid); >> 1506 } >> 1507 >> 1508 BTRFS_SETGET_FUNCS(chunk_length, struct btrfs_chunk, length, 64); >> 1509 BTRFS_SETGET_FUNCS(chunk_owner, struct btrfs_chunk, owner, 64); >> 1510 BTRFS_SETGET_FUNCS(chunk_stripe_len, struct btrfs_chunk, stripe_len, 64); >> 1511 BTRFS_SETGET_FUNCS(chunk_io_align, struct btrfs_chunk, io_align, 32); >> 1512 BTRFS_SETGET_FUNCS(chunk_io_width, struct btrfs_chunk, io_width, 32); >> 1513 
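/*
 * Editor's note -- illustrative expansion, not part of the original header.
 * The BTRFS_SETGET_FUNCS() accessors above read and write fields directly in
 * an extent buffer.  For example, BTRFS_SETGET_FUNCS(device_id, struct
 * btrfs_dev_item, devid, 64) should expand to roughly:
 *
 *	static inline u64 btrfs_device_id(const struct extent_buffer *eb,
 *					  const struct btrfs_dev_item *s)
 *	{
 *		BUILD_BUG_ON(sizeof(u64) != sizeof(((struct btrfs_dev_item *)0))->devid);
 *		return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item, devid));
 *	}
 *	static inline void btrfs_set_device_id(struct extent_buffer *eb,
 *					       struct btrfs_dev_item *s, u64 val)
 *	{
 *		BUILD_BUG_ON(sizeof(u64) != sizeof(((struct btrfs_dev_item *)0))->devid);
 *		btrfs_set_64(eb, s, offsetof(struct btrfs_dev_item, devid), val);
 *	}
 *
 * plus the matching btrfs_token_device_id()/btrfs_set_token_device_id()
 * variants.  btrfs_device_total_bytes() above is open coded instead of using
 * the macro so that its setter can add the WARN_ON() sectorsize alignment
 * check.
 */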
BTRFS_SETGET_FUNCS(chunk_sector_size, struct btrfs_chunk, sector_size, 32); >> 1514 BTRFS_SETGET_FUNCS(chunk_type, struct btrfs_chunk, type, 64); >> 1515 BTRFS_SETGET_FUNCS(chunk_num_stripes, struct btrfs_chunk, num_stripes, 16); >> 1516 BTRFS_SETGET_FUNCS(chunk_sub_stripes, struct btrfs_chunk, sub_stripes, 16); >> 1517 BTRFS_SETGET_FUNCS(stripe_devid, struct btrfs_stripe, devid, 64); >> 1518 BTRFS_SETGET_FUNCS(stripe_offset, struct btrfs_stripe, offset, 64); >> 1519 >> 1520 static inline char *btrfs_stripe_dev_uuid(struct btrfs_stripe *s) >> 1521 { >> 1522 return (char *)s + offsetof(struct btrfs_stripe, dev_uuid); >> 1523 } >> 1524 >> 1525 BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64); >> 1526 BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64); >> 1527 BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk, >> 1528 stripe_len, 64); >> 1529 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk, >> 1530 io_align, 32); >> 1531 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk, >> 1532 io_width, 32); >> 1533 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk, >> 1534 sector_size, 32); >> 1535 BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64); >> 1536 BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk, >> 1537 num_stripes, 16); >> 1538 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sub_stripes, struct btrfs_chunk, >> 1539 sub_stripes, 16); >> 1540 BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64); >> 1541 BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64); >> 1542 >> 1543 static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c, >> 1544 int nr) >> 1545 { >> 1546 unsigned long offset = (unsigned long)c; >> 1547 offset += offsetof(struct btrfs_chunk, stripe); >> 1548 offset += nr * sizeof(struct btrfs_stripe); >> 1549 return (struct btrfs_stripe *)offset; >> 1550 } >> 1551 >> 1552 static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr) >> 1553 { >> 1554 return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr)); >> 1555 } >> 1556 >> 1557 static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, >> 1558 struct btrfs_chunk *c, int nr) >> 1559 { >> 1560 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); >> 1561 } >> 1562 >> 1563 static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, >> 1564 struct btrfs_chunk *c, int nr) >> 1565 { >> 1566 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); >> 1567 } >> 1568 >> 1569 /* struct btrfs_block_group_item */ >> 1570 BTRFS_SETGET_STACK_FUNCS(stack_block_group_used, struct btrfs_block_group_item, >> 1571 used, 64); >> 1572 BTRFS_SETGET_FUNCS(block_group_used, struct btrfs_block_group_item, >> 1573 used, 64); >> 1574 BTRFS_SETGET_STACK_FUNCS(stack_block_group_chunk_objectid, >> 1575 struct btrfs_block_group_item, chunk_objectid, 64); >> 1576 >> 1577 BTRFS_SETGET_FUNCS(block_group_chunk_objectid, >> 1578 struct btrfs_block_group_item, chunk_objectid, 64); >> 1579 BTRFS_SETGET_FUNCS(block_group_flags, >> 1580 struct btrfs_block_group_item, flags, 64); >> 1581 BTRFS_SETGET_STACK_FUNCS(stack_block_group_flags, >> 1582 struct btrfs_block_group_item, flags, 64); >> 1583 >> 1584 /* struct btrfs_free_space_info */ >> 1585 BTRFS_SETGET_FUNCS(free_space_extent_count, struct btrfs_free_space_info, >> 1586 extent_count, 32); >> 1587 BTRFS_SETGET_FUNCS(free_space_flags, struct btrfs_free_space_info, flags, 32); >> 
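/*
 * Editor's note -- illustrative usage sketch, not part of the original header.
 * demo_first_stripe_devid() is a made-up name; it shows how the chunk and
 * stripe helpers above combine: the num_stripes accessor returns the on-disk
 * little-endian count in CPU order, btrfs_stripe_nr() does the pointer
 * arithmetic past the fixed part of the chunk item, and the devid accessor
 * byte-swaps the stripe field as needed.
 */
static inline u64 demo_first_stripe_devid(struct extent_buffer *leaf,
					  struct btrfs_chunk *chunk)
{
	if (btrfs_chunk_num_stripes(leaf, chunk) == 0)
		return 0;
	return btrfs_stripe_devid_nr(leaf, chunk, 0);
}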
1588 >> 1589 /* struct btrfs_inode_ref */ >> 1590 BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16); >> 1591 BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64); >> 1592 >> 1593 /* struct btrfs_inode_extref */ >> 1594 BTRFS_SETGET_FUNCS(inode_extref_parent, struct btrfs_inode_extref, >> 1595 parent_objectid, 64); >> 1596 BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref, >> 1597 name_len, 16); >> 1598 BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64); >> 1599 >> 1600 /* struct btrfs_inode_item */ >> 1601 BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64); >> 1602 BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64); >> 1603 BTRFS_SETGET_FUNCS(inode_transid, struct btrfs_inode_item, transid, 64); >> 1604 BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item, size, 64); >> 1605 BTRFS_SETGET_FUNCS(inode_nbytes, struct btrfs_inode_item, nbytes, 64); >> 1606 BTRFS_SETGET_FUNCS(inode_block_group, struct btrfs_inode_item, block_group, 64); >> 1607 BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32); >> 1608 BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32); >> 1609 BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32); >> 1610 BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32); >> 1611 BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64); >> 1612 BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64); >> 1613 BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, >> 1614 generation, 64); >> 1615 BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, >> 1616 sequence, 64); >> 1617 BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, >> 1618 transid, 64); >> 1619 BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); >> 1620 BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, >> 1621 nbytes, 64); >> 1622 BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, >> 1623 block_group, 64); >> 1624 BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); >> 1625 BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); >> 1626 BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); >> 1627 BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); >> 1628 BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); >> 1629 BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); >> 1630 BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); >> 1631 BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); >> 1632 BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); >> 1633 BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); >> 1634 >> 1635 /* struct btrfs_dev_extent */ >> 1636 BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, >> 1637 chunk_tree, 64); >> 1638 BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent, >> 1639 chunk_objectid, 64); >> 1640 BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent, >> 1641 chunk_offset, 64); >> 1642 BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64); >> 1643 >> 1644 static inline unsigned long btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev) >> 1645 { >> 
1646 unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid); >> 1647 return (unsigned long)dev + ptr; >> 1648 } >> 1649 >> 1650 BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64); >> 1651 BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item, >> 1652 generation, 64); >> 1653 BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64); >> 1654 >> 1655 BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32); >> 1656 >> 1657 >> 1658 BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8); >> 1659 >> 1660 static inline void btrfs_tree_block_key(struct extent_buffer *eb, >> 1661 struct btrfs_tree_block_info *item, >> 1662 struct btrfs_disk_key *key) >> 1663 { >> 1664 read_eb_member(eb, item, struct btrfs_tree_block_info, key, key); >> 1665 } >> 1666 >> 1667 static inline void btrfs_set_tree_block_key(struct extent_buffer *eb, >> 1668 struct btrfs_tree_block_info *item, >> 1669 struct btrfs_disk_key *key) >> 1670 { >> 1671 write_eb_member(eb, item, struct btrfs_tree_block_info, key, key); >> 1672 } >> 1673 >> 1674 BTRFS_SETGET_FUNCS(extent_data_ref_root, struct btrfs_extent_data_ref, >> 1675 root, 64); >> 1676 BTRFS_SETGET_FUNCS(extent_data_ref_objectid, struct btrfs_extent_data_ref, >> 1677 objectid, 64); >> 1678 BTRFS_SETGET_FUNCS(extent_data_ref_offset, struct btrfs_extent_data_ref, >> 1679 offset, 64); >> 1680 BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref, >> 1681 count, 32); >> 1682 >> 1683 BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref, >> 1684 count, 32); >> 1685 >> 1686 BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref, >> 1687 type, 8); >> 1688 BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref, >> 1689 offset, 64); >> 1690 >> 1691 static inline u32 btrfs_extent_inline_ref_size(int type) >> 1692 { >> 1693 if (type == BTRFS_TREE_BLOCK_REF_KEY || >> 1694 type == BTRFS_SHARED_BLOCK_REF_KEY) >> 1695 return sizeof(struct btrfs_extent_inline_ref); >> 1696 if (type == BTRFS_SHARED_DATA_REF_KEY) >> 1697 return sizeof(struct btrfs_shared_data_ref) + >> 1698 sizeof(struct btrfs_extent_inline_ref); >> 1699 if (type == BTRFS_EXTENT_DATA_REF_KEY) >> 1700 return sizeof(struct btrfs_extent_data_ref) + >> 1701 offsetof(struct btrfs_extent_inline_ref, offset); >> 1702 return 0; >> 1703 } >> 1704 >> 1705 BTRFS_SETGET_FUNCS(ref_root_v0, struct btrfs_extent_ref_v0, root, 64); >> 1706 BTRFS_SETGET_FUNCS(ref_generation_v0, struct btrfs_extent_ref_v0, >> 1707 generation, 64); >> 1708 BTRFS_SETGET_FUNCS(ref_objectid_v0, struct btrfs_extent_ref_v0, objectid, 64); >> 1709 BTRFS_SETGET_FUNCS(ref_count_v0, struct btrfs_extent_ref_v0, count, 32); >> 1710 >> 1711 /* struct btrfs_node */ >> 1712 BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64); >> 1713 BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64); >> 1714 BTRFS_SETGET_STACK_FUNCS(stack_key_blockptr, struct btrfs_key_ptr, >> 1715 blockptr, 64); >> 1716 BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr, >> 1717 generation, 64); >> 1718 >> 1719 static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) >> 1720 { >> 1721 unsigned long ptr; >> 1722 ptr = offsetof(struct btrfs_node, ptrs) + >> 1723 sizeof(struct btrfs_key_ptr) * nr; >> 1724 return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr); >> 1725 } >> 1726 >> 1727 static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, >> 1728 int nr, 
u64 val) >> 1729 { >> 1730 unsigned long ptr; >> 1731 ptr = offsetof(struct btrfs_node, ptrs) + >> 1732 sizeof(struct btrfs_key_ptr) * nr; >> 1733 btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val); >> 1734 } >> 1735 >> 1736 static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) >> 1737 { >> 1738 unsigned long ptr; >> 1739 ptr = offsetof(struct btrfs_node, ptrs) + >> 1740 sizeof(struct btrfs_key_ptr) * nr; >> 1741 return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr); >> 1742 } >> 1743 >> 1744 static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb, >> 1745 int nr, u64 val) >> 1746 { >> 1747 unsigned long ptr; >> 1748 ptr = offsetof(struct btrfs_node, ptrs) + >> 1749 sizeof(struct btrfs_key_ptr) * nr; >> 1750 btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val); >> 1751 } >> 1752 >> 1753 static inline unsigned long btrfs_node_key_ptr_offset(int nr) >> 1754 { >> 1755 return offsetof(struct btrfs_node, ptrs) + >> 1756 sizeof(struct btrfs_key_ptr) * nr; >> 1757 } >> 1758 >> 1759 void btrfs_node_key(const struct extent_buffer *eb, >> 1760 struct btrfs_disk_key *disk_key, int nr); >> 1761 >> 1762 static inline void btrfs_set_node_key(struct extent_buffer *eb, >> 1763 struct btrfs_disk_key *disk_key, int nr) >> 1764 { >> 1765 unsigned long ptr; >> 1766 ptr = btrfs_node_key_ptr_offset(nr); >> 1767 write_eb_member(eb, (struct btrfs_key_ptr *)ptr, >> 1768 struct btrfs_key_ptr, key, disk_key); >> 1769 } >> 1770 >> 1771 /* struct btrfs_item */ >> 1772 BTRFS_SETGET_FUNCS(item_offset, struct btrfs_item, offset, 32); >> 1773 BTRFS_SETGET_FUNCS(item_size, struct btrfs_item, size, 32); >> 1774 BTRFS_SETGET_STACK_FUNCS(stack_item_offset, struct btrfs_item, offset, 32); >> 1775 BTRFS_SETGET_STACK_FUNCS(stack_item_size, struct btrfs_item, size, 32); >> 1776 >> 1777 static inline unsigned long btrfs_item_nr_offset(int nr) >> 1778 { >> 1779 return offsetof(struct btrfs_leaf, items) + >> 1780 sizeof(struct btrfs_item) * nr; >> 1781 } >> 1782 >> 1783 static inline struct btrfs_item *btrfs_item_nr(int nr) >> 1784 { >> 1785 return (struct btrfs_item *)btrfs_item_nr_offset(nr); >> 1786 } >> 1787 >> 1788 static inline u32 btrfs_item_end(const struct extent_buffer *eb, >> 1789 struct btrfs_item *item) >> 1790 { >> 1791 return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item); >> 1792 } >> 1793 >> 1794 static inline u32 btrfs_item_end_nr(const struct extent_buffer *eb, int nr) >> 1795 { >> 1796 return btrfs_item_end(eb, btrfs_item_nr(nr)); >> 1797 } >> 1798 >> 1799 static inline u32 btrfs_item_offset_nr(const struct extent_buffer *eb, int nr) 359 { 1800 { 360 WRITE_ONCE(root->last_log_commit, comm !! 1801 return btrfs_item_offset(eb, btrfs_item_nr(nr)); 361 } 1802 } 362 1803 363 static inline u64 btrfs_get_root_last_trans(co !! 1804 static inline u32 btrfs_item_size_nr(const struct extent_buffer *eb, int nr) 364 { 1805 { 365 return READ_ONCE(root->last_trans); !! 1806 return btrfs_item_size(eb, btrfs_item_nr(nr)); 366 } 1807 } 367 1808 368 static inline void btrfs_set_root_last_trans(s !! 1809 static inline void btrfs_item_key(const struct extent_buffer *eb, >> 1810 struct btrfs_disk_key *disk_key, int nr) 369 { 1811 { 370 WRITE_ONCE(root->last_trans, transid); !! 
1812 struct btrfs_item *item = btrfs_item_nr(nr); >> 1813 read_eb_member(eb, item, struct btrfs_item, key, disk_key); 371 } 1814 } 372 1815 >> 1816 static inline void btrfs_set_item_key(struct extent_buffer *eb, >> 1817 struct btrfs_disk_key *disk_key, int nr) >> 1818 { >> 1819 struct btrfs_item *item = btrfs_item_nr(nr); >> 1820 write_eb_member(eb, item, struct btrfs_item, key, disk_key); >> 1821 } >> 1822 >> 1823 BTRFS_SETGET_FUNCS(dir_log_end, struct btrfs_dir_log_item, end, 64); >> 1824 373 /* 1825 /* 374 * Structure that conveys information about an !! 1826 * struct btrfs_root_ref 375 * all the extents in a file range. << 376 */ 1827 */ 377 struct btrfs_replace_extent_info { !! 1828 BTRFS_SETGET_FUNCS(root_ref_dirid, struct btrfs_root_ref, dirid, 64); 378 u64 disk_offset; !! 1829 BTRFS_SETGET_FUNCS(root_ref_sequence, struct btrfs_root_ref, sequence, 64); 379 u64 disk_len; !! 1830 BTRFS_SETGET_FUNCS(root_ref_name_len, struct btrfs_root_ref, name_len, 16); 380 u64 data_offset; << 381 u64 data_len; << 382 u64 file_offset; << 383 /* Pointer to a file extent item of ty << 384 char *extent_buf; << 385 /* << 386 * Set to true when attempting to repl << 387 * described by this structure, set to << 388 * existing extent into a file range. << 389 */ << 390 bool is_new_extent; << 391 /* Indicate if we should update the in << 392 bool update_times; << 393 /* Meaningful only if is_new_extent is << 394 int qgroup_reserved; << 395 /* << 396 * Meaningful only if is_new_extent is << 397 * Used to track how many extent items << 398 * subvolume tree that refer to the ex << 399 * so that we know when to create a ne << 400 * one. << 401 */ << 402 int insertions; << 403 }; << 404 1831 405 /* Arguments for btrfs_drop_extents() */ !! 1832 /* struct btrfs_dir_item */ 406 struct btrfs_drop_extents_args { !! 1833 BTRFS_SETGET_FUNCS(dir_data_len, struct btrfs_dir_item, data_len, 16); 407 /* Input parameters */ !! 1834 BTRFS_SETGET_FUNCS(dir_type, struct btrfs_dir_item, type, 8); >> 1835 BTRFS_SETGET_FUNCS(dir_name_len, struct btrfs_dir_item, name_len, 16); >> 1836 BTRFS_SETGET_FUNCS(dir_transid, struct btrfs_dir_item, transid, 64); >> 1837 BTRFS_SETGET_STACK_FUNCS(stack_dir_type, struct btrfs_dir_item, type, 8); >> 1838 BTRFS_SETGET_STACK_FUNCS(stack_dir_data_len, struct btrfs_dir_item, >> 1839 data_len, 16); >> 1840 BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item, >> 1841 name_len, 16); >> 1842 BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item, >> 1843 transid, 64); 408 1844 409 /* !! 1845 static inline void btrfs_dir_item_key(const struct extent_buffer *eb, 410 * If NULL, btrfs_drop_extents() will !! 1846 const struct btrfs_dir_item *item, 411 * If 'replace_extent' is true, this m !! 1847 struct btrfs_disk_key *key) 412 * is always released except if 'repla !! 1848 { 413 * btrfs_drop_extents() sets 'extent_i !! 1849 read_eb_member(eb, item, struct btrfs_dir_item, location, key); 414 * the path is kept locked. !! 1850 } 415 */ << 416 struct btrfs_path *path; << 417 /* Start offset of the range to drop e << 418 u64 start; << 419 /* End (exclusive, last byte + 1) of t << 420 u64 end; << 421 /* If true drop all the extent maps in << 422 bool drop_cache; << 423 /* << 424 * If true it means we want to insert << 425 * the extents in the range. If this i << 426 * parameter must be set as well and t << 427 * be set to true by btrfs_drop_extent << 428 * extent. 
<< 429 * Note: when this is set to true the << 430 */ << 431 bool replace_extent; << 432 /* << 433 * Used if 'replace_extent' is true. S << 434 * insert after dropping all existing << 435 */ << 436 u32 extent_item_size; << 437 1851 438 /* Output parameters */ !! 1852 static inline void btrfs_set_dir_item_key(struct extent_buffer *eb, >> 1853 struct btrfs_dir_item *item, >> 1854 const struct btrfs_disk_key *key) >> 1855 { >> 1856 write_eb_member(eb, item, struct btrfs_dir_item, location, key); >> 1857 } 439 1858 440 /* !! 1859 BTRFS_SETGET_FUNCS(free_space_entries, struct btrfs_free_space_header, 441 * Set to the minimum between the inpu !! 1860 num_entries, 64); 442 * (exclusive, last byte + 1) of the l !! 1861 BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header, 443 * set even if btrfs_drop_extents() re !! 1862 num_bitmaps, 64); 444 */ !! 1863 BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header, 445 u64 drop_end; !! 1864 generation, 64); 446 /* << 447 * The number of allocated bytes found << 448 * than the range's length when there << 449 */ << 450 u64 bytes_found; << 451 /* << 452 * Only set if 'replace_extent' is tru << 453 * to insert a replacement extent afte << 454 * range, otherwise set to false by bt << 455 * Also, if btrfs_drop_extents() has s << 456 * returned with the path locked, othe << 457 * false it has returned with the path << 458 */ << 459 bool extent_inserted; << 460 }; << 461 1865 462 struct btrfs_file_private { !! 1866 static inline void btrfs_free_space_key(const struct extent_buffer *eb, 463 void *filldir_buf; !! 1867 const struct btrfs_free_space_header *h, 464 u64 last_index; !! 1868 struct btrfs_disk_key *key) 465 struct extent_state *llseek_cached_sta !! 1869 { 466 /* Task that allocated this structure. !! 1870 read_eb_member(eb, h, struct btrfs_free_space_header, location, key); 467 struct task_struct *owner_task; !! 1871 } 468 }; << 469 1872 470 static inline u32 BTRFS_LEAF_DATA_SIZE(const s !! 1873 static inline void btrfs_set_free_space_key(struct extent_buffer *eb, >> 1874 struct btrfs_free_space_header *h, >> 1875 const struct btrfs_disk_key *key) 471 { 1876 { 472 return info->nodesize - sizeof(struct !! 1877 write_eb_member(eb, h, struct btrfs_free_space_header, location, key); 473 } 1878 } 474 1879 475 static inline u32 BTRFS_MAX_ITEM_SIZE(const st !! 1880 /* struct btrfs_disk_key */ >> 1881 BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key, >> 1882 objectid, 64); >> 1883 BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64); >> 1884 BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8); >> 1885 >> 1886 static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu, >> 1887 const struct btrfs_disk_key *disk) 476 { 1888 { 477 return BTRFS_LEAF_DATA_SIZE(info) - si !! 1889 cpu->offset = le64_to_cpu(disk->offset); >> 1890 cpu->type = disk->type; >> 1891 cpu->objectid = le64_to_cpu(disk->objectid); 478 } 1892 } 479 1893 480 static inline u32 BTRFS_NODEPTRS_PER_BLOCK(con !! 1894 static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk, >> 1895 const struct btrfs_key *cpu) 481 { 1896 { 482 return BTRFS_LEAF_DATA_SIZE(info) / si !! 1897 disk->offset = cpu_to_le64(cpu->offset); >> 1898 disk->type = cpu->type; >> 1899 disk->objectid = cpu_to_le64(cpu->objectid); 483 } 1900 } 484 1901 485 static inline u32 BTRFS_MAX_XATTR_SIZE(const s !! 
1902 static inline void btrfs_node_key_to_cpu(const struct extent_buffer *eb, >> 1903 struct btrfs_key *key, int nr) 486 { 1904 { 487 return BTRFS_MAX_ITEM_SIZE(info) - siz !! 1905 struct btrfs_disk_key disk_key; >> 1906 btrfs_node_key(eb, &disk_key, nr); >> 1907 btrfs_disk_key_to_cpu(key, &disk_key); 488 } 1908 } 489 1909 490 #define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \ !! 1910 static inline void btrfs_item_key_to_cpu(const struct extent_buffer *eb, 491 ((bytes) >> (f !! 1911 struct btrfs_key *key, int nr) >> 1912 { >> 1913 struct btrfs_disk_key disk_key; >> 1914 btrfs_item_key(eb, &disk_key, nr); >> 1915 btrfs_disk_key_to_cpu(key, &disk_key); >> 1916 } 492 1917 493 static inline gfp_t btrfs_alloc_write_mask(str !! 1918 static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb, >> 1919 const struct btrfs_dir_item *item, >> 1920 struct btrfs_key *key) 494 { 1921 { 495 return mapping_gfp_constraint(mapping, !! 1922 struct btrfs_disk_key disk_key; >> 1923 btrfs_dir_item_key(eb, item, &disk_key); >> 1924 btrfs_disk_key_to_cpu(key, &disk_key); 496 } 1925 } 497 1926 498 void btrfs_error_unpin_extent_range(struct btr !! 1927 /* struct btrfs_header */ 499 int btrfs_discard_extent(struct btrfs_fs_info !! 1928 BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64); 500 u64 num_bytes, u64 *a !! 1929 BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header, 501 int btrfs_trim_fs(struct btrfs_fs_info *fs_inf !! 1930 generation, 64); >> 1931 BTRFS_SETGET_HEADER_FUNCS(header_owner, struct btrfs_header, owner, 64); >> 1932 BTRFS_SETGET_HEADER_FUNCS(header_nritems, struct btrfs_header, nritems, 32); >> 1933 BTRFS_SETGET_HEADER_FUNCS(header_flags, struct btrfs_header, flags, 64); >> 1934 BTRFS_SETGET_HEADER_FUNCS(header_level, struct btrfs_header, level, 8); >> 1935 BTRFS_SETGET_STACK_FUNCS(stack_header_generation, struct btrfs_header, >> 1936 generation, 64); >> 1937 BTRFS_SETGET_STACK_FUNCS(stack_header_owner, struct btrfs_header, owner, 64); >> 1938 BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header, >> 1939 nritems, 32); >> 1940 BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64); 502 1941 503 /* ctree.c */ !! 1942 static inline int btrfs_header_flag(const struct extent_buffer *eb, u64 flag) 504 int __init btrfs_ctree_init(void); !! 1943 { 505 void __cold btrfs_ctree_exit(void); !! 1944 return (btrfs_header_flags(eb) & flag) == flag; >> 1945 } 506 1946 507 int btrfs_bin_search(struct extent_buffer *eb, !! 1947 static inline void btrfs_set_header_flag(struct extent_buffer *eb, u64 flag) 508 const struct btrfs_key *k !! 1948 { >> 1949 u64 flags = btrfs_header_flags(eb); >> 1950 btrfs_set_header_flags(eb, flags | flag); >> 1951 } 509 1952 510 int __pure btrfs_comp_cpu_keys(const struct bt !! 
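/*
 * Editor's note -- illustrative usage sketch, not part of the original header.
 * demo_leaf_last_key() is a made-up helper showing how the header accessors
 * and key conversion routines above cooperate: the item count comes from the
 * leaf header, and btrfs_item_key_to_cpu() returns the key of a slot already
 * converted from the on-disk little-endian btrfs_disk_key layout.
 */
static inline bool demo_leaf_last_key(const struct extent_buffer *leaf,
				      struct btrfs_key *key)
{
	u32 nritems = btrfs_header_nritems(leaf);

	if (nritems == 0)
		return false;
	btrfs_item_key_to_cpu(leaf, key, nritems - 1);
	return true;
}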
1953 static inline void btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag) >> 1954 { >> 1955 u64 flags = btrfs_header_flags(eb); >> 1956 btrfs_set_header_flags(eb, flags & ~flag); >> 1957 } >> 1958 >> 1959 static inline int btrfs_header_backref_rev(const struct extent_buffer *eb) >> 1960 { >> 1961 u64 flags = btrfs_header_flags(eb); >> 1962 return flags >> BTRFS_BACKREF_REV_SHIFT; >> 1963 } >> 1964 >> 1965 static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, >> 1966 int rev) >> 1967 { >> 1968 u64 flags = btrfs_header_flags(eb); >> 1969 flags &= ~BTRFS_BACKREF_REV_MASK; >> 1970 flags |= (u64)rev << BTRFS_BACKREF_REV_SHIFT; >> 1971 btrfs_set_header_flags(eb, flags); >> 1972 } >> 1973 >> 1974 static inline unsigned long btrfs_header_fsid(void) >> 1975 { >> 1976 return offsetof(struct btrfs_header, fsid); >> 1977 } >> 1978 >> 1979 static inline unsigned long btrfs_header_chunk_tree_uuid(const struct extent_buffer *eb) >> 1980 { >> 1981 return offsetof(struct btrfs_header, chunk_tree_uuid); >> 1982 } >> 1983 >> 1984 static inline int btrfs_is_leaf(const struct extent_buffer *eb) >> 1985 { >> 1986 return btrfs_header_level(eb) == 0; >> 1987 } >> 1988 >> 1989 /* struct btrfs_root_item */ >> 1990 BTRFS_SETGET_FUNCS(disk_root_generation, struct btrfs_root_item, >> 1991 generation, 64); >> 1992 BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32); >> 1993 BTRFS_SETGET_FUNCS(disk_root_bytenr, struct btrfs_root_item, bytenr, 64); >> 1994 BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8); >> 1995 >> 1996 BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item, >> 1997 generation, 64); >> 1998 BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64); >> 1999 BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8); >> 2000 BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64); >> 2001 BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32); >> 2002 BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64); >> 2003 BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64); >> 2004 BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64); >> 2005 BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item, >> 2006 last_snapshot, 64); >> 2007 BTRFS_SETGET_STACK_FUNCS(root_generation_v2, struct btrfs_root_item, >> 2008 generation_v2, 64); >> 2009 BTRFS_SETGET_STACK_FUNCS(root_ctransid, struct btrfs_root_item, >> 2010 ctransid, 64); >> 2011 BTRFS_SETGET_STACK_FUNCS(root_otransid, struct btrfs_root_item, >> 2012 otransid, 64); >> 2013 BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item, >> 2014 stransid, 64); >> 2015 BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item, >> 2016 rtransid, 64); >> 2017 >> 2018 static inline bool btrfs_root_readonly(const struct btrfs_root *root) >> 2019 { >> 2020 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0; >> 2021 } >> 2022 >> 2023 static inline bool btrfs_root_dead(const struct btrfs_root *root) >> 2024 { >> 2025 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0; >> 2026 } >> 2027 >> 2028 /* struct btrfs_root_backup */ >> 2029 BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup, >> 2030 tree_root, 64); >> 2031 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_gen, struct btrfs_root_backup, >> 2032 tree_root_gen, 64); >> 2033 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_level, struct 
btrfs_root_backup, >> 2034 tree_root_level, 8); >> 2035 >> 2036 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root, struct btrfs_root_backup, >> 2037 chunk_root, 64); >> 2038 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_gen, struct btrfs_root_backup, >> 2039 chunk_root_gen, 64); >> 2040 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_level, struct btrfs_root_backup, >> 2041 chunk_root_level, 8); >> 2042 >> 2043 BTRFS_SETGET_STACK_FUNCS(backup_extent_root, struct btrfs_root_backup, >> 2044 extent_root, 64); >> 2045 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_gen, struct btrfs_root_backup, >> 2046 extent_root_gen, 64); >> 2047 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_level, struct btrfs_root_backup, >> 2048 extent_root_level, 8); >> 2049 >> 2050 BTRFS_SETGET_STACK_FUNCS(backup_fs_root, struct btrfs_root_backup, >> 2051 fs_root, 64); >> 2052 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_gen, struct btrfs_root_backup, >> 2053 fs_root_gen, 64); >> 2054 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_level, struct btrfs_root_backup, >> 2055 fs_root_level, 8); >> 2056 >> 2057 BTRFS_SETGET_STACK_FUNCS(backup_dev_root, struct btrfs_root_backup, >> 2058 dev_root, 64); >> 2059 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_gen, struct btrfs_root_backup, >> 2060 dev_root_gen, 64); >> 2061 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_level, struct btrfs_root_backup, >> 2062 dev_root_level, 8); >> 2063 >> 2064 BTRFS_SETGET_STACK_FUNCS(backup_csum_root, struct btrfs_root_backup, >> 2065 csum_root, 64); >> 2066 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_gen, struct btrfs_root_backup, >> 2067 csum_root_gen, 64); >> 2068 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_level, struct btrfs_root_backup, >> 2069 csum_root_level, 8); >> 2070 BTRFS_SETGET_STACK_FUNCS(backup_total_bytes, struct btrfs_root_backup, >> 2071 total_bytes, 64); >> 2072 BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup, >> 2073 bytes_used, 64); >> 2074 BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup, >> 2075 num_devices, 64); >> 2076 >> 2077 /* struct btrfs_balance_item */ >> 2078 BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64); >> 2079 >> 2080 static inline void btrfs_balance_data(const struct extent_buffer *eb, >> 2081 const struct btrfs_balance_item *bi, >> 2082 struct btrfs_disk_balance_args *ba) >> 2083 { >> 2084 read_eb_member(eb, bi, struct btrfs_balance_item, data, ba); >> 2085 } >> 2086 >> 2087 static inline void btrfs_set_balance_data(struct extent_buffer *eb, >> 2088 struct btrfs_balance_item *bi, >> 2089 const struct btrfs_disk_balance_args *ba) >> 2090 { >> 2091 write_eb_member(eb, bi, struct btrfs_balance_item, data, ba); >> 2092 } >> 2093 >> 2094 static inline void btrfs_balance_meta(const struct extent_buffer *eb, >> 2095 const struct btrfs_balance_item *bi, >> 2096 struct btrfs_disk_balance_args *ba) >> 2097 { >> 2098 read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); >> 2099 } >> 2100 >> 2101 static inline void btrfs_set_balance_meta(struct extent_buffer *eb, >> 2102 struct btrfs_balance_item *bi, >> 2103 const struct btrfs_disk_balance_args *ba) >> 2104 { >> 2105 write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); >> 2106 } >> 2107 >> 2108 static inline void btrfs_balance_sys(const struct extent_buffer *eb, >> 2109 const struct btrfs_balance_item *bi, >> 2110 struct btrfs_disk_balance_args *ba) >> 2111 { >> 2112 read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); >> 2113 } >> 2114 >> 2115 static inline void btrfs_set_balance_sys(struct extent_buffer *eb, >> 2116 struct 
btrfs_balance_item *bi, >> 2117 const struct btrfs_disk_balance_args *ba) >> 2118 { >> 2119 write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); >> 2120 } >> 2121 >> 2122 static inline void >> 2123 btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, >> 2124 const struct btrfs_disk_balance_args *disk) >> 2125 { >> 2126 memset(cpu, 0, sizeof(*cpu)); >> 2127 >> 2128 cpu->profiles = le64_to_cpu(disk->profiles); >> 2129 cpu->usage = le64_to_cpu(disk->usage); >> 2130 cpu->devid = le64_to_cpu(disk->devid); >> 2131 cpu->pstart = le64_to_cpu(disk->pstart); >> 2132 cpu->pend = le64_to_cpu(disk->pend); >> 2133 cpu->vstart = le64_to_cpu(disk->vstart); >> 2134 cpu->vend = le64_to_cpu(disk->vend); >> 2135 cpu->target = le64_to_cpu(disk->target); >> 2136 cpu->flags = le64_to_cpu(disk->flags); >> 2137 cpu->limit = le64_to_cpu(disk->limit); >> 2138 cpu->stripes_min = le32_to_cpu(disk->stripes_min); >> 2139 cpu->stripes_max = le32_to_cpu(disk->stripes_max); >> 2140 } >> 2141 >> 2142 static inline void >> 2143 btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk, >> 2144 const struct btrfs_balance_args *cpu) >> 2145 { >> 2146 memset(disk, 0, sizeof(*disk)); >> 2147 >> 2148 disk->profiles = cpu_to_le64(cpu->profiles); >> 2149 disk->usage = cpu_to_le64(cpu->usage); >> 2150 disk->devid = cpu_to_le64(cpu->devid); >> 2151 disk->pstart = cpu_to_le64(cpu->pstart); >> 2152 disk->pend = cpu_to_le64(cpu->pend); >> 2153 disk->vstart = cpu_to_le64(cpu->vstart); >> 2154 disk->vend = cpu_to_le64(cpu->vend); >> 2155 disk->target = cpu_to_le64(cpu->target); >> 2156 disk->flags = cpu_to_le64(cpu->flags); >> 2157 disk->limit = cpu_to_le64(cpu->limit); >> 2158 disk->stripes_min = cpu_to_le32(cpu->stripes_min); >> 2159 disk->stripes_max = cpu_to_le32(cpu->stripes_max); >> 2160 } >> 2161 >> 2162 /* struct btrfs_super_block */ >> 2163 BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64); >> 2164 BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64); >> 2165 BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block, >> 2166 generation, 64); >> 2167 BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64); >> 2168 BTRFS_SETGET_STACK_FUNCS(super_sys_array_size, >> 2169 struct btrfs_super_block, sys_chunk_array_size, 32); >> 2170 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_generation, >> 2171 struct btrfs_super_block, chunk_root_generation, 64); >> 2172 BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block, >> 2173 root_level, 8); >> 2174 BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block, >> 2175 chunk_root, 64); >> 2176 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block, >> 2177 chunk_root_level, 8); >> 2178 BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block, >> 2179 log_root, 64); >> 2180 BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block, >> 2181 log_root_transid, 64); >> 2182 BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block, >> 2183 log_root_level, 8); >> 2184 BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block, >> 2185 total_bytes, 64); >> 2186 BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block, >> 2187 bytes_used, 64); >> 2188 BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block, >> 2189 sectorsize, 32); >> 2190 BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block, >> 2191 nodesize, 32); >> 2192 BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct 
btrfs_super_block, >> 2193 stripesize, 32); >> 2194 BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block, >> 2195 root_dir_objectid, 64); >> 2196 BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block, >> 2197 num_devices, 64); >> 2198 BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block, >> 2199 compat_flags, 64); >> 2200 BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block, >> 2201 compat_ro_flags, 64); >> 2202 BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block, >> 2203 incompat_flags, 64); >> 2204 BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block, >> 2205 csum_type, 16); >> 2206 BTRFS_SETGET_STACK_FUNCS(super_cache_generation, struct btrfs_super_block, >> 2207 cache_generation, 64); >> 2208 BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64); >> 2209 BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, >> 2210 uuid_tree_generation, 64); >> 2211 >> 2212 int btrfs_super_csum_size(const struct btrfs_super_block *s); >> 2213 const char *btrfs_super_csum_name(u16 csum_type); >> 2214 const char *btrfs_super_csum_driver(u16 csum_type); >> 2215 size_t __const btrfs_get_num_csums(void); 511 2216 512 #ifdef __LITTLE_ENDIAN << 513 2217 514 /* 2218 /* 515 * Compare two keys, on little-endian the disk !! 2219 * The leaf data grows from end-to-front in the node. 516 * we can avoid the conversion. !! 2220 * this returns the address of the start of the last item, >> 2221 * which is the stop of the leaf data stack 517 */ 2222 */ 518 static inline int btrfs_comp_keys(const struct !! 2223 static inline unsigned int leaf_data_end(const struct extent_buffer *leaf) 519 const struct << 520 { 2224 { 521 const struct btrfs_key *k1 = (const st !! 
2225 u32 nr = btrfs_header_nritems(leaf); >> 2226 >> 2227 if (nr == 0) >> 2228 return BTRFS_LEAF_DATA_SIZE(leaf->fs_info); >> 2229 return btrfs_item_offset_nr(leaf, nr - 1); >> 2230 } >> 2231 >> 2232 /* struct btrfs_file_extent_item */ >> 2233 BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); >> 2234 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr, >> 2235 struct btrfs_file_extent_item, disk_bytenr, 64); >> 2236 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_offset, >> 2237 struct btrfs_file_extent_item, offset, 64); >> 2238 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_generation, >> 2239 struct btrfs_file_extent_item, generation, 64); >> 2240 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_num_bytes, >> 2241 struct btrfs_file_extent_item, num_bytes, 64); >> 2242 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_num_bytes, >> 2243 struct btrfs_file_extent_item, disk_num_bytes, 64); >> 2244 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression, >> 2245 struct btrfs_file_extent_item, compression, 8); >> 2246 >> 2247 static inline unsigned long >> 2248 btrfs_file_extent_inline_start(const struct btrfs_file_extent_item *e) >> 2249 { >> 2250 return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START; >> 2251 } >> 2252 >> 2253 static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize) >> 2254 { >> 2255 return BTRFS_FILE_EXTENT_INLINE_DATA_START + datasize; >> 2256 } >> 2257 >> 2258 BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item, >> 2259 disk_bytenr, 64); >> 2260 BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item, >> 2261 generation, 64); >> 2262 BTRFS_SETGET_FUNCS(file_extent_disk_num_bytes, struct btrfs_file_extent_item, >> 2263 disk_num_bytes, 64); >> 2264 BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item, >> 2265 offset, 64); >> 2266 BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item, >> 2267 num_bytes, 64); >> 2268 BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item, >> 2269 ram_bytes, 64); >> 2270 BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item, >> 2271 compression, 8); >> 2272 BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item, >> 2273 encryption, 8); >> 2274 BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item, >> 2275 other_encoding, 16); 522 2276 523 return btrfs_comp_cpu_keys(k1, k2); !! 2277 /* >> 2278 * this returns the number of bytes used by the item on disk, minus the >> 2279 * size of any extent headers. 
If a file is compressed on disk, this is >> 2280 * the compressed size >> 2281 */ >> 2282 static inline u32 btrfs_file_extent_inline_item_len( >> 2283 const struct extent_buffer *eb, >> 2284 struct btrfs_item *e) >> 2285 { >> 2286 return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START; >> 2287 } >> 2288 >> 2289 /* btrfs_qgroup_status_item */ >> 2290 BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item, >> 2291 generation, 64); >> 2292 BTRFS_SETGET_FUNCS(qgroup_status_version, struct btrfs_qgroup_status_item, >> 2293 version, 64); >> 2294 BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item, >> 2295 flags, 64); >> 2296 BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item, >> 2297 rescan, 64); >> 2298 >> 2299 /* btrfs_qgroup_info_item */ >> 2300 BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item, >> 2301 generation, 64); >> 2302 BTRFS_SETGET_FUNCS(qgroup_info_rfer, struct btrfs_qgroup_info_item, rfer, 64); >> 2303 BTRFS_SETGET_FUNCS(qgroup_info_rfer_cmpr, struct btrfs_qgroup_info_item, >> 2304 rfer_cmpr, 64); >> 2305 BTRFS_SETGET_FUNCS(qgroup_info_excl, struct btrfs_qgroup_info_item, excl, 64); >> 2306 BTRFS_SETGET_FUNCS(qgroup_info_excl_cmpr, struct btrfs_qgroup_info_item, >> 2307 excl_cmpr, 64); >> 2308 >> 2309 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_generation, >> 2310 struct btrfs_qgroup_info_item, generation, 64); >> 2311 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer, struct btrfs_qgroup_info_item, >> 2312 rfer, 64); >> 2313 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer_cmpr, >> 2314 struct btrfs_qgroup_info_item, rfer_cmpr, 64); >> 2315 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl, struct btrfs_qgroup_info_item, >> 2316 excl, 64); >> 2317 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl_cmpr, >> 2318 struct btrfs_qgroup_info_item, excl_cmpr, 64); >> 2319 >> 2320 /* btrfs_qgroup_limit_item */ >> 2321 BTRFS_SETGET_FUNCS(qgroup_limit_flags, struct btrfs_qgroup_limit_item, >> 2322 flags, 64); >> 2323 BTRFS_SETGET_FUNCS(qgroup_limit_max_rfer, struct btrfs_qgroup_limit_item, >> 2324 max_rfer, 64); >> 2325 BTRFS_SETGET_FUNCS(qgroup_limit_max_excl, struct btrfs_qgroup_limit_item, >> 2326 max_excl, 64); >> 2327 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item, >> 2328 rsv_rfer, 64); >> 2329 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item, >> 2330 rsv_excl, 64); >> 2331 >> 2332 /* btrfs_dev_replace_item */ >> 2333 BTRFS_SETGET_FUNCS(dev_replace_src_devid, >> 2334 struct btrfs_dev_replace_item, src_devid, 64); >> 2335 BTRFS_SETGET_FUNCS(dev_replace_cont_reading_from_srcdev_mode, >> 2336 struct btrfs_dev_replace_item, cont_reading_from_srcdev_mode, >> 2337 64); >> 2338 BTRFS_SETGET_FUNCS(dev_replace_replace_state, struct btrfs_dev_replace_item, >> 2339 replace_state, 64); >> 2340 BTRFS_SETGET_FUNCS(dev_replace_time_started, struct btrfs_dev_replace_item, >> 2341 time_started, 64); >> 2342 BTRFS_SETGET_FUNCS(dev_replace_time_stopped, struct btrfs_dev_replace_item, >> 2343 time_stopped, 64); >> 2344 BTRFS_SETGET_FUNCS(dev_replace_num_write_errors, struct btrfs_dev_replace_item, >> 2345 num_write_errors, 64); >> 2346 BTRFS_SETGET_FUNCS(dev_replace_num_uncorrectable_read_errors, >> 2347 struct btrfs_dev_replace_item, num_uncorrectable_read_errors, >> 2348 64); >> 2349 BTRFS_SETGET_FUNCS(dev_replace_cursor_left, struct btrfs_dev_replace_item, >> 2350 cursor_left, 64); >> 2351 BTRFS_SETGET_FUNCS(dev_replace_cursor_right, struct 
btrfs_dev_replace_item, >> 2352 cursor_right, 64); >> 2353 >> 2354 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_src_devid, >> 2355 struct btrfs_dev_replace_item, src_devid, 64); >> 2356 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cont_reading_from_srcdev_mode, >> 2357 struct btrfs_dev_replace_item, >> 2358 cont_reading_from_srcdev_mode, 64); >> 2359 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_replace_state, >> 2360 struct btrfs_dev_replace_item, replace_state, 64); >> 2361 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_started, >> 2362 struct btrfs_dev_replace_item, time_started, 64); >> 2363 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_stopped, >> 2364 struct btrfs_dev_replace_item, time_stopped, 64); >> 2365 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_write_errors, >> 2366 struct btrfs_dev_replace_item, num_write_errors, 64); >> 2367 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_uncorrectable_read_errors, >> 2368 struct btrfs_dev_replace_item, >> 2369 num_uncorrectable_read_errors, 64); >> 2370 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left, >> 2371 struct btrfs_dev_replace_item, cursor_left, 64); >> 2372 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right, >> 2373 struct btrfs_dev_replace_item, cursor_right, 64); >> 2374 >> 2375 /* helper function to cast into the data area of the leaf. */ >> 2376 #define btrfs_item_ptr(leaf, slot, type) \ >> 2377 ((type *)(BTRFS_LEAF_DATA_OFFSET + \ >> 2378 btrfs_item_offset_nr(leaf, slot))) >> 2379 >> 2380 #define btrfs_item_ptr_offset(leaf, slot) \ >> 2381 ((unsigned long)(BTRFS_LEAF_DATA_OFFSET + \ >> 2382 btrfs_item_offset_nr(leaf, slot))) >> 2383 >> 2384 static inline u32 btrfs_crc32c(u32 crc, const void *address, unsigned length) >> 2385 { >> 2386 return crc32c(crc, address, length); 524 } 2387 } 525 2388 526 #else !! 2389 static inline void btrfs_crc32c_final(u32 crc, u8 *result) >> 2390 { >> 2391 put_unaligned_le32(~crc, result); >> 2392 } 527 2393 528 /* Compare two keys in a memcmp fashion. */ !! 2394 static inline u64 btrfs_name_hash(const char *name, int len) 529 static inline int btrfs_comp_keys(const struct << 530 const struct << 531 { 2395 { 532 struct btrfs_key k1; !! 2396 return crc32c((u32)~1, name, len); >> 2397 } 533 2398 534 btrfs_disk_key_to_cpu(&k1, disk); !! 2399 /* >> 2400 * Figure the key offset of an extended inode ref >> 2401 */ >> 2402 static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name, >> 2403 int len) >> 2404 { >> 2405 return (u64) crc32c(parent_objectid, name, len); >> 2406 } 535 2407 536 return btrfs_comp_cpu_keys(&k1, k2); !! 2408 static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping) >> 2409 { >> 2410 return mapping_gfp_constraint(mapping, ~__GFP_FS); 537 } 2411 } 538 2412 539 #endif !! 2413 /* extent-tree.c */ >> 2414 >> 2415 enum btrfs_inline_ref_type { >> 2416 BTRFS_REF_TYPE_INVALID, >> 2417 BTRFS_REF_TYPE_BLOCK, >> 2418 BTRFS_REF_TYPE_DATA, >> 2419 BTRFS_REF_TYPE_ANY, >> 2420 }; >> 2421 >> 2422 int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb, >> 2423 struct btrfs_extent_inline_ref *iref, >> 2424 enum btrfs_inline_ref_type is_data); >> 2425 u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset); >> 2426 >> 2427 u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes); 540 2428 >> 2429 /* >> 2430 * Use this if we would be adding new items, as we could split nodes as we cow >> 2431 * down the tree. 
>> 2432 */ >> 2433 static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info, >> 2434 unsigned num_items) >> 2435 { >> 2436 return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; >> 2437 } >> 2438 >> 2439 /* >> 2440 * Doing a truncate or a modification won't result in new nodes or leaves, just >> 2441 * what we need for COW. >> 2442 */ >> 2443 static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info, >> 2444 unsigned num_items) >> 2445 { >> 2446 return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; >> 2447 } >> 2448 >> 2449 int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info, >> 2450 u64 start, u64 num_bytes); >> 2451 void btrfs_free_excluded_extents(struct btrfs_block_group *cache); >> 2452 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, >> 2453 unsigned long count); >> 2454 void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, >> 2455 struct btrfs_delayed_ref_root *delayed_refs, >> 2456 struct btrfs_delayed_ref_head *head); >> 2457 int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); >> 2458 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, >> 2459 struct btrfs_fs_info *fs_info, u64 bytenr, >> 2460 u64 offset, int metadata, u64 *refs, u64 *flags); >> 2461 int btrfs_pin_extent(struct btrfs_fs_info *fs_info, >> 2462 u64 bytenr, u64 num, int reserved); >> 2463 int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, >> 2464 u64 bytenr, u64 num_bytes); >> 2465 int btrfs_exclude_logged_extents(struct extent_buffer *eb); >> 2466 int btrfs_cross_ref_exist(struct btrfs_root *root, >> 2467 u64 objectid, u64 offset, u64 bytenr); >> 2468 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, >> 2469 struct btrfs_root *root, >> 2470 u64 parent, u64 root_objectid, >> 2471 const struct btrfs_disk_key *key, >> 2472 int level, u64 hint, >> 2473 u64 empty_size); >> 2474 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, >> 2475 struct btrfs_root *root, >> 2476 struct extent_buffer *buf, >> 2477 u64 parent, int last_ref); >> 2478 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, >> 2479 struct btrfs_root *root, u64 owner, >> 2480 u64 offset, u64 ram_bytes, >> 2481 struct btrfs_key *ins); >> 2482 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, >> 2483 u64 root_objectid, u64 owner, u64 offset, >> 2484 struct btrfs_key *ins); >> 2485 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes, >> 2486 u64 min_alloc_size, u64 empty_size, u64 hint_byte, >> 2487 struct btrfs_key *ins, int is_data, int delalloc); >> 2488 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, >> 2489 struct extent_buffer *buf, int full_backref); >> 2490 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, >> 2491 struct extent_buffer *buf, int full_backref); >> 2492 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, >> 2493 u64 bytenr, u64 num_bytes, u64 flags, >> 2494 int level, int is_data); >> 2495 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref); >> 2496 >> 2497 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, >> 2498 u64 start, u64 len, int delalloc); >> 2499 int btrfs_pin_reserved_extent(struct btrfs_fs_info *fs_info, u64 start, >> 2500 u64 len); >> 2501 void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info); >> 2502 int btrfs_finish_extent_commit(struct btrfs_trans_handle 
*trans); >> 2503 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, >> 2504 struct btrfs_ref *generic_ref); >> 2505 >> 2506 int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr); >> 2507 void btrfs_get_block_group_trimming(struct btrfs_block_group *cache); >> 2508 void btrfs_put_block_group_trimming(struct btrfs_block_group *cache); >> 2509 void btrfs_clear_space_info_full(struct btrfs_fs_info *info); >> 2510 >> 2511 enum btrfs_reserve_flush_enum { >> 2512 /* If we are in the transaction, we can't flush anything.*/ >> 2513 BTRFS_RESERVE_NO_FLUSH, >> 2514 /* >> 2515 * Flushing delalloc may cause deadlock somewhere, in this >> 2516 * case, use FLUSH LIMIT >> 2517 */ >> 2518 BTRFS_RESERVE_FLUSH_LIMIT, >> 2519 BTRFS_RESERVE_FLUSH_EVICT, >> 2520 BTRFS_RESERVE_FLUSH_ALL, >> 2521 }; >> 2522 >> 2523 enum btrfs_flush_state { >> 2524 FLUSH_DELAYED_ITEMS_NR = 1, >> 2525 FLUSH_DELAYED_ITEMS = 2, >> 2526 FLUSH_DELAYED_REFS_NR = 3, >> 2527 FLUSH_DELAYED_REFS = 4, >> 2528 FLUSH_DELALLOC = 5, >> 2529 FLUSH_DELALLOC_WAIT = 6, >> 2530 ALLOC_CHUNK = 7, >> 2531 ALLOC_CHUNK_FORCE = 8, >> 2532 RUN_DELAYED_IPUTS = 9, >> 2533 COMMIT_TRANS = 10, >> 2534 }; >> 2535 >> 2536 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, >> 2537 struct btrfs_block_rsv *rsv, >> 2538 int nitems, bool use_global_rsv); >> 2539 void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info, >> 2540 struct btrfs_block_rsv *rsv); >> 2541 void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes); >> 2542 >> 2543 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes); >> 2544 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); >> 2545 int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, >> 2546 u64 start, u64 end); >> 2547 int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, >> 2548 u64 num_bytes, u64 *actual_bytes); >> 2549 int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range); >> 2550 >> 2551 int btrfs_init_space_info(struct btrfs_fs_info *fs_info); >> 2552 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans, >> 2553 struct btrfs_fs_info *fs_info); >> 2554 int btrfs_start_write_no_snapshotting(struct btrfs_root *root); >> 2555 void btrfs_end_write_no_snapshotting(struct btrfs_root *root); >> 2556 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root); >> 2557 >> 2558 /* ctree.c */ >> 2559 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, >> 2560 int level, int *slot); >> 2561 int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2); 541 int btrfs_previous_item(struct btrfs_root *roo 2562 int btrfs_previous_item(struct btrfs_root *root, 542 struct btrfs_path *pat 2563 struct btrfs_path *path, u64 min_objectid, 543 int type); 2564 int type); 544 int btrfs_previous_extent_item(struct btrfs_ro 2565 int btrfs_previous_extent_item(struct btrfs_root *root, 545 struct btrfs_path *pat 2566 struct btrfs_path *path, u64 min_objectid); 546 void btrfs_set_item_key_safe(struct btrfs_tran !! 2567 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 547 const struct btrf !! 
2568 struct btrfs_path *path, 548 const struct btrf 2569 const struct btrfs_key *new_key); 549 struct extent_buffer *btrfs_root_node(struct b 2570 struct extent_buffer *btrfs_root_node(struct btrfs_root *root); >> 2571 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root); >> 2572 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root); 550 int btrfs_find_next_key(struct btrfs_root *roo 2573 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 551 struct btrfs_key *key, 2574 struct btrfs_key *key, int lowest_level, 552 u64 min_trans); 2575 u64 min_trans); 553 int btrfs_search_forward(struct btrfs_root *ro 2576 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 554 struct btrfs_path *pa 2577 struct btrfs_path *path, 555 u64 min_trans); 2578 u64 min_trans); 556 struct extent_buffer *btrfs_read_node_slot(str 2579 struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent, 557 int 2580 int slot); 558 2581 559 int btrfs_cow_block(struct btrfs_trans_handle 2582 int btrfs_cow_block(struct btrfs_trans_handle *trans, 560 struct btrfs_root *root, s 2583 struct btrfs_root *root, struct extent_buffer *buf, 561 struct extent_buffer *pare 2584 struct extent_buffer *parent, int parent_slot, 562 struct extent_buffer **cow !! 2585 struct extent_buffer **cow_ret); 563 enum btrfs_lock_nesting ne << 564 int btrfs_force_cow_block(struct btrfs_trans_h << 565 struct btrfs_root *r << 566 struct extent_buffer << 567 struct extent_buffer << 568 struct extent_buffer << 569 u64 search_start, u6 << 570 enum btrfs_lock_nest << 571 int btrfs_copy_root(struct btrfs_trans_handle 2586 int btrfs_copy_root(struct btrfs_trans_handle *trans, 572 struct btrfs_root *root, 2587 struct btrfs_root *root, 573 struct extent_buffer *bu 2588 struct extent_buffer *buf, 574 struct extent_buffer **c 2589 struct extent_buffer **cow_ret, u64 new_root_objectid); 575 bool btrfs_block_can_be_shared(struct btrfs_tr !! 2590 int btrfs_block_can_be_shared(struct btrfs_root *root, 576 struct btrfs_ro !! 2591 struct extent_buffer *buf); 577 struct extent_b !! 2592 void btrfs_extend_item(struct btrfs_path *path, u32 data_size); 578 int btrfs_del_ptr(struct btrfs_trans_handle *t !! 
2593 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end); 579 struct btrfs_path *path, int << 580 void btrfs_extend_item(struct btrfs_trans_hand << 581 const struct btrfs_path << 582 void btrfs_truncate_item(struct btrfs_trans_ha << 583 const struct btrfs_pa << 584 int btrfs_split_item(struct btrfs_trans_handle 2594 int btrfs_split_item(struct btrfs_trans_handle *trans, 585 struct btrfs_root *root, 2595 struct btrfs_root *root, 586 struct btrfs_path *path, 2596 struct btrfs_path *path, 587 const struct btrfs_key *n 2597 const struct btrfs_key *new_key, 588 unsigned long split_offse 2598 unsigned long split_offset); 589 int btrfs_duplicate_item(struct btrfs_trans_ha 2599 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 590 struct btrfs_root *ro 2600 struct btrfs_root *root, 591 struct btrfs_path *pa 2601 struct btrfs_path *path, 592 const struct btrfs_ke 2602 const struct btrfs_key *new_key); 593 int btrfs_find_item(struct btrfs_root *fs_root 2603 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, 594 u64 inum, u64 ioff, u8 key_typ 2604 u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key); 595 int btrfs_search_slot(struct btrfs_trans_handl 2605 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 596 const struct btrfs_key * 2606 const struct btrfs_key *key, struct btrfs_path *p, 597 int ins_len, int cow); 2607 int ins_len, int cow); 598 int btrfs_search_old_slot(struct btrfs_root *r 2608 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 599 struct btrfs_path *p 2609 struct btrfs_path *p, u64 time_seq); 600 int btrfs_search_slot_for_read(struct btrfs_ro 2610 int btrfs_search_slot_for_read(struct btrfs_root *root, 601 const struct bt 2611 const struct btrfs_key *key, 602 struct btrfs_pa 2612 struct btrfs_path *p, int find_higher, 603 int return_any) 2613 int return_any); >> 2614 int btrfs_realloc_node(struct btrfs_trans_handle *trans, >> 2615 struct btrfs_root *root, struct extent_buffer *parent, >> 2616 int start_slot, u64 *last_ret, >> 2617 struct btrfs_key *progress); 604 void btrfs_release_path(struct btrfs_path *p); 2618 void btrfs_release_path(struct btrfs_path *p); 605 struct btrfs_path *btrfs_alloc_path(void); 2619 struct btrfs_path *btrfs_alloc_path(void); 606 void btrfs_free_path(struct btrfs_path *p); 2620 void btrfs_free_path(struct btrfs_path *p); 607 DEFINE_FREE(btrfs_free_path, struct btrfs_path << 608 2621 609 int btrfs_del_items(struct btrfs_trans_handle 2622 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 610 struct btrfs_path *path, in 2623 struct btrfs_path *path, int slot, int nr); 611 static inline int btrfs_del_item(struct btrfs_ 2624 static inline int btrfs_del_item(struct btrfs_trans_handle *trans, 612 struct btrfs_ 2625 struct btrfs_root *root, 613 struct btrfs_ 2626 struct btrfs_path *path) 614 { 2627 { 615 return btrfs_del_items(trans, root, pa 2628 return btrfs_del_items(trans, root, path, path->slots[0], 1); 616 } 2629 } 617 2630 618 /* !! 2631 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, 619 * Describes a batch of items to insert in a b !! 2632 const struct btrfs_key *cpu_key, u32 *data_size, 620 * btrfs_insert_empty_items(). !! 2633 u32 total_data, u32 total_size, int nr); 621 */ << 622 struct btrfs_item_batch { << 623 /* << 624 * Pointer to an array containing the << 625 * sorted order). 
<< 626 */ << 627 const struct btrfs_key *keys; << 628 /* Pointer to an array containing the << 629 const u32 *data_sizes; << 630 /* << 631 * The sum of data sizes for all items << 632 * setting up the data_sizes array, so << 633 * than having btrfs_insert_empty_item << 634 * doing it, as it would avoid an extr << 635 * array, and in the case of setup_ite << 636 * it while holding a write lock on a << 637 * too, unnecessarily increasing the s << 638 */ << 639 u32 total_data_size; << 640 /* Size of the keys and data_sizes arr << 641 int nr; << 642 }; << 643 << 644 void btrfs_setup_item_for_insert(struct btrfs_ << 645 struct btrfs_ << 646 struct btrfs_ << 647 const struct << 648 u32 data_size << 649 int btrfs_insert_item(struct btrfs_trans_handl 2634 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, 650 const struct btrfs_key * 2635 const struct btrfs_key *key, void *data, u32 data_size); 651 int btrfs_insert_empty_items(struct btrfs_tran 2636 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 652 struct btrfs_root 2637 struct btrfs_root *root, 653 struct btrfs_path 2638 struct btrfs_path *path, 654 const struct btrf !! 2639 const struct btrfs_key *cpu_key, u32 *data_size, >> 2640 int nr); 655 2641 656 static inline int btrfs_insert_empty_item(stru 2642 static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, 657 stru 2643 struct btrfs_root *root, 658 stru 2644 struct btrfs_path *path, 659 cons 2645 const struct btrfs_key *key, 660 u32 2646 u32 data_size) 661 { 2647 { 662 struct btrfs_item_batch batch; !! 2648 return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1); 663 << 664 batch.keys = key; << 665 batch.data_sizes = &data_size; << 666 batch.total_data_size = data_size; << 667 batch.nr = 1; << 668 << 669 return btrfs_insert_empty_items(trans, << 670 } 2649 } 671 2650 >> 2651 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); >> 2652 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); 672 int btrfs_next_old_leaf(struct btrfs_root *roo 2653 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 673 u64 time_seq); 2654 u64 time_seq); >> 2655 static inline int btrfs_next_old_item(struct btrfs_root *root, >> 2656 struct btrfs_path *p, u64 time_seq) >> 2657 { >> 2658 ++p->slots[0]; >> 2659 if (p->slots[0] >= btrfs_header_nritems(p->nodes[0])) >> 2660 return btrfs_next_old_leaf(root, p, time_seq); >> 2661 return 0; >> 2662 } >> 2663 static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) >> 2664 { >> 2665 return btrfs_next_old_item(root, p, 0); >> 2666 } >> 2667 int btrfs_leaf_free_space(struct extent_buffer *leaf); >> 2668 int __must_check btrfs_drop_snapshot(struct btrfs_root *root, >> 2669 struct btrfs_block_rsv *block_rsv, >> 2670 int update_ref, int for_reloc); >> 2671 int btrfs_drop_subtree(struct btrfs_trans_handle *trans, >> 2672 struct btrfs_root *root, >> 2673 struct extent_buffer *node, >> 2674 struct extent_buffer *parent); >> 2675 static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info) >> 2676 { >> 2677 /* >> 2678 * Do it this way so we only ever do one test_bit in the normal case. 
>> 2679 */ >> 2680 if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) { >> 2681 if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags)) >> 2682 return 2; >> 2683 return 1; >> 2684 } >> 2685 return 0; >> 2686 } >> 2687 >> 2688 /* >> 2689 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do >> 2690 * anything except sleeping. This function is used to check the status of >> 2691 * the fs. >> 2692 */ >> 2693 static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info) >> 2694 { >> 2695 return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info); >> 2696 } >> 2697 >> 2698 static inline void free_fs_info(struct btrfs_fs_info *fs_info) >> 2699 { >> 2700 kfree(fs_info->balance_ctl); >> 2701 kfree(fs_info->delayed_root); >> 2702 kfree(fs_info->extent_root); >> 2703 kfree(fs_info->tree_root); >> 2704 kfree(fs_info->chunk_root); >> 2705 kfree(fs_info->dev_root); >> 2706 kfree(fs_info->csum_root); >> 2707 kfree(fs_info->quota_root); >> 2708 kfree(fs_info->uuid_root); >> 2709 kfree(fs_info->free_space_root); >> 2710 kfree(fs_info->super_copy); >> 2711 kfree(fs_info->super_for_commit); >> 2712 kvfree(fs_info); >> 2713 } >> 2714 >> 2715 /* tree mod log functions from ctree.c */ >> 2716 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info, >> 2717 struct seq_list *elem); >> 2718 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, >> 2719 struct seq_list *elem); >> 2720 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq); >> 2721 >> 2722 /* root-item.c */ >> 2723 int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id, >> 2724 u64 ref_id, u64 dirid, u64 sequence, const char *name, >> 2725 int name_len); >> 2726 int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id, >> 2727 u64 ref_id, u64 dirid, u64 *sequence, const char *name, >> 2728 int name_len); >> 2729 int btrfs_del_root(struct btrfs_trans_handle *trans, >> 2730 const struct btrfs_key *key); >> 2731 int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, >> 2732 const struct btrfs_key *key, >> 2733 struct btrfs_root_item *item); >> 2734 int __must_check btrfs_update_root(struct btrfs_trans_handle *trans, >> 2735 struct btrfs_root *root, >> 2736 struct btrfs_key *key, >> 2737 struct btrfs_root_item *item); >> 2738 int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key, >> 2739 struct btrfs_path *path, struct btrfs_root_item *root_item, >> 2740 struct btrfs_key *root_key); >> 2741 int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info); >> 2742 void btrfs_set_root_node(struct btrfs_root_item *item, >> 2743 struct extent_buffer *node); >> 2744 void btrfs_check_and_init_root_item(struct btrfs_root_item *item); >> 2745 void btrfs_update_root_times(struct btrfs_trans_handle *trans, >> 2746 struct btrfs_root *root); >> 2747 >> 2748 /* uuid-tree.c */ >> 2749 int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, u8 *uuid, u8 type, >> 2750 u64 subid); >> 2751 int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, u8 *uuid, u8 type, >> 2752 u64 subid); >> 2753 int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info, >> 2754 int (*check_func)(struct btrfs_fs_info *, u8 *, u8, >> 2755 u64)); >> 2756 >> 2757 /* dir-item.c */ >> 2758 int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, >> 2759 const char *name, int name_len); >> 2760 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name, >> 2761 int name_len, struct btrfs_inode *dir, >> 2762 struct btrfs_key *location, 
u8 type, u64 index); >> 2763 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, >> 2764 struct btrfs_root *root, >> 2765 struct btrfs_path *path, u64 dir, >> 2766 const char *name, int name_len, >> 2767 int mod); >> 2768 struct btrfs_dir_item * >> 2769 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, >> 2770 struct btrfs_root *root, >> 2771 struct btrfs_path *path, u64 dir, >> 2772 u64 objectid, const char *name, int name_len, >> 2773 int mod); >> 2774 struct btrfs_dir_item * >> 2775 btrfs_search_dir_index_item(struct btrfs_root *root, >> 2776 struct btrfs_path *path, u64 dirid, >> 2777 const char *name, int name_len); >> 2778 int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, >> 2779 struct btrfs_root *root, >> 2780 struct btrfs_path *path, >> 2781 struct btrfs_dir_item *di); >> 2782 int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, >> 2783 struct btrfs_root *root, >> 2784 struct btrfs_path *path, u64 objectid, >> 2785 const char *name, u16 name_len, >> 2786 const void *data, u16 data_len); >> 2787 struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, >> 2788 struct btrfs_root *root, >> 2789 struct btrfs_path *path, u64 dir, >> 2790 const char *name, u16 name_len, >> 2791 int mod); >> 2792 struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info, >> 2793 struct btrfs_path *path, >> 2794 const char *name, >> 2795 int name_len); >> 2796 >> 2797 /* orphan.c */ >> 2798 int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans, >> 2799 struct btrfs_root *root, u64 offset); >> 2800 int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, >> 2801 struct btrfs_root *root, u64 offset); >> 2802 int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); >> 2803 >> 2804 /* inode-item.c */ >> 2805 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, >> 2806 struct btrfs_root *root, >> 2807 const char *name, int name_len, >> 2808 u64 inode_objectid, u64 ref_objectid, u64 index); >> 2809 int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, >> 2810 struct btrfs_root *root, >> 2811 const char *name, int name_len, >> 2812 u64 inode_objectid, u64 ref_objectid, u64 *index); >> 2813 int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans, >> 2814 struct btrfs_root *root, >> 2815 struct btrfs_path *path, u64 objectid); >> 2816 int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root >> 2817 *root, struct btrfs_path *path, >> 2818 struct btrfs_key *location, int mod); 674 2819 675 int btrfs_search_backwards(struct btrfs_root * !! 2820 struct btrfs_inode_extref * 676 struct btrfs_path * !! 
2821 btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, >> 2822 struct btrfs_root *root, >> 2823 struct btrfs_path *path, >> 2824 const char *name, int name_len, >> 2825 u64 inode_objectid, u64 ref_objectid, int ins_len, >> 2826 int cow); >> 2827 >> 2828 struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf, >> 2829 int slot, const char *name, >> 2830 int name_len); >> 2831 struct btrfs_inode_extref *btrfs_find_name_in_ext_backref( >> 2832 struct extent_buffer *leaf, int slot, u64 ref_objectid, >> 2833 const char *name, int name_len); >> 2834 /* file-item.c */ >> 2835 struct btrfs_dio_private; >> 2836 int btrfs_del_csums(struct btrfs_trans_handle *trans, >> 2837 struct btrfs_root *root, u64 bytenr, u64 len); >> 2838 blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, >> 2839 u64 offset, u8 *dst); >> 2840 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, >> 2841 struct btrfs_root *root, >> 2842 u64 objectid, u64 pos, >> 2843 u64 disk_offset, u64 disk_num_bytes, >> 2844 u64 num_bytes, u64 offset, u64 ram_bytes, >> 2845 u8 compression, u8 encryption, u16 other_encoding); >> 2846 int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, >> 2847 struct btrfs_root *root, >> 2848 struct btrfs_path *path, u64 objectid, >> 2849 u64 bytenr, int mod); >> 2850 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, >> 2851 struct btrfs_root *root, >> 2852 struct btrfs_ordered_sum *sums); >> 2853 blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio, >> 2854 u64 file_start, int contig); >> 2855 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, >> 2856 struct list_head *list, int search_commit); >> 2857 void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, >> 2858 const struct btrfs_path *path, >> 2859 struct btrfs_file_extent_item *fi, >> 2860 const bool new_inline, >> 2861 struct extent_map *em); >> 2862 >> 2863 /* inode.c */ >> 2864 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, >> 2865 u64 start, u64 len); >> 2866 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, >> 2867 u64 *orig_start, u64 *orig_block_len, >> 2868 u64 *ram_bytes); >> 2869 >> 2870 void __btrfs_del_delalloc_inode(struct btrfs_root *root, >> 2871 struct btrfs_inode *inode); >> 2872 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); >> 2873 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index); >> 2874 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, >> 2875 struct btrfs_root *root, >> 2876 struct btrfs_inode *dir, struct btrfs_inode *inode, >> 2877 const char *name, int name_len); >> 2878 int btrfs_add_link(struct btrfs_trans_handle *trans, >> 2879 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, >> 2880 const char *name, int name_len, int add_backref, u64 index); >> 2881 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry); >> 2882 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, >> 2883 int front); >> 2884 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, >> 2885 struct btrfs_root *root, >> 2886 struct inode *inode, u64 new_size, >> 2887 u32 min_type); 677 2888 678 int btrfs_get_next_valid_item(struct btrfs_roo !! 2889 int btrfs_start_delalloc_snapshot(struct btrfs_root *root); 679 struct btrfs_pat !! 
2890 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr); >> 2891 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, >> 2892 unsigned int extra_bits, >> 2893 struct extent_state **cached_state); >> 2894 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, >> 2895 struct btrfs_root *new_root, >> 2896 struct btrfs_root *parent_root, >> 2897 u64 new_dirid); >> 2898 void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state, >> 2899 unsigned *bits); >> 2900 void btrfs_clear_delalloc_extent(struct inode *inode, >> 2901 struct extent_state *state, unsigned *bits); >> 2902 void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new, >> 2903 struct extent_state *other); >> 2904 void btrfs_split_delalloc_extent(struct inode *inode, >> 2905 struct extent_state *orig, u64 split); >> 2906 int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio, >> 2907 unsigned long bio_flags); >> 2908 void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end); >> 2909 vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf); >> 2910 int btrfs_readpage(struct file *file, struct page *page); >> 2911 void btrfs_evict_inode(struct inode *inode); >> 2912 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); >> 2913 struct inode *btrfs_alloc_inode(struct super_block *sb); >> 2914 void btrfs_destroy_inode(struct inode *inode); >> 2915 void btrfs_free_inode(struct inode *inode); >> 2916 int btrfs_drop_inode(struct inode *inode); >> 2917 int __init btrfs_init_cachep(void); >> 2918 void __cold btrfs_destroy_cachep(void); >> 2919 struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location, >> 2920 struct btrfs_root *root, struct btrfs_path *path); >> 2921 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, >> 2922 struct btrfs_root *root); >> 2923 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, >> 2924 struct page *page, size_t pg_offset, >> 2925 u64 start, u64 end); >> 2926 int btrfs_update_inode(struct btrfs_trans_handle *trans, >> 2927 struct btrfs_root *root, >> 2928 struct inode *inode); >> 2929 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, >> 2930 struct btrfs_root *root, struct inode *inode); >> 2931 int btrfs_orphan_add(struct btrfs_trans_handle *trans, >> 2932 struct btrfs_inode *inode); >> 2933 int btrfs_orphan_cleanup(struct btrfs_root *root); >> 2934 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size); >> 2935 void btrfs_add_delayed_iput(struct inode *inode); >> 2936 void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info); >> 2937 int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info); >> 2938 int btrfs_prealloc_file_range(struct inode *inode, int mode, >> 2939 u64 start, u64 num_bytes, u64 min_size, >> 2940 loff_t actual_len, u64 *alloc_hint); >> 2941 int btrfs_prealloc_file_range_trans(struct inode *inode, >> 2942 struct btrfs_trans_handle *trans, int mode, >> 2943 u64 start, u64 num_bytes, u64 min_size, >> 2944 loff_t actual_len, u64 *alloc_hint); >> 2945 int btrfs_run_delalloc_range(struct inode *inode, struct page *locked_page, >> 2946 u64 start, u64 end, int *page_started, unsigned long *nr_written, >> 2947 struct writeback_control *wbc); >> 2948 int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end); >> 2949 void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, >> 2950 u64 end, int uptodate); >> 2951 extern const struct 
dentry_operations btrfs_dentry_operations; >> 2952 >> 2953 /* ioctl.c */ >> 2954 long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); >> 2955 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); >> 2956 int btrfs_ioctl_get_supported_features(void __user *arg); >> 2957 void btrfs_sync_inode_flags_to_i_flags(struct inode *inode); >> 2958 int __pure btrfs_is_empty_uuid(u8 *uuid); >> 2959 int btrfs_defrag_file(struct inode *inode, struct file *file, >> 2960 struct btrfs_ioctl_defrag_range_args *range, >> 2961 u64 newer_than, unsigned long max_pages); >> 2962 void btrfs_get_block_group_info(struct list_head *groups_list, >> 2963 struct btrfs_ioctl_space_info *space); >> 2964 void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info, >> 2965 struct btrfs_ioctl_balance_args *bargs); >> 2966 >> 2967 /* file.c */ >> 2968 int __init btrfs_auto_defrag_init(void); >> 2969 void __cold btrfs_auto_defrag_exit(void); >> 2970 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, >> 2971 struct btrfs_inode *inode); >> 2972 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); >> 2973 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info); >> 2974 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); >> 2975 void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, >> 2976 int skip_pinned); >> 2977 extern const struct file_operations btrfs_file_operations; >> 2978 int __btrfs_drop_extents(struct btrfs_trans_handle *trans, >> 2979 struct btrfs_root *root, struct inode *inode, >> 2980 struct btrfs_path *path, u64 start, u64 end, >> 2981 u64 *drop_end, int drop_cache, >> 2982 int replace_extent, >> 2983 u32 extent_item_size, >> 2984 int *key_inserted); >> 2985 int btrfs_drop_extents(struct btrfs_trans_handle *trans, >> 2986 struct btrfs_root *root, struct inode *inode, u64 start, >> 2987 u64 end, int drop_cache); >> 2988 int btrfs_punch_hole_range(struct inode *inode, struct btrfs_path *path, >> 2989 const u64 start, const u64 end, >> 2990 struct btrfs_clone_extent_info *clone_info, >> 2991 struct btrfs_trans_handle **trans_out); >> 2992 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, >> 2993 struct btrfs_inode *inode, u64 start, u64 end); >> 2994 int btrfs_release_file(struct inode *inode, struct file *file); >> 2995 int btrfs_dirty_pages(struct inode *inode, struct page **pages, >> 2996 size_t num_pages, loff_t pos, size_t write_bytes, >> 2997 struct extent_state **cached); >> 2998 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end); >> 2999 loff_t btrfs_remap_file_range(struct file *file_in, loff_t pos_in, >> 3000 struct file *file_out, loff_t pos_out, >> 3001 loff_t len, unsigned int remap_flags); >> 3002 >> 3003 /* tree-defrag.c */ >> 3004 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, >> 3005 struct btrfs_root *root); >> 3006 >> 3007 /* super.c */ >> 3008 int btrfs_parse_options(struct btrfs_fs_info *info, char *options, >> 3009 unsigned long new_flags); >> 3010 int btrfs_sync_fs(struct super_block *sb, int wait); >> 3011 >> 3012 static inline __printf(2, 3) __cold >> 3013 void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) >> 3014 { >> 3015 } >> 3016 >> 3017 #ifdef CONFIG_PRINTK >> 3018 __printf(2, 3) >> 3019 __cold >> 3020 void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...); >> 3021 #else >> 3022 #define btrfs_printk(fs_info, fmt, args...) 
\ >> 3023 btrfs_no_printk(fs_info, fmt, ##args) >> 3024 #endif >> 3025 >> 3026 #define btrfs_emerg(fs_info, fmt, args...) \ >> 3027 btrfs_printk(fs_info, KERN_EMERG fmt, ##args) >> 3028 #define btrfs_alert(fs_info, fmt, args...) \ >> 3029 btrfs_printk(fs_info, KERN_ALERT fmt, ##args) >> 3030 #define btrfs_crit(fs_info, fmt, args...) \ >> 3031 btrfs_printk(fs_info, KERN_CRIT fmt, ##args) >> 3032 #define btrfs_err(fs_info, fmt, args...) \ >> 3033 btrfs_printk(fs_info, KERN_ERR fmt, ##args) >> 3034 #define btrfs_warn(fs_info, fmt, args...) \ >> 3035 btrfs_printk(fs_info, KERN_WARNING fmt, ##args) >> 3036 #define btrfs_notice(fs_info, fmt, args...) \ >> 3037 btrfs_printk(fs_info, KERN_NOTICE fmt, ##args) >> 3038 #define btrfs_info(fs_info, fmt, args...) \ >> 3039 btrfs_printk(fs_info, KERN_INFO fmt, ##args) 680 3040 681 /* 3041 /* 682 * Search in @root for a given @key, and store !! 3042 * Wrappers that use printk_in_rcu 683 * << 684 * @root: The root node of the tree. << 685 * @key: The key we are looking for. << 686 * @found_key: Will hold the found item. << 687 * @path: Holds the current slot/leaf. << 688 * @iter_ret: Contains the value returned fr << 689 * btrfs_get_next_valid_item, whi << 690 * << 691 * The @iter_ret is an output variable that wi << 692 * btrfs_search_slot, if it encountered an err << 693 * btrfs_get_next_valid_item otherwise. That r << 694 * slot was found, 1 if there were no more lea << 695 * << 696 * It's recommended to use a separate variable << 697 * set the function return value so there's no << 698 * values stemming from btrfs_search_slot. << 699 */ 3043 */ 700 #define btrfs_for_each_slot(root, key, found_k !! 3044 #define btrfs_emerg_in_rcu(fs_info, fmt, args...) \ 701 for (iter_ret = btrfs_search_slot(NULL !! 3045 btrfs_printk_in_rcu(fs_info, KERN_EMERG fmt, ##args) 702 (iter_ret) >= 0 && !! 3046 #define btrfs_alert_in_rcu(fs_info, fmt, args...) \ 703 (iter_ret = btrfs_get_next_val !! 3047 btrfs_printk_in_rcu(fs_info, KERN_ALERT fmt, ##args) 704 (path)->slots[0]++ !! 3048 #define btrfs_crit_in_rcu(fs_info, fmt, args...) \ 705 ) !! 3049 btrfs_printk_in_rcu(fs_info, KERN_CRIT fmt, ##args) >> 3050 #define btrfs_err_in_rcu(fs_info, fmt, args...) \ >> 3051 btrfs_printk_in_rcu(fs_info, KERN_ERR fmt, ##args) >> 3052 #define btrfs_warn_in_rcu(fs_info, fmt, args...) \ >> 3053 btrfs_printk_in_rcu(fs_info, KERN_WARNING fmt, ##args) >> 3054 #define btrfs_notice_in_rcu(fs_info, fmt, args...) \ >> 3055 btrfs_printk_in_rcu(fs_info, KERN_NOTICE fmt, ##args) >> 3056 #define btrfs_info_in_rcu(fs_info, fmt, args...) \ >> 3057 btrfs_printk_in_rcu(fs_info, KERN_INFO fmt, ##args) 706 3058 707 int btrfs_next_old_item(struct btrfs_root *roo !! 3059 /* >> 3060 * Wrappers that use a ratelimited printk_in_rcu >> 3061 */ >> 3062 #define btrfs_emerg_rl_in_rcu(fs_info, fmt, args...) \ >> 3063 btrfs_printk_rl_in_rcu(fs_info, KERN_EMERG fmt, ##args) >> 3064 #define btrfs_alert_rl_in_rcu(fs_info, fmt, args...) \ >> 3065 btrfs_printk_rl_in_rcu(fs_info, KERN_ALERT fmt, ##args) >> 3066 #define btrfs_crit_rl_in_rcu(fs_info, fmt, args...) \ >> 3067 btrfs_printk_rl_in_rcu(fs_info, KERN_CRIT fmt, ##args) >> 3068 #define btrfs_err_rl_in_rcu(fs_info, fmt, args...) \ >> 3069 btrfs_printk_rl_in_rcu(fs_info, KERN_ERR fmt, ##args) >> 3070 #define btrfs_warn_rl_in_rcu(fs_info, fmt, args...) \ >> 3071 btrfs_printk_rl_in_rcu(fs_info, KERN_WARNING fmt, ##args) >> 3072 #define btrfs_notice_rl_in_rcu(fs_info, fmt, args...) 
\ >> 3073 btrfs_printk_rl_in_rcu(fs_info, KERN_NOTICE fmt, ##args) >> 3074 #define btrfs_info_rl_in_rcu(fs_info, fmt, args...) \ >> 3075 btrfs_printk_rl_in_rcu(fs_info, KERN_INFO fmt, ##args) 708 3076 709 /* 3077 /* 710 * Search the tree again to find a leaf with g !! 3078 * Wrappers that use a ratelimited printk 711 * << 712 * Returns 0 if it found something or 1 if the << 713 * Returns < 0 on error. << 714 */ 3079 */ 715 static inline int btrfs_next_leaf(struct btrfs !! 3080 #define btrfs_emerg_rl(fs_info, fmt, args...) \ >> 3081 btrfs_printk_ratelimited(fs_info, KERN_EMERG fmt, ##args) >> 3082 #define btrfs_alert_rl(fs_info, fmt, args...) \ >> 3083 btrfs_printk_ratelimited(fs_info, KERN_ALERT fmt, ##args) >> 3084 #define btrfs_crit_rl(fs_info, fmt, args...) \ >> 3085 btrfs_printk_ratelimited(fs_info, KERN_CRIT fmt, ##args) >> 3086 #define btrfs_err_rl(fs_info, fmt, args...) \ >> 3087 btrfs_printk_ratelimited(fs_info, KERN_ERR fmt, ##args) >> 3088 #define btrfs_warn_rl(fs_info, fmt, args...) \ >> 3089 btrfs_printk_ratelimited(fs_info, KERN_WARNING fmt, ##args) >> 3090 #define btrfs_notice_rl(fs_info, fmt, args...) \ >> 3091 btrfs_printk_ratelimited(fs_info, KERN_NOTICE fmt, ##args) >> 3092 #define btrfs_info_rl(fs_info, fmt, args...) \ >> 3093 btrfs_printk_ratelimited(fs_info, KERN_INFO fmt, ##args) >> 3094 >> 3095 #if defined(CONFIG_DYNAMIC_DEBUG) >> 3096 #define btrfs_debug(fs_info, fmt, args...) \ >> 3097 _dynamic_func_call_no_desc(fmt, btrfs_printk, \ >> 3098 fs_info, KERN_DEBUG fmt, ##args) >> 3099 #define btrfs_debug_in_rcu(fs_info, fmt, args...) \ >> 3100 _dynamic_func_call_no_desc(fmt, btrfs_printk_in_rcu, \ >> 3101 fs_info, KERN_DEBUG fmt, ##args) >> 3102 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ >> 3103 _dynamic_func_call_no_desc(fmt, btrfs_printk_rl_in_rcu, \ >> 3104 fs_info, KERN_DEBUG fmt, ##args) >> 3105 #define btrfs_debug_rl(fs_info, fmt, args...) \ >> 3106 _dynamic_func_call_no_desc(fmt, btrfs_printk_ratelimited, \ >> 3107 fs_info, KERN_DEBUG fmt, ##args) >> 3108 #elif defined(DEBUG) >> 3109 #define btrfs_debug(fs_info, fmt, args...) \ >> 3110 btrfs_printk(fs_info, KERN_DEBUG fmt, ##args) >> 3111 #define btrfs_debug_in_rcu(fs_info, fmt, args...) \ >> 3112 btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) >> 3113 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ >> 3114 btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, ##args) >> 3115 #define btrfs_debug_rl(fs_info, fmt, args...) \ >> 3116 btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, ##args) >> 3117 #else >> 3118 #define btrfs_debug(fs_info, fmt, args...) \ >> 3119 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) >> 3120 #define btrfs_debug_in_rcu(fs_info, fmt, args...) \ >> 3121 btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) >> 3122 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ >> 3123 btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) >> 3124 #define btrfs_debug_rl(fs_info, fmt, args...) \ >> 3125 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) >> 3126 #endif >> 3127 >> 3128 #define btrfs_printk_in_rcu(fs_info, fmt, args...) \ >> 3129 do { \ >> 3130 rcu_read_lock(); \ >> 3131 btrfs_printk(fs_info, fmt, ##args); \ >> 3132 rcu_read_unlock(); \ >> 3133 } while (0) >> 3134 >> 3135 #define btrfs_no_printk_in_rcu(fs_info, fmt, args...) \ >> 3136 do { \ >> 3137 rcu_read_lock(); \ >> 3138 btrfs_no_printk(fs_info, fmt, ##args); \ >> 3139 rcu_read_unlock(); \ >> 3140 } while (0) >> 3141 >> 3142 #define btrfs_printk_ratelimited(fs_info, fmt, args...) 
\ >> 3143 do { \ >> 3144 static DEFINE_RATELIMIT_STATE(_rs, \ >> 3145 DEFAULT_RATELIMIT_INTERVAL, \ >> 3146 DEFAULT_RATELIMIT_BURST); \ >> 3147 if (__ratelimit(&_rs)) \ >> 3148 btrfs_printk(fs_info, fmt, ##args); \ >> 3149 } while (0) >> 3150 >> 3151 #define btrfs_printk_rl_in_rcu(fs_info, fmt, args...) \ >> 3152 do { \ >> 3153 rcu_read_lock(); \ >> 3154 btrfs_printk_ratelimited(fs_info, fmt, ##args); \ >> 3155 rcu_read_unlock(); \ >> 3156 } while (0) >> 3157 >> 3158 #ifdef CONFIG_BTRFS_ASSERT >> 3159 __cold __noreturn >> 3160 static inline void assertfail(const char *expr, const char *file, int line) 716 { 3161 { 717 return btrfs_next_old_leaf(root, path, !! 3162 pr_err("assertion failed: %s, in %s:%d\n", expr, file, line); >> 3163 BUG(); 718 } 3164 } 719 3165 720 static inline int btrfs_next_item(struct btrfs !! 3166 #define ASSERT(expr) \ >> 3167 (likely(expr) ? (void)0 : assertfail(#expr, __FILE__, __LINE__)) >> 3168 >> 3169 #else >> 3170 static inline void assertfail(const char *expr, const char* file, int line) { } >> 3171 #define ASSERT(expr) (void)(expr) >> 3172 #endif >> 3173 >> 3174 /* >> 3175 * Use that for functions that are conditionally exported for sanity tests but >> 3176 * otherwise static >> 3177 */ >> 3178 #ifndef CONFIG_BTRFS_FS_RUN_SANITY_TESTS >> 3179 #define EXPORT_FOR_TESTS static >> 3180 #else >> 3181 #define EXPORT_FOR_TESTS >> 3182 #endif >> 3183 >> 3184 __cold >> 3185 static inline void btrfs_print_v0_err(struct btrfs_fs_info *fs_info) 721 { 3186 { 722 return btrfs_next_old_item(root, p, 0) !! 3187 btrfs_err(fs_info, >> 3188 "Unsupported V0 extent filesystem detected. Aborting. Please re-create your filesystem with a newer kernel"); 723 } 3189 } 724 int btrfs_leaf_free_space(const struct extent_ !! 3190 >> 3191 __printf(5, 6) >> 3192 __cold >> 3193 void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function, >> 3194 unsigned int line, int errno, const char *fmt, ...); >> 3195 >> 3196 const char * __attribute_const__ btrfs_decode_error(int errno); >> 3197 >> 3198 __cold >> 3199 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, >> 3200 const char *function, >> 3201 unsigned int line, int errno); >> 3202 >> 3203 /* >> 3204 * Call btrfs_abort_transaction as early as possible when an error condition is >> 3205 * detected, that way the exact line number is reported. >> 3206 */ >> 3207 #define btrfs_abort_transaction(trans, errno) \ >> 3208 do { \ >> 3209 /* Report first abort since mount */ \ >> 3210 if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \ >> 3211 &((trans)->fs_info->fs_state))) { \ >> 3212 if ((errno) != -EIO) { \ >> 3213 WARN(1, KERN_DEBUG \ >> 3214 "BTRFS: Transaction aborted (error %d)\n", \ >> 3215 (errno)); \ >> 3216 } else { \ >> 3217 btrfs_debug((trans)->fs_info, \ >> 3218 "Transaction aborted (error %d)", \ >> 3219 (errno)); \ >> 3220 } \ >> 3221 } \ >> 3222 __btrfs_abort_transaction((trans), __func__, \ >> 3223 __LINE__, (errno)); \ >> 3224 } while (0) >> 3225 >> 3226 #define btrfs_handle_fs_error(fs_info, errno, fmt, args...) \ >> 3227 do { \ >> 3228 __btrfs_handle_fs_error((fs_info), __func__, __LINE__, \ >> 3229 (errno), fmt, ##args); \ >> 3230 } while (0) >> 3231 >> 3232 __printf(5, 6) >> 3233 __cold >> 3234 void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, >> 3235 unsigned int line, int errno, const char *fmt, ...); >> 3236 /* >> 3237 * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic >> 3238 * will panic(). Otherwise we BUG() here. 
>> 3239 */ >> 3240 #define btrfs_panic(fs_info, errno, fmt, args...) \ >> 3241 do { \ >> 3242 __btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \ >> 3243 BUG(); \ >> 3244 } while (0) >> 3245 >> 3246 >> 3247 /* compatibility and incompatibility defines */ >> 3248 >> 3249 #define btrfs_set_fs_incompat(__fs_info, opt) \ >> 3250 __btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, \ >> 3251 #opt) >> 3252 >> 3253 static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, >> 3254 u64 flag, const char* name) >> 3255 { >> 3256 struct btrfs_super_block *disk_super; >> 3257 u64 features; >> 3258 >> 3259 disk_super = fs_info->super_copy; >> 3260 features = btrfs_super_incompat_flags(disk_super); >> 3261 if (!(features & flag)) { >> 3262 spin_lock(&fs_info->super_lock); >> 3263 features = btrfs_super_incompat_flags(disk_super); >> 3264 if (!(features & flag)) { >> 3265 features |= flag; >> 3266 btrfs_set_super_incompat_flags(disk_super, features); >> 3267 btrfs_info(fs_info, >> 3268 "setting incompat feature flag for %s (0x%llx)", >> 3269 name, flag); >> 3270 } >> 3271 spin_unlock(&fs_info->super_lock); >> 3272 } >> 3273 } >> 3274 >> 3275 #define btrfs_clear_fs_incompat(__fs_info, opt) \ >> 3276 __btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt, \ >> 3277 #opt) >> 3278 >> 3279 static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, >> 3280 u64 flag, const char* name) >> 3281 { >> 3282 struct btrfs_super_block *disk_super; >> 3283 u64 features; >> 3284 >> 3285 disk_super = fs_info->super_copy; >> 3286 features = btrfs_super_incompat_flags(disk_super); >> 3287 if (features & flag) { >> 3288 spin_lock(&fs_info->super_lock); >> 3289 features = btrfs_super_incompat_flags(disk_super); >> 3290 if (features & flag) { >> 3291 features &= ~flag; >> 3292 btrfs_set_super_incompat_flags(disk_super, features); >> 3293 btrfs_info(fs_info, >> 3294 "clearing incompat feature flag for %s (0x%llx)", >> 3295 name, flag); >> 3296 } >> 3297 spin_unlock(&fs_info->super_lock); >> 3298 } >> 3299 } >> 3300 >> 3301 #define btrfs_fs_incompat(fs_info, opt) \ >> 3302 __btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt) >> 3303 >> 3304 static inline bool __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag) >> 3305 { >> 3306 struct btrfs_super_block *disk_super; >> 3307 disk_super = fs_info->super_copy; >> 3308 return !!(btrfs_super_incompat_flags(disk_super) & flag); >> 3309 } >> 3310 >> 3311 #define btrfs_set_fs_compat_ro(__fs_info, opt) \ >> 3312 __btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, \ >> 3313 #opt) >> 3314 >> 3315 static inline void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, >> 3316 u64 flag, const char *name) >> 3317 { >> 3318 struct btrfs_super_block *disk_super; >> 3319 u64 features; >> 3320 >> 3321 disk_super = fs_info->super_copy; >> 3322 features = btrfs_super_compat_ro_flags(disk_super); >> 3323 if (!(features & flag)) { >> 3324 spin_lock(&fs_info->super_lock); >> 3325 features = btrfs_super_compat_ro_flags(disk_super); >> 3326 if (!(features & flag)) { >> 3327 features |= flag; >> 3328 btrfs_set_super_compat_ro_flags(disk_super, features); >> 3329 btrfs_info(fs_info, >> 3330 "setting compat-ro feature flag for %s (0x%llx)", >> 3331 name, flag); >> 3332 } >> 3333 spin_unlock(&fs_info->super_lock); >> 3334 } >> 3335 } >> 3336 >> 3337 #define btrfs_clear_fs_compat_ro(__fs_info, opt) \ >> 3338 __btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt, \ >> 3339 #opt) >> 3340 
>> 3341 static inline void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, >> 3342 u64 flag, const char *name) >> 3343 { >> 3344 struct btrfs_super_block *disk_super; >> 3345 u64 features; >> 3346 >> 3347 disk_super = fs_info->super_copy; >> 3348 features = btrfs_super_compat_ro_flags(disk_super); >> 3349 if (features & flag) { >> 3350 spin_lock(&fs_info->super_lock); >> 3351 features = btrfs_super_compat_ro_flags(disk_super); >> 3352 if (features & flag) { >> 3353 features &= ~flag; >> 3354 btrfs_set_super_compat_ro_flags(disk_super, features); >> 3355 btrfs_info(fs_info, >> 3356 "clearing compat-ro feature flag for %s (0x%llx)", >> 3357 name, flag); >> 3358 } >> 3359 spin_unlock(&fs_info->super_lock); >> 3360 } >> 3361 } >> 3362 >> 3363 #define btrfs_fs_compat_ro(fs_info, opt) \ >> 3364 __btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt) >> 3365 >> 3366 static inline int __btrfs_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag) >> 3367 { >> 3368 struct btrfs_super_block *disk_super; >> 3369 disk_super = fs_info->super_copy; >> 3370 return !!(btrfs_super_compat_ro_flags(disk_super) & flag); >> 3371 } >> 3372 >> 3373 /* acl.c */ >> 3374 #ifdef CONFIG_BTRFS_FS_POSIX_ACL >> 3375 struct posix_acl *btrfs_get_acl(struct inode *inode, int type); >> 3376 int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type); >> 3377 int btrfs_init_acl(struct btrfs_trans_handle *trans, >> 3378 struct inode *inode, struct inode *dir); >> 3379 #else >> 3380 #define btrfs_get_acl NULL >> 3381 #define btrfs_set_acl NULL >> 3382 static inline int btrfs_init_acl(struct btrfs_trans_handle *trans, >> 3383 struct inode *inode, struct inode *dir) >> 3384 { >> 3385 return 0; >> 3386 } >> 3387 #endif >> 3388 >> 3389 /* relocation.c */ >> 3390 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start); >> 3391 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, >> 3392 struct btrfs_root *root); >> 3393 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, >> 3394 struct btrfs_root *root); >> 3395 int btrfs_recover_relocation(struct btrfs_root *root); >> 3396 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len); >> 3397 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, >> 3398 struct btrfs_root *root, struct extent_buffer *buf, >> 3399 struct extent_buffer *cow); >> 3400 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending, >> 3401 u64 *bytes_to_reserve); >> 3402 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, >> 3403 struct btrfs_pending_snapshot *pending); >> 3404 >> 3405 /* scrub.c */ >> 3406 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, >> 3407 u64 end, struct btrfs_scrub_progress *progress, >> 3408 int readonly, int is_dev_replace); >> 3409 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info); >> 3410 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info); >> 3411 int btrfs_scrub_cancel(struct btrfs_fs_info *info); >> 3412 int btrfs_scrub_cancel_dev(struct btrfs_device *dev); >> 3413 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, >> 3414 struct btrfs_scrub_progress *progress); >> 3415 static inline void btrfs_init_full_stripe_locks_tree( >> 3416 struct btrfs_full_stripe_locks_tree *locks_root) >> 3417 { >> 3418 locks_root->root = RB_ROOT; >> 3419 mutex_init(&locks_root->lock); >> 3420 } >> 3421 >> 3422 /* dev-replace.c */ >> 3423 void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info); >> 3424 void 
btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info); >> 3425 void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount); >> 3426 >> 3427 static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info) >> 3428 { >> 3429 btrfs_bio_counter_sub(fs_info, 1); >> 3430 } >> 3431 >> 3432 /* reada.c */ >> 3433 struct reada_control { >> 3434 struct btrfs_fs_info *fs_info; /* tree to prefetch */ >> 3435 struct btrfs_key key_start; >> 3436 struct btrfs_key key_end; /* exclusive */ >> 3437 atomic_t elems; >> 3438 struct kref refcnt; >> 3439 wait_queue_head_t wait; >> 3440 }; >> 3441 struct reada_control *btrfs_reada_add(struct btrfs_root *root, >> 3442 struct btrfs_key *start, struct btrfs_key *end); >> 3443 int btrfs_reada_wait(void *handle); >> 3444 void btrfs_reada_detach(void *handle); >> 3445 int btree_readahead_hook(struct extent_buffer *eb, int err); 725 3446 726 static inline int is_fstree(u64 rootid) 3447 static inline int is_fstree(u64 rootid) 727 { 3448 { 728 if (rootid == BTRFS_FS_TREE_OBJECTID | 3449 if (rootid == BTRFS_FS_TREE_OBJECTID || 729 ((s64)rootid >= (s64)BTRFS_FIRST_F 3450 ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID && 730 !btrfs_qgroup_level(rootid))) 3451 !btrfs_qgroup_level(rootid))) 731 return 1; 3452 return 1; 732 return 0; 3453 return 0; 733 } 3454 } 734 3455 735 static inline bool btrfs_is_data_reloc_root(co !! 3456 static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info) 736 { 3457 { 737 return root->root_key.objectid == BTRF !! 3458 return signal_pending(current); 738 } 3459 } 739 3460 740 u16 btrfs_csum_type_size(u16 type); !! 3461 #define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len)) 741 int btrfs_super_csum_size(const struct btrfs_s << 742 const char *btrfs_super_csum_name(u16 csum_typ << 743 const char *btrfs_super_csum_driver(u16 csum_t << 744 size_t __attribute_const__ btrfs_get_num_csums << 745 3462 746 /* !! 3463 /* Sanity test specific functions */ 747 * We use page status Private2 to indicate the !! 3464 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 748 * unfinished IO. !! 3465 void btrfs_test_inode_set_ops(struct inode *inode); 749 * !! 3466 void btrfs_test_destroy_inode(struct inode *inode); 750 * Rename the Private2 accessors to Ordered, t !! 3467 751 */ !! 3468 static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info) 752 #define PageOrdered(page) PagePr !! 3469 { 753 #define SetPageOrdered(page) SetPag !! 3470 return test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state); 754 #define ClearPageOrdered(page) ClearP !! 3471 } 755 #define folio_test_ordered(folio) folio_ !! 3472 #else 756 #define folio_set_ordered(folio) folio_ !! 3473 static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info) 757 #define folio_clear_ordered(folio) folio_ !! 3474 { >> 3475 return 0; >> 3476 } >> 3477 #endif 758 3478 759 #endif 3479 #endif 760 3480
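/*
 * Editor's note (not part of either ctree.h version compared above): the
 * sketches that follow are hedged, illustrative examples of a few interfaces
 * declared in this header.  Names prefixed with "example_" are hypothetical,
 * the usual kernel includes are assumed, and none of this code is taken from
 * the kernel tree itself.
 *
 * First, the iteration helper from the newer column.  The comment above
 * btrfs_for_each_slot() documents its iter_ret contract (0 = item found,
 * 1 = no more leaves, < 0 = error from btrfs_search_slot() or
 * btrfs_get_next_valid_item()); a minimal walker could look like this.
 */
static int example_walk_root(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key = { 0 };	/* start from the smallest possible key */
	struct btrfs_key found_key;
	int iter_ret = 0;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
		/* Current item: leaf path->nodes[0], slot path->slots[0]. */
	}
	/* A return of 1 only means the tree was exhausted; not an error. */
	if (iter_ret < 0)
		ret = iter_ret;

	btrfs_free_path(path);
	return ret;
}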
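/*
 * Second, the btrfs_item_batch descriptor from the newer column.  Its comment
 * asks callers to pre-compute total_data_size rather than have
 * btrfs_insert_empty_items() loop over data_sizes again.  A hypothetical
 * two-item insertion, assuming the batch-pointer signature that the
 * (truncated) newer declaration of btrfs_insert_empty_items() suggests, with
 * keys already sorted by the caller:
 */
static int example_insert_two_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    const struct btrfs_key keys[2],
				    u32 size0, u32 size1)
{
	struct btrfs_item_batch batch;
	u32 data_sizes[2] = { size0, size1 };

	batch.keys = keys;
	batch.data_sizes = data_sizes;
	batch.total_data_size = size0 + size1;	/* pre-computed sum */
	batch.nr = 2;

	return btrfs_insert_empty_items(trans, root, path, &batch);
}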
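/*
 * Third, the btrfs_abort_transaction() macro.  Its comment says to abort as
 * early as possible once an error is detected, so the __LINE__ that gets
 * logged points at the failing call rather than a shared exit path.  A
 * hypothetical caller following that pattern:
 */
static int example_insert_or_abort(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   const struct btrfs_key *key,
				   u32 item_size)
{
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, key, item_size);
	if (ret) {
		/* Abort right at the point of failure. */
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_release_path(path);
	return 0;
}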
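/*
 * Finally, the compat/incompat feature helpers.  They are normally reached
 * through the stringifying macros, which take only the
 * BTRFS_FEATURE_INCOMPAT_* suffix; RAID56 is used here purely as an example
 * of an existing suffix.  __btrfs_set_fs_incompat() rechecks the flag under
 * super_lock, so the unlocked btrfs_fs_incompat() test is just a cheap
 * fast path.
 */
static void example_mark_raid56(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_fs_incompat(fs_info, RAID56))
		btrfs_set_fs_incompat(fs_info, RAID56);
}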