/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H

#include "linux/cleanup.h"
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <linux/refcount.h>
#include <uapi/linux/btrfs_tree.h>
#include "locking.h"
#include "fs.h"
#include "accessors.h"
#include "extent-io-tree.h"

struct extent_buffer;
struct btrfs_block_rsv;
struct btrfs_trans_handle;
struct btrfs_block_group;

/* Read ahead values for struct btrfs_path.reada */
enum {
	READA_NONE,
	READA_BACK,
	READA_FORWARD,
	/*
	 * Similar to READA_FORWARD but unlike it:
	 *
	 * 1) It will trigger readahead even for leaves that are not close to
	 *    each other on disk;
	 * 2) It also triggers readahead for nodes;
	 * 3) During a search, even when a node or leaf is already in memory,
	 *    it will still trigger readahead for other nodes and leaves that
	 *    follow it.
	 *
	 * This is meant to be used only when we know we are iterating over the
	 * entire tree or a very large part of it.
	 */
	READA_FORWARD_ALWAYS,
};
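
/*
 * Example (sketch): picking a readahead mode before walking a large part of
 * a tree.  The root, key, process_item() helper and error handling below are
 * illustrative only; btrfs_alloc_path(), btrfs_search_slot(),
 * btrfs_next_leaf() and btrfs_free_path() are the regular ctree helpers
 * declared elsewhere.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	path->reada = READA_FORWARD_ALWAYS;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	while (ret >= 0) {
 *		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;
 *			continue;
 *		}
 *		process_item(path->nodes[0], path->slots[0]);
 *		path->slots[0]++;
 *	}
 *	btrfs_free_path(path);
 */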

/*
 * btrfs_paths remember the path taken from the root down to the leaf.
 * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
struct btrfs_path {
	struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
	int slots[BTRFS_MAX_LEVEL];
	/* if there is real range locking, this locks field will change */
	u8 locks[BTRFS_MAX_LEVEL];
	u8 reada;
	/* keep some upper locks as we walk down */
	u8 lowest_level;

	/*
	 * set by btrfs_split_item, tells search_slot to keep all locks
	 * and to force calls to keep space in the nodes
	 */
	unsigned int search_for_split:1;
	unsigned int keep_locks:1;
	unsigned int skip_locking:1;
	unsigned int search_commit_root:1;
	unsigned int need_commit_sem:1;
	unsigned int skip_release_on_error:1;
	/*
	 * Indicate that new item (btrfs_search_slot) is extending already
	 * existing item and ins_len contains only the data size and not item
	 * header (ie. sizeof(struct btrfs_item) is not included).
	 */
	unsigned int search_for_extension:1;
	/* Stop search if any locks need to be taken (for read) */
	unsigned int nowait:1;
};

#define BTRFS_PATH_AUTO_FREE(path_name)					\
	struct btrfs_path *path_name __free(btrfs_free_path) = NULL
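
/*
 * Example (sketch): the common exact-key lookup pattern, using the cleanup
 * based path declaration from above.  Error handling is trimmed, the
 * surrounding root/ino variables are assumed to be in scope, and the inode
 * item lookup is only for illustration; btrfs_search_slot(),
 * btrfs_item_ptr() and btrfs_inode_size() are the regular helpers.
 *
 *	BTRFS_PATH_AUTO_FREE(path);
 *	struct btrfs_inode_item *item;
 *	struct btrfs_key key;
 *	u64 size;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = ino;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret != 0)
 *		return ret < 0 ? ret : -ENOENT;
 *	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *			      struct btrfs_inode_item);
 *	size = btrfs_inode_size(path->nodes[0], item);
 *	(the path is freed automatically when it goes out of scope)
 */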

/*
 * The state of btrfs root
 */
enum {
	/*
	 * btrfs_record_root_in_trans is a multi-step process, and it can race
	 * with the balancing code.   But the race is very small, and only the
	 * first time the root is added to each transaction.  So IN_TRANS_SETUP
	 * is used to tell us when more checks are required
	 */
	BTRFS_ROOT_IN_TRANS_SETUP,

	/*
	 * Set if tree blocks of this root can be shared by other roots.
	 * Only subvolume trees and their reloc trees have this bit set.
	 * Conflicts with TRACK_DIRTY bit.
	 *
	 * This affects two things:
	 *
	 * - How balance works
	 *   For shareable roots, we need to use reloc tree and do path
	 *   replacement for balance, and need various pre/post hooks for
	 *   snapshot creation to handle them.
	 *
	 *   While for non-shareable trees, we just simply do a tree search
	 *   with COW.
	 *
	 * - How dirty roots are tracked
	 *   For shareable roots, btrfs_record_root_in_trans() is needed to
	 *   track them, while non-subvolume roots have TRACK_DIRTY bit, they
	 *   don't need to set this manually.
	 */
	BTRFS_ROOT_SHAREABLE,
	BTRFS_ROOT_TRACK_DIRTY,
	BTRFS_ROOT_IN_RADIX,
	BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
	BTRFS_ROOT_DEFRAG_RUNNING,
	BTRFS_ROOT_FORCE_COW,
	BTRFS_ROOT_MULTI_LOG_TASKS,
	BTRFS_ROOT_DIRTY,
	BTRFS_ROOT_DELETING,

	/*
	 * Reloc tree is orphan, only kept here for qgroup delayed subtree scan
	 *
	 * Set for the subvolume tree owning the reloc tree.
	 */
	BTRFS_ROOT_DEAD_RELOC_TREE,
	/* Mark dead root stored on device whose cleanup needs to be resumed */
	BTRFS_ROOT_DEAD_TREE,
	/* The root has a log tree. Used for subvolume roots and the tree root. */
	BTRFS_ROOT_HAS_LOG_TREE,
	/* Qgroup flushing is in progress */
	BTRFS_ROOT_QGROUP_FLUSHING,
	/* We started the orphan cleanup for this root. */
	BTRFS_ROOT_ORPHAN_CLEANUP,
	/* This root has a drop operation that was started previously. */
	BTRFS_ROOT_UNFINISHED_DROP,
	/* This reloc root needs to have its buffers lockdep class reset. */
	BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};
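
/*
 * Example (sketch): these flags live in btrfs_root::state (an unsigned long)
 * and are queried/updated with the regular bitops; the policy shown here is
 * illustrative only.
 *
 *	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
 *		(blocks of this root may be shared, so balance has to go
 *		 through the reloc tree machinery rather than plain COW)
 *	}
 *
 *	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
 *		return 0;	(orphan cleanup already started elsewhere)
 */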
>> 972 */ >> 973 struct list_head space_info; >> 974 >> 975 struct btrfs_space_info *data_sinfo; >> 976 >> 977 struct reloc_control *reloc_ctl; >> 978 >> 979 /* data_alloc_cluster is only used in ssd mode */ >> 980 struct btrfs_free_cluster data_alloc_cluster; >> 981 >> 982 /* all metadata allocations go through this cluster */ >> 983 struct btrfs_free_cluster meta_alloc_cluster; >> 984 >> 985 /* auto defrag inodes go here */ >> 986 spinlock_t defrag_inodes_lock; >> 987 struct rb_root defrag_inodes; >> 988 atomic_t defrag_running; >> 989 >> 990 /* Used to protect avail_{data, metadata, system}_alloc_bits */ >> 991 seqlock_t profiles_lock; >> 992 /* >> 993 * these three are in extended format (availability of single >> 994 * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other >> 995 * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits) >> 996 */ >> 997 u64 avail_data_alloc_bits; >> 998 u64 avail_metadata_alloc_bits; >> 999 u64 avail_system_alloc_bits; >> 1000 >> 1001 /* restriper state */ >> 1002 spinlock_t balance_lock; >> 1003 struct mutex balance_mutex; >> 1004 atomic_t balance_running; >> 1005 atomic_t balance_pause_req; >> 1006 atomic_t balance_cancel_req; >> 1007 struct btrfs_balance_control *balance_ctl; >> 1008 wait_queue_head_t balance_wait_q; >> 1009 >> 1010 unsigned data_chunk_allocations; >> 1011 unsigned metadata_ratio; >> 1012 >> 1013 void *bdev_holder; >> 1014 >> 1015 /* private scrub information */ >> 1016 struct mutex scrub_lock; >> 1017 atomic_t scrubs_running; >> 1018 atomic_t scrub_pause_req; >> 1019 atomic_t scrubs_paused; >> 1020 atomic_t scrub_cancel_req; >> 1021 wait_queue_head_t scrub_pause_wait; >> 1022 int scrub_workers_refcnt; >> 1023 struct btrfs_workqueue *scrub_workers; >> 1024 struct btrfs_workqueue *scrub_wr_completion_workers; >> 1025 struct btrfs_workqueue *scrub_nocow_workers; >> 1026 struct btrfs_workqueue *scrub_parity_workers; >> 1027 >> 1028 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY >> 1029 u32 check_integrity_print_mask; >> 1030 #endif >> 1031 /* is qgroup tracking in a consistent state? */ >> 1032 u64 qgroup_flags; >> 1033 >> 1034 /* holds configuration and tracking. Protected by qgroup_lock */ >> 1035 struct rb_root qgroup_tree; >> 1036 struct rb_root qgroup_op_tree; >> 1037 spinlock_t qgroup_lock; >> 1038 spinlock_t qgroup_op_lock; >> 1039 atomic_t qgroup_op_seq; >> 1040 >> 1041 /* >> 1042 * used to avoid frequently calling ulist_alloc()/ulist_free() >> 1043 * when doing qgroup accounting, it must be protected by qgroup_lock. 
>> 1044 */ >> 1045 struct ulist *qgroup_ulist; >> 1046 >> 1047 /* protect user change for quota operations */ >> 1048 struct mutex qgroup_ioctl_lock; >> 1049 >> 1050 /* list of dirty qgroups to be written at next commit */ >> 1051 struct list_head dirty_qgroups; >> 1052 >> 1053 /* used by qgroup for an efficient tree traversal */ >> 1054 u64 qgroup_seq; >> 1055 >> 1056 /* qgroup rescan items */ >> 1057 struct mutex qgroup_rescan_lock; /* protects the progress item */ >> 1058 struct btrfs_key qgroup_rescan_progress; >> 1059 struct btrfs_workqueue *qgroup_rescan_workers; >> 1060 struct completion qgroup_rescan_completion; >> 1061 struct btrfs_work qgroup_rescan_work; >> 1062 bool qgroup_rescan_running; /* protected by qgroup_rescan_lock */ >> 1063 >> 1064 /* filesystem state */ >> 1065 unsigned long fs_state; >> 1066 >> 1067 struct btrfs_delayed_root *delayed_root; >> 1068 >> 1069 /* readahead tree */ >> 1070 spinlock_t reada_lock; >> 1071 struct radix_tree_root reada_tree; >> 1072 >> 1073 /* readahead works cnt */ >> 1074 atomic_t reada_works_cnt; >> 1075 >> 1076 /* Extent buffer radix tree */ >> 1077 spinlock_t buffer_lock; >> 1078 struct radix_tree_root buffer_radix; >> 1079 >> 1080 /* next backup root to be overwritten */ >> 1081 int backup_root_index; >> 1082 >> 1083 int num_tolerated_disk_barrier_failures; >> 1084 >> 1085 /* device replace state */ >> 1086 struct btrfs_dev_replace dev_replace; >> 1087 >> 1088 struct percpu_counter bio_counter; >> 1089 wait_queue_head_t replace_wait; >> 1090 >> 1091 struct semaphore uuid_tree_rescan_sem; >> 1092 >> 1093 /* Used to reclaim the metadata space in the background. */ >> 1094 struct work_struct async_reclaim_work; >> 1095 >> 1096 spinlock_t unused_bgs_lock; >> 1097 struct list_head unused_bgs; >> 1098 struct mutex unused_bg_unpin_mutex; >> 1099 struct mutex delete_unused_bgs_mutex; >> 1100 >> 1101 /* For btrfs to record security options */ >> 1102 struct security_mnt_opts security_opts; >> 1103 >> 1104 /* >> 1105 * Chunks that can't be freed yet (under a trim/discard operation) >> 1106 * and will be latter freed. Protected by fs_info->chunk_mutex. >> 1107 */ >> 1108 struct list_head pinned_chunks; >> 1109 >> 1110 /* Used to record internally whether fs has been frozen */ >> 1111 int fs_frozen; >> 1112 >> 1113 /* Cached block sizes */ >> 1114 u32 nodesize; >> 1115 u32 sectorsize; >> 1116 u32 stripesize; >> 1117 }; >> 1118 >> 1119 static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb) >> 1120 { >> 1121 return sb->s_fs_info; >> 1122 } >> 1123 >> 1124 struct btrfs_subvolume_writers { >> 1125 struct percpu_counter counter; >> 1126 wait_queue_head_t wait; 151 }; 1127 }; 152 1128 153 /* 1129 /* 154 * Record swapped tree blocks of a subvolume t !! 1130 * The state of btrfs root 155 * code. For detail check comment in fs/btrfs/ << 156 */ 1131 */ 157 struct btrfs_qgroup_swapped_blocks { !! 1132 /* 158 spinlock_t lock; !! 1133 * btrfs_record_root_in_trans is a multi-step process, 159 /* RM_EMPTY_ROOT() of above blocks[] * !! 1134 * and it can race with the balancing code. But the 160 bool swapped; !! 1135 * race is very small, and only the first time the root 161 struct rb_root blocks[BTRFS_MAX_LEVEL] !! 1136 * is added to each transaction. So IN_TRANS_SETUP 162 }; !! 

/*
 * in ram representation of the tree.  extent_root is used for all allocations
 * and for the extent tree extent_root root.
 */
struct btrfs_root {
	struct rb_node rb_node;

	struct extent_buffer *node;

	struct extent_buffer *commit_root;
	struct btrfs_root *log_root;
	struct btrfs_root *reloc_root;

	unsigned long state;
	struct btrfs_root_item root_item;
	struct btrfs_key root_key;
	struct btrfs_fs_info *fs_info;
	struct extent_io_tree dirty_log_pages;

	struct mutex objectid_mutex;

	spinlock_t accounting_lock;
	struct btrfs_block_rsv *block_rsv;

	struct mutex log_mutex;
	wait_queue_head_t log_writer_wait;
	wait_queue_head_t log_commit_wait[2];
	struct list_head log_ctxs[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_writers;
	atomic_t log_commit[2];
	/* Used only for log trees of subvolumes, not for the log root tree */
	atomic_t log_batch;
	/*
	 * Protected by the 'log_mutex' lock but can be read without holding
	 * that lock to avoid unnecessary lock contention, in which case it
	 * should be read using btrfs_get_root_log_transid() except if it's a
	 * log tree in which case it can be directly accessed. Updates to this
	 * field should always use btrfs_set_root_log_transid(), except for log
	 * trees where the field can be updated directly.
	 */
	int log_transid;
	/* No matter the commit succeeds or not*/
	int log_transid_committed;
	/*
	 * Just be updated when the commit succeeds. Use
	 * btrfs_get_root_last_log_commit() and btrfs_set_root_last_log_commit()
	 * to access this field.
	 */
	int last_log_commit;
	pid_t log_start_pid;

	u64 last_trans;

	u64 free_objectid;

	struct btrfs_key defrag_progress;
	struct btrfs_key defrag_max;

	/* The dirty list is only used by non-shareable roots */
	struct list_head dirty_list;

	struct list_head root_list;

	/*
	 * Xarray that keeps track of in-memory inodes, protected by the lock
	 * @inode_lock.
	 */
	struct xarray inodes;

	/*
	 * Xarray that keeps track of delayed nodes of every inode, protected
	 * by @inode_lock.
	 */
	struct xarray delayed_nodes;
	/*
	 * right now this just gets used so that a root has its own devid
	 * for stat.  It may be used for more later
	 */
	dev_t anon_dev;

	spinlock_t root_item_lock;
	refcount_t refs;

	struct mutex delalloc_mutex;
	spinlock_t delalloc_lock;
	/*
	 * all of the inodes that have delalloc bytes.  It is possible for
	 * this list to be empty even when there is still dirty data=ordered
	 * extents waiting to finish IO.
	 */
	struct list_head delalloc_inodes;
	struct list_head delalloc_root;
	u64 nr_delalloc_inodes;

	struct mutex ordered_extent_mutex;
	/*
	 * this is used by the balancing code to wait for all the pending
	 * ordered extents
	 */
	spinlock_t ordered_extent_lock;

	/*
	 * all of the data=ordered extents pending writeback
	 * these can span multiple transactions and basically include
	 * every dirty data page that isn't from nodatacow
	 */
	struct list_head ordered_extents;
	struct list_head ordered_root;
	u64 nr_ordered_extents;

	/*
	 * Not empty if this subvolume root has gone through tree block swap
	 * (relocation)
	 *
	 * Will be used by reloc_control::dirty_subvol_roots.
	 */
	struct list_head reloc_dirty_list;

	/*
	 * Number of currently running SEND ioctls to prevent
	 * manipulation with the read-only status via SUBVOL_SETFLAGS
	 */
	int send_in_progress;
	/*
	 * Number of currently running deduplication operations that have a
	 * destination inode belonging to this root. Protected by the lock
	 * root_item_lock.
	 */
	int dedupe_in_progress;
	/* For exclusion of snapshot creation and nocow writes */
	struct btrfs_drew_lock snapshot_lock;

	atomic_t snapshot_force_cow;

	/* For qgroup metadata reserved space */
	spinlock_t qgroup_meta_rsv_lock;
	u64 qgroup_meta_rsv_pertrans;
	u64 qgroup_meta_rsv_prealloc;
	wait_queue_head_t qgroup_flush_wait;

	/* Number of active swapfiles */
	atomic_t nr_swapfiles;

	/* Record pairs of swapped blocks for qgroup */
	struct btrfs_qgroup_swapped_blocks swapped_blocks;

	/* Used only by log trees, when logging csum items */
	struct extent_io_tree log_csum_range;

	/* Used in simple quotas, track root during relocation. */
	u64 relocation_src_root;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	u64 alloc_bytenr;
#endif

#ifdef CONFIG_BTRFS_DEBUG
	struct list_head leak_list;
#endif
};

static inline bool btrfs_root_readonly(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}

static inline bool btrfs_root_dead(const struct btrfs_root *root)
{
	/* Byte-swap the constant at compile time, root_item::flags is LE */
	return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}
{ >> 1615 return (char *)s + offsetof(struct btrfs_stripe, dev_uuid); >> 1616 } >> 1617 >> 1618 BTRFS_SETGET_STACK_FUNCS(stack_chunk_length, struct btrfs_chunk, length, 64); >> 1619 BTRFS_SETGET_STACK_FUNCS(stack_chunk_owner, struct btrfs_chunk, owner, 64); >> 1620 BTRFS_SETGET_STACK_FUNCS(stack_chunk_stripe_len, struct btrfs_chunk, >> 1621 stripe_len, 64); >> 1622 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_align, struct btrfs_chunk, >> 1623 io_align, 32); >> 1624 BTRFS_SETGET_STACK_FUNCS(stack_chunk_io_width, struct btrfs_chunk, >> 1625 io_width, 32); >> 1626 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sector_size, struct btrfs_chunk, >> 1627 sector_size, 32); >> 1628 BTRFS_SETGET_STACK_FUNCS(stack_chunk_type, struct btrfs_chunk, type, 64); >> 1629 BTRFS_SETGET_STACK_FUNCS(stack_chunk_num_stripes, struct btrfs_chunk, >> 1630 num_stripes, 16); >> 1631 BTRFS_SETGET_STACK_FUNCS(stack_chunk_sub_stripes, struct btrfs_chunk, >> 1632 sub_stripes, 16); >> 1633 BTRFS_SETGET_STACK_FUNCS(stack_stripe_devid, struct btrfs_stripe, devid, 64); >> 1634 BTRFS_SETGET_STACK_FUNCS(stack_stripe_offset, struct btrfs_stripe, offset, 64); >> 1635 >> 1636 static inline struct btrfs_stripe *btrfs_stripe_nr(struct btrfs_chunk *c, >> 1637 int nr) >> 1638 { >> 1639 unsigned long offset = (unsigned long)c; >> 1640 offset += offsetof(struct btrfs_chunk, stripe); >> 1641 offset += nr * sizeof(struct btrfs_stripe); >> 1642 return (struct btrfs_stripe *)offset; >> 1643 } >> 1644 >> 1645 static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr) >> 1646 { >> 1647 return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr)); >> 1648 } >> 1649 >> 1650 static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, >> 1651 struct btrfs_chunk *c, int nr) >> 1652 { >> 1653 return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); >> 1654 } >> 1655 >> 1656 static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, >> 1657 struct btrfs_chunk *c, int nr) >> 1658 { >> 1659 return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); >> 1660 } >> 1661 >> 1662 /* struct btrfs_block_group_item */ >> 1663 BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, >> 1664 used, 64); >> 1665 BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item, >> 1666 used, 64); >> 1667 BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid, >> 1668 struct btrfs_block_group_item, chunk_objectid, 64); >> 1669 >> 1670 BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid, >> 1671 struct btrfs_block_group_item, chunk_objectid, 64); >> 1672 BTRFS_SETGET_FUNCS(disk_block_group_flags, >> 1673 struct btrfs_block_group_item, flags, 64); >> 1674 BTRFS_SETGET_STACK_FUNCS(block_group_flags, >> 1675 struct btrfs_block_group_item, flags, 64); >> 1676 >> 1677 /* struct btrfs_free_space_info */ >> 1678 BTRFS_SETGET_FUNCS(free_space_extent_count, struct btrfs_free_space_info, >> 1679 extent_count, 32); >> 1680 BTRFS_SETGET_FUNCS(free_space_flags, struct btrfs_free_space_info, flags, 32); >> 1681 >> 1682 /* struct btrfs_inode_ref */ >> 1683 BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16); >> 1684 BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64); >> 1685 >> 1686 /* struct btrfs_inode_extref */ >> 1687 BTRFS_SETGET_FUNCS(inode_extref_parent, struct btrfs_inode_extref, >> 1688 parent_objectid, 64); >> 1689 BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref, >> 1690 name_len, 16); >> 1691 BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64); >> 1692 >> 
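/*
 * Editor's note -- illustrative sketch only, not part of the original
 * header.  It shows how the generated accessors above are meant to be
 * used: the "pointer" to a chunk item is really an offset into the
 * extent buffer, and every field access goes through an endian-aware
 * helper such as btrfs_chunk_num_stripes() or btrfs_stripe_devid_nr().
 * The example_* name is made up for this sketch; 'chunk' is assumed to
 * point at a valid chunk item inside 'leaf'.
 */
static inline u64 example_chunk_highest_devid(struct extent_buffer *leaf,
					       struct btrfs_chunk *chunk)
{
	u16 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 max_devid = 0;
	int i;

	for (i = 0; i < num_stripes; i++) {
		/* per-stripe accessor generated by BTRFS_SETGET_FUNCS above */
		u64 devid = btrfs_stripe_devid_nr(leaf, chunk, i);

		if (devid > max_devid)
			max_devid = devid;
	}
	return max_devid;
}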
1693 /* struct btrfs_inode_item */ >> 1694 BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64); >> 1695 BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64); >> 1696 BTRFS_SETGET_FUNCS(inode_transid, struct btrfs_inode_item, transid, 64); >> 1697 BTRFS_SETGET_FUNCS(inode_size, struct btrfs_inode_item, size, 64); >> 1698 BTRFS_SETGET_FUNCS(inode_nbytes, struct btrfs_inode_item, nbytes, 64); >> 1699 BTRFS_SETGET_FUNCS(inode_block_group, struct btrfs_inode_item, block_group, 64); >> 1700 BTRFS_SETGET_FUNCS(inode_nlink, struct btrfs_inode_item, nlink, 32); >> 1701 BTRFS_SETGET_FUNCS(inode_uid, struct btrfs_inode_item, uid, 32); >> 1702 BTRFS_SETGET_FUNCS(inode_gid, struct btrfs_inode_item, gid, 32); >> 1703 BTRFS_SETGET_FUNCS(inode_mode, struct btrfs_inode_item, mode, 32); >> 1704 BTRFS_SETGET_FUNCS(inode_rdev, struct btrfs_inode_item, rdev, 64); >> 1705 BTRFS_SETGET_FUNCS(inode_flags, struct btrfs_inode_item, flags, 64); >> 1706 BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, >> 1707 generation, 64); >> 1708 BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, >> 1709 sequence, 64); >> 1710 BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, >> 1711 transid, 64); >> 1712 BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); >> 1713 BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, >> 1714 nbytes, 64); >> 1715 BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, >> 1716 block_group, 64); >> 1717 BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); >> 1718 BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); >> 1719 BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); >> 1720 BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); >> 1721 BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); >> 1722 BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); >> 1723 BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); >> 1724 BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); >> 1725 BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); >> 1726 BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); >> 1727 >> 1728 /* struct btrfs_dev_extent */ >> 1729 BTRFS_SETGET_FUNCS(dev_extent_chunk_tree, struct btrfs_dev_extent, >> 1730 chunk_tree, 64); >> 1731 BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent, >> 1732 chunk_objectid, 64); >> 1733 BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent, >> 1734 chunk_offset, 64); >> 1735 BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64); >> 1736 >> 1737 static inline unsigned long btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev) >> 1738 { >> 1739 unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid); >> 1740 return (unsigned long)dev + ptr; >> 1741 } >> 1742 >> 1743 BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64); >> 1744 BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item, >> 1745 generation, 64); >> 1746 BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64); >> 1747 >> 1748 BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32); >> 1749 >> 1750 >> 1751 BTRFS_SETGET_FUNCS(tree_block_level, struct 
btrfs_tree_block_info, level, 8); >> 1752 >> 1753 static inline void btrfs_tree_block_key(struct extent_buffer *eb, >> 1754 struct btrfs_tree_block_info *item, >> 1755 struct btrfs_disk_key *key) >> 1756 { >> 1757 read_eb_member(eb, item, struct btrfs_tree_block_info, key, key); >> 1758 } >> 1759 >> 1760 static inline void btrfs_set_tree_block_key(struct extent_buffer *eb, >> 1761 struct btrfs_tree_block_info *item, >> 1762 struct btrfs_disk_key *key) >> 1763 { >> 1764 write_eb_member(eb, item, struct btrfs_tree_block_info, key, key); >> 1765 } >> 1766 >> 1767 BTRFS_SETGET_FUNCS(extent_data_ref_root, struct btrfs_extent_data_ref, >> 1768 root, 64); >> 1769 BTRFS_SETGET_FUNCS(extent_data_ref_objectid, struct btrfs_extent_data_ref, >> 1770 objectid, 64); >> 1771 BTRFS_SETGET_FUNCS(extent_data_ref_offset, struct btrfs_extent_data_ref, >> 1772 offset, 64); >> 1773 BTRFS_SETGET_FUNCS(extent_data_ref_count, struct btrfs_extent_data_ref, >> 1774 count, 32); >> 1775 >> 1776 BTRFS_SETGET_FUNCS(shared_data_ref_count, struct btrfs_shared_data_ref, >> 1777 count, 32); >> 1778 >> 1779 BTRFS_SETGET_FUNCS(extent_inline_ref_type, struct btrfs_extent_inline_ref, >> 1780 type, 8); >> 1781 BTRFS_SETGET_FUNCS(extent_inline_ref_offset, struct btrfs_extent_inline_ref, >> 1782 offset, 64); >> 1783 >> 1784 static inline u32 btrfs_extent_inline_ref_size(int type) >> 1785 { >> 1786 if (type == BTRFS_TREE_BLOCK_REF_KEY || >> 1787 type == BTRFS_SHARED_BLOCK_REF_KEY) >> 1788 return sizeof(struct btrfs_extent_inline_ref); >> 1789 if (type == BTRFS_SHARED_DATA_REF_KEY) >> 1790 return sizeof(struct btrfs_shared_data_ref) + >> 1791 sizeof(struct btrfs_extent_inline_ref); >> 1792 if (type == BTRFS_EXTENT_DATA_REF_KEY) >> 1793 return sizeof(struct btrfs_extent_data_ref) + >> 1794 offsetof(struct btrfs_extent_inline_ref, offset); >> 1795 BUG(); >> 1796 return 0; >> 1797 } >> 1798 >> 1799 BTRFS_SETGET_FUNCS(ref_root_v0, struct btrfs_extent_ref_v0, root, 64); >> 1800 BTRFS_SETGET_FUNCS(ref_generation_v0, struct btrfs_extent_ref_v0, >> 1801 generation, 64); >> 1802 BTRFS_SETGET_FUNCS(ref_objectid_v0, struct btrfs_extent_ref_v0, objectid, 64); >> 1803 BTRFS_SETGET_FUNCS(ref_count_v0, struct btrfs_extent_ref_v0, count, 32); >> 1804 >> 1805 /* struct btrfs_node */ >> 1806 BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64); >> 1807 BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64); >> 1808 BTRFS_SETGET_STACK_FUNCS(stack_key_blockptr, struct btrfs_key_ptr, >> 1809 blockptr, 64); >> 1810 BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr, >> 1811 generation, 64); >> 1812 >> 1813 static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr) 333 { 1814 { 334 /* Byte-swap the constant at compile t !! 1815 unsigned long ptr; 335 return (root->root_item.flags & cpu_to !! 
1816 ptr = offsetof(struct btrfs_node, ptrs) + >> 1817 sizeof(struct btrfs_key_ptr) * nr; >> 1818 return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr); >> 1819 } >> 1820 >> 1821 static inline void btrfs_set_node_blockptr(struct extent_buffer *eb, >> 1822 int nr, u64 val) >> 1823 { >> 1824 unsigned long ptr; >> 1825 ptr = offsetof(struct btrfs_node, ptrs) + >> 1826 sizeof(struct btrfs_key_ptr) * nr; >> 1827 btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val); >> 1828 } >> 1829 >> 1830 static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr) >> 1831 { >> 1832 unsigned long ptr; >> 1833 ptr = offsetof(struct btrfs_node, ptrs) + >> 1834 sizeof(struct btrfs_key_ptr) * nr; >> 1835 return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr); 336 } 1836 } 337 1837 338 static inline u64 btrfs_root_id(const struct b !! 1838 static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb, >> 1839 int nr, u64 val) 339 { 1840 { 340 return root->root_key.objectid; !! 1841 unsigned long ptr; >> 1842 ptr = offsetof(struct btrfs_node, ptrs) + >> 1843 sizeof(struct btrfs_key_ptr) * nr; >> 1844 btrfs_set_key_generation(eb, (struct btrfs_key_ptr *)ptr, val); 341 } 1845 } 342 1846 343 static inline int btrfs_get_root_log_transid(c !! 1847 static inline unsigned long btrfs_node_key_ptr_offset(int nr) 344 { 1848 { 345 return READ_ONCE(root->log_transid); !! 1849 return offsetof(struct btrfs_node, ptrs) + >> 1850 sizeof(struct btrfs_key_ptr) * nr; 346 } 1851 } 347 1852 348 static inline void btrfs_set_root_log_transid( !! 1853 void btrfs_node_key(struct extent_buffer *eb, >> 1854 struct btrfs_disk_key *disk_key, int nr); >> 1855 >> 1856 static inline void btrfs_set_node_key(struct extent_buffer *eb, >> 1857 struct btrfs_disk_key *disk_key, int nr) 349 { 1858 { 350 WRITE_ONCE(root->log_transid, log_tran !! 1859 unsigned long ptr; >> 1860 ptr = btrfs_node_key_ptr_offset(nr); >> 1861 write_eb_member(eb, (struct btrfs_key_ptr *)ptr, >> 1862 struct btrfs_key_ptr, key, disk_key); 351 } 1863 } 352 1864 353 static inline int btrfs_get_root_last_log_comm !! 1865 /* struct btrfs_item */ >> 1866 BTRFS_SETGET_FUNCS(item_offset, struct btrfs_item, offset, 32); >> 1867 BTRFS_SETGET_FUNCS(item_size, struct btrfs_item, size, 32); >> 1868 BTRFS_SETGET_STACK_FUNCS(stack_item_offset, struct btrfs_item, offset, 32); >> 1869 BTRFS_SETGET_STACK_FUNCS(stack_item_size, struct btrfs_item, size, 32); >> 1870 >> 1871 static inline unsigned long btrfs_item_nr_offset(int nr) 354 { 1872 { 355 return READ_ONCE(root->last_log_commit !! 1873 return offsetof(struct btrfs_leaf, items) + >> 1874 sizeof(struct btrfs_item) * nr; 356 } 1875 } 357 1876 358 static inline void btrfs_set_root_last_log_com !! 1877 static inline struct btrfs_item *btrfs_item_nr(int nr) 359 { 1878 { 360 WRITE_ONCE(root->last_log_commit, comm !! 1879 return (struct btrfs_item *)btrfs_item_nr_offset(nr); 361 } 1880 } 362 1881 363 static inline u64 btrfs_get_root_last_trans(co !! 1882 static inline u32 btrfs_item_end(struct extent_buffer *eb, >> 1883 struct btrfs_item *item) 364 { 1884 { 365 return READ_ONCE(root->last_trans); !! 1885 return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item); 366 } 1886 } 367 1887 368 static inline void btrfs_set_root_last_trans(s !! 1888 static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr) 369 { 1889 { 370 WRITE_ONCE(root->last_trans, transid); !! 
1890 return btrfs_item_end(eb, btrfs_item_nr(nr)); 371 } 1891 } 372 1892 >> 1893 static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr) >> 1894 { >> 1895 return btrfs_item_offset(eb, btrfs_item_nr(nr)); >> 1896 } >> 1897 >> 1898 static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr) >> 1899 { >> 1900 return btrfs_item_size(eb, btrfs_item_nr(nr)); >> 1901 } >> 1902 >> 1903 static inline void btrfs_item_key(struct extent_buffer *eb, >> 1904 struct btrfs_disk_key *disk_key, int nr) >> 1905 { >> 1906 struct btrfs_item *item = btrfs_item_nr(nr); >> 1907 read_eb_member(eb, item, struct btrfs_item, key, disk_key); >> 1908 } >> 1909 >> 1910 static inline void btrfs_set_item_key(struct extent_buffer *eb, >> 1911 struct btrfs_disk_key *disk_key, int nr) >> 1912 { >> 1913 struct btrfs_item *item = btrfs_item_nr(nr); >> 1914 write_eb_member(eb, item, struct btrfs_item, key, disk_key); >> 1915 } >> 1916 >> 1917 BTRFS_SETGET_FUNCS(dir_log_end, struct btrfs_dir_log_item, end, 64); >> 1918 373 /* 1919 /* 374 * Structure that conveys information about an !! 1920 * struct btrfs_root_ref 375 * all the extents in a file range. << 376 */ 1921 */ 377 struct btrfs_replace_extent_info { !! 1922 BTRFS_SETGET_FUNCS(root_ref_dirid, struct btrfs_root_ref, dirid, 64); 378 u64 disk_offset; !! 1923 BTRFS_SETGET_FUNCS(root_ref_sequence, struct btrfs_root_ref, sequence, 64); 379 u64 disk_len; !! 1924 BTRFS_SETGET_FUNCS(root_ref_name_len, struct btrfs_root_ref, name_len, 16); 380 u64 data_offset; << 381 u64 data_len; << 382 u64 file_offset; << 383 /* Pointer to a file extent item of ty << 384 char *extent_buf; << 385 /* << 386 * Set to true when attempting to repl << 387 * described by this structure, set to << 388 * existing extent into a file range. << 389 */ << 390 bool is_new_extent; << 391 /* Indicate if we should update the in << 392 bool update_times; << 393 /* Meaningful only if is_new_extent is << 394 int qgroup_reserved; << 395 /* << 396 * Meaningful only if is_new_extent is << 397 * Used to track how many extent items << 398 * subvolume tree that refer to the ex << 399 * so that we know when to create a ne << 400 * one. << 401 */ << 402 int insertions; << 403 }; << 404 1925 405 /* Arguments for btrfs_drop_extents() */ !! 1926 /* struct btrfs_dir_item */ 406 struct btrfs_drop_extents_args { !! 1927 BTRFS_SETGET_FUNCS(dir_data_len, struct btrfs_dir_item, data_len, 16); 407 /* Input parameters */ !! 1928 BTRFS_SETGET_FUNCS(dir_type, struct btrfs_dir_item, type, 8); >> 1929 BTRFS_SETGET_FUNCS(dir_name_len, struct btrfs_dir_item, name_len, 16); >> 1930 BTRFS_SETGET_FUNCS(dir_transid, struct btrfs_dir_item, transid, 64); >> 1931 BTRFS_SETGET_STACK_FUNCS(stack_dir_type, struct btrfs_dir_item, type, 8); >> 1932 BTRFS_SETGET_STACK_FUNCS(stack_dir_data_len, struct btrfs_dir_item, >> 1933 data_len, 16); >> 1934 BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item, >> 1935 name_len, 16); >> 1936 BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item, >> 1937 transid, 64); 408 1938 409 /* !! 1939 static inline void btrfs_dir_item_key(struct extent_buffer *eb, 410 * If NULL, btrfs_drop_extents() will !! 1940 struct btrfs_dir_item *item, 411 * If 'replace_extent' is true, this m !! 1941 struct btrfs_disk_key *key) 412 * is always released except if 'repla !! 1942 { 413 * btrfs_drop_extents() sets 'extent_i !! 1943 read_eb_member(eb, item, struct btrfs_dir_item, location, key); 414 * the path is kept locked. !! 
1944 } 415 */ << 416 struct btrfs_path *path; << 417 /* Start offset of the range to drop e << 418 u64 start; << 419 /* End (exclusive, last byte + 1) of t << 420 u64 end; << 421 /* If true drop all the extent maps in << 422 bool drop_cache; << 423 /* << 424 * If true it means we want to insert << 425 * the extents in the range. If this i << 426 * parameter must be set as well and t << 427 * be set to true by btrfs_drop_extent << 428 * extent. << 429 * Note: when this is set to true the << 430 */ << 431 bool replace_extent; << 432 /* << 433 * Used if 'replace_extent' is true. S << 434 * insert after dropping all existing << 435 */ << 436 u32 extent_item_size; << 437 1945 438 /* Output parameters */ !! 1946 static inline void btrfs_set_dir_item_key(struct extent_buffer *eb, >> 1947 struct btrfs_dir_item *item, >> 1948 struct btrfs_disk_key *key) >> 1949 { >> 1950 write_eb_member(eb, item, struct btrfs_dir_item, location, key); >> 1951 } 439 1952 440 /* !! 1953 BTRFS_SETGET_FUNCS(free_space_entries, struct btrfs_free_space_header, 441 * Set to the minimum between the inpu !! 1954 num_entries, 64); 442 * (exclusive, last byte + 1) of the l !! 1955 BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header, 443 * set even if btrfs_drop_extents() re !! 1956 num_bitmaps, 64); 444 */ !! 1957 BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header, 445 u64 drop_end; !! 1958 generation, 64); 446 /* << 447 * The number of allocated bytes found << 448 * than the range's length when there << 449 */ << 450 u64 bytes_found; << 451 /* << 452 * Only set if 'replace_extent' is tru << 453 * to insert a replacement extent afte << 454 * range, otherwise set to false by bt << 455 * Also, if btrfs_drop_extents() has s << 456 * returned with the path locked, othe << 457 * false it has returned with the path << 458 */ << 459 bool extent_inserted; << 460 }; << 461 1959 462 struct btrfs_file_private { !! 1960 static inline void btrfs_free_space_key(struct extent_buffer *eb, 463 void *filldir_buf; !! 1961 struct btrfs_free_space_header *h, 464 u64 last_index; !! 1962 struct btrfs_disk_key *key) 465 struct extent_state *llseek_cached_sta !! 1963 { 466 /* Task that allocated this structure. !! 1964 read_eb_member(eb, h, struct btrfs_free_space_header, location, key); 467 struct task_struct *owner_task; !! 1965 } 468 }; << 469 1966 470 static inline u32 BTRFS_LEAF_DATA_SIZE(const s !! 1967 static inline void btrfs_set_free_space_key(struct extent_buffer *eb, >> 1968 struct btrfs_free_space_header *h, >> 1969 struct btrfs_disk_key *key) 471 { 1970 { 472 return info->nodesize - sizeof(struct !! 1971 write_eb_member(eb, h, struct btrfs_free_space_header, location, key); 473 } 1972 } 474 1973 475 static inline u32 BTRFS_MAX_ITEM_SIZE(const st !! 1974 /* struct btrfs_disk_key */ >> 1975 BTRFS_SETGET_STACK_FUNCS(disk_key_objectid, struct btrfs_disk_key, >> 1976 objectid, 64); >> 1977 BTRFS_SETGET_STACK_FUNCS(disk_key_offset, struct btrfs_disk_key, offset, 64); >> 1978 BTRFS_SETGET_STACK_FUNCS(disk_key_type, struct btrfs_disk_key, type, 8); >> 1979 >> 1980 static inline void btrfs_disk_key_to_cpu(struct btrfs_key *cpu, >> 1981 const struct btrfs_disk_key *disk) 476 { 1982 { 477 return BTRFS_LEAF_DATA_SIZE(info) - si !! 1983 cpu->offset = le64_to_cpu(disk->offset); >> 1984 cpu->type = disk->type; >> 1985 cpu->objectid = le64_to_cpu(disk->objectid); 478 } 1986 } 479 1987 480 static inline u32 BTRFS_NODEPTRS_PER_BLOCK(con !! 
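/*
 * Editor's note -- illustrative sketch only, not part of the original
 * header.  Keys live on disk as little-endian struct btrfs_disk_key and
 * are converted to the CPU-endian struct btrfs_key before they are
 * compared or otherwise used; the btrfs_item_key_to_cpu() helper defined
 * just below wraps exactly this pattern.  The example_* name is made up
 * here and 'slot' is assumed to be a valid item index in 'leaf'.
 */
static inline u64 example_item_objectid(struct extent_buffer *leaf, int slot)
{
	struct btrfs_disk_key disk_key;
	struct btrfs_key key;

	btrfs_item_key(leaf, &disk_key, slot);   /* raw on-disk key */
	btrfs_disk_key_to_cpu(&key, &disk_key);  /* native byte order */
	return key.objectid;
}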
1988 static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk, >> 1989 const struct btrfs_key *cpu) 481 { 1990 { 482 return BTRFS_LEAF_DATA_SIZE(info) / si !! 1991 disk->offset = cpu_to_le64(cpu->offset); >> 1992 disk->type = cpu->type; >> 1993 disk->objectid = cpu_to_le64(cpu->objectid); 483 } 1994 } 484 1995 485 static inline u32 BTRFS_MAX_XATTR_SIZE(const s !! 1996 static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb, >> 1997 struct btrfs_key *key, int nr) 486 { 1998 { 487 return BTRFS_MAX_ITEM_SIZE(info) - siz !! 1999 struct btrfs_disk_key disk_key; >> 2000 btrfs_node_key(eb, &disk_key, nr); >> 2001 btrfs_disk_key_to_cpu(key, &disk_key); 488 } 2002 } 489 2003 490 #define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \ !! 2004 static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb, 491 ((bytes) >> (f !! 2005 struct btrfs_key *key, int nr) >> 2006 { >> 2007 struct btrfs_disk_key disk_key; >> 2008 btrfs_item_key(eb, &disk_key, nr); >> 2009 btrfs_disk_key_to_cpu(key, &disk_key); >> 2010 } 492 2011 493 static inline gfp_t btrfs_alloc_write_mask(str !! 2012 static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb, >> 2013 struct btrfs_dir_item *item, >> 2014 struct btrfs_key *key) 494 { 2015 { 495 return mapping_gfp_constraint(mapping, !! 2016 struct btrfs_disk_key disk_key; >> 2017 btrfs_dir_item_key(eb, item, &disk_key); >> 2018 btrfs_disk_key_to_cpu(key, &disk_key); 496 } 2019 } 497 2020 498 void btrfs_error_unpin_extent_range(struct btr !! 2021 static inline u8 btrfs_key_type(const struct btrfs_key *key) 499 int btrfs_discard_extent(struct btrfs_fs_info !! 2022 { 500 u64 num_bytes, u64 *a !! 2023 return key->type; 501 int btrfs_trim_fs(struct btrfs_fs_info *fs_inf !! 2024 } 502 2025 503 /* ctree.c */ !! 2026 static inline void btrfs_set_key_type(struct btrfs_key *key, u8 val) 504 int __init btrfs_ctree_init(void); !! 2027 { 505 void __cold btrfs_ctree_exit(void); !! 
2028 key->type = val; >> 2029 } >> 2030 >> 2031 /* struct btrfs_header */ >> 2032 BTRFS_SETGET_HEADER_FUNCS(header_bytenr, struct btrfs_header, bytenr, 64); >> 2033 BTRFS_SETGET_HEADER_FUNCS(header_generation, struct btrfs_header, >> 2034 generation, 64); >> 2035 BTRFS_SETGET_HEADER_FUNCS(header_owner, struct btrfs_header, owner, 64); >> 2036 BTRFS_SETGET_HEADER_FUNCS(header_nritems, struct btrfs_header, nritems, 32); >> 2037 BTRFS_SETGET_HEADER_FUNCS(header_flags, struct btrfs_header, flags, 64); >> 2038 BTRFS_SETGET_HEADER_FUNCS(header_level, struct btrfs_header, level, 8); >> 2039 BTRFS_SETGET_STACK_FUNCS(stack_header_generation, struct btrfs_header, >> 2040 generation, 64); >> 2041 BTRFS_SETGET_STACK_FUNCS(stack_header_owner, struct btrfs_header, owner, 64); >> 2042 BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header, >> 2043 nritems, 32); >> 2044 BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64); >> 2045 >> 2046 static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag) >> 2047 { >> 2048 return (btrfs_header_flags(eb) & flag) == flag; >> 2049 } >> 2050 >> 2051 static inline int btrfs_set_header_flag(struct extent_buffer *eb, u64 flag) >> 2052 { >> 2053 u64 flags = btrfs_header_flags(eb); >> 2054 btrfs_set_header_flags(eb, flags | flag); >> 2055 return (flags & flag) == flag; >> 2056 } >> 2057 >> 2058 static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag) >> 2059 { >> 2060 u64 flags = btrfs_header_flags(eb); >> 2061 btrfs_set_header_flags(eb, flags & ~flag); >> 2062 return (flags & flag) == flag; >> 2063 } >> 2064 >> 2065 static inline int btrfs_header_backref_rev(struct extent_buffer *eb) >> 2066 { >> 2067 u64 flags = btrfs_header_flags(eb); >> 2068 return flags >> BTRFS_BACKREF_REV_SHIFT; >> 2069 } >> 2070 >> 2071 static inline void btrfs_set_header_backref_rev(struct extent_buffer *eb, >> 2072 int rev) >> 2073 { >> 2074 u64 flags = btrfs_header_flags(eb); >> 2075 flags &= ~BTRFS_BACKREF_REV_MASK; >> 2076 flags |= (u64)rev << BTRFS_BACKREF_REV_SHIFT; >> 2077 btrfs_set_header_flags(eb, flags); >> 2078 } >> 2079 >> 2080 static inline unsigned long btrfs_header_fsid(void) >> 2081 { >> 2082 return offsetof(struct btrfs_header, fsid); >> 2083 } >> 2084 >> 2085 static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) >> 2086 { >> 2087 return offsetof(struct btrfs_header, chunk_tree_uuid); >> 2088 } 506 2089 507 int btrfs_bin_search(struct extent_buffer *eb, !! 2090 static inline int btrfs_is_leaf(struct extent_buffer *eb) 508 const struct btrfs_key *k !! 2091 { >> 2092 return btrfs_header_level(eb) == 0; >> 2093 } 509 2094 510 int __pure btrfs_comp_cpu_keys(const struct bt !! 2095 /* struct btrfs_root_item */ >> 2096 BTRFS_SETGET_FUNCS(disk_root_generation, struct btrfs_root_item, >> 2097 generation, 64); >> 2098 BTRFS_SETGET_FUNCS(disk_root_refs, struct btrfs_root_item, refs, 32); >> 2099 BTRFS_SETGET_FUNCS(disk_root_bytenr, struct btrfs_root_item, bytenr, 64); >> 2100 BTRFS_SETGET_FUNCS(disk_root_level, struct btrfs_root_item, level, 8); 511 2101 512 #ifdef __LITTLE_ENDIAN !! 
2102 BTRFS_SETGET_STACK_FUNCS(root_generation, struct btrfs_root_item, >> 2103 generation, 64); >> 2104 BTRFS_SETGET_STACK_FUNCS(root_bytenr, struct btrfs_root_item, bytenr, 64); >> 2105 BTRFS_SETGET_STACK_FUNCS(root_level, struct btrfs_root_item, level, 8); >> 2106 BTRFS_SETGET_STACK_FUNCS(root_dirid, struct btrfs_root_item, root_dirid, 64); >> 2107 BTRFS_SETGET_STACK_FUNCS(root_refs, struct btrfs_root_item, refs, 32); >> 2108 BTRFS_SETGET_STACK_FUNCS(root_flags, struct btrfs_root_item, flags, 64); >> 2109 BTRFS_SETGET_STACK_FUNCS(root_used, struct btrfs_root_item, bytes_used, 64); >> 2110 BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64); >> 2111 BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item, >> 2112 last_snapshot, 64); >> 2113 BTRFS_SETGET_STACK_FUNCS(root_generation_v2, struct btrfs_root_item, >> 2114 generation_v2, 64); >> 2115 BTRFS_SETGET_STACK_FUNCS(root_ctransid, struct btrfs_root_item, >> 2116 ctransid, 64); >> 2117 BTRFS_SETGET_STACK_FUNCS(root_otransid, struct btrfs_root_item, >> 2118 otransid, 64); >> 2119 BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item, >> 2120 stransid, 64); >> 2121 BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item, >> 2122 rtransid, 64); >> 2123 >> 2124 static inline bool btrfs_root_readonly(struct btrfs_root *root) >> 2125 { >> 2126 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0; >> 2127 } >> 2128 >> 2129 static inline bool btrfs_root_dead(struct btrfs_root *root) >> 2130 { >> 2131 return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0; >> 2132 } >> 2133 >> 2134 /* struct btrfs_root_backup */ >> 2135 BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup, >> 2136 tree_root, 64); >> 2137 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_gen, struct btrfs_root_backup, >> 2138 tree_root_gen, 64); >> 2139 BTRFS_SETGET_STACK_FUNCS(backup_tree_root_level, struct btrfs_root_backup, >> 2140 tree_root_level, 8); >> 2141 >> 2142 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root, struct btrfs_root_backup, >> 2143 chunk_root, 64); >> 2144 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_gen, struct btrfs_root_backup, >> 2145 chunk_root_gen, 64); >> 2146 BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_level, struct btrfs_root_backup, >> 2147 chunk_root_level, 8); >> 2148 >> 2149 BTRFS_SETGET_STACK_FUNCS(backup_extent_root, struct btrfs_root_backup, >> 2150 extent_root, 64); >> 2151 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_gen, struct btrfs_root_backup, >> 2152 extent_root_gen, 64); >> 2153 BTRFS_SETGET_STACK_FUNCS(backup_extent_root_level, struct btrfs_root_backup, >> 2154 extent_root_level, 8); >> 2155 >> 2156 BTRFS_SETGET_STACK_FUNCS(backup_fs_root, struct btrfs_root_backup, >> 2157 fs_root, 64); >> 2158 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_gen, struct btrfs_root_backup, >> 2159 fs_root_gen, 64); >> 2160 BTRFS_SETGET_STACK_FUNCS(backup_fs_root_level, struct btrfs_root_backup, >> 2161 fs_root_level, 8); >> 2162 >> 2163 BTRFS_SETGET_STACK_FUNCS(backup_dev_root, struct btrfs_root_backup, >> 2164 dev_root, 64); >> 2165 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_gen, struct btrfs_root_backup, >> 2166 dev_root_gen, 64); >> 2167 BTRFS_SETGET_STACK_FUNCS(backup_dev_root_level, struct btrfs_root_backup, >> 2168 dev_root_level, 8); >> 2169 >> 2170 BTRFS_SETGET_STACK_FUNCS(backup_csum_root, struct btrfs_root_backup, >> 2171 csum_root, 64); >> 2172 BTRFS_SETGET_STACK_FUNCS(backup_csum_root_gen, struct btrfs_root_backup, >> 2173 csum_root_gen, 64); >> 2174 
BTRFS_SETGET_STACK_FUNCS(backup_csum_root_level, struct btrfs_root_backup, >> 2175 csum_root_level, 8); >> 2176 BTRFS_SETGET_STACK_FUNCS(backup_total_bytes, struct btrfs_root_backup, >> 2177 total_bytes, 64); >> 2178 BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup, >> 2179 bytes_used, 64); >> 2180 BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup, >> 2181 num_devices, 64); >> 2182 >> 2183 /* struct btrfs_balance_item */ >> 2184 BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64); >> 2185 >> 2186 static inline void btrfs_balance_data(struct extent_buffer *eb, >> 2187 struct btrfs_balance_item *bi, >> 2188 struct btrfs_disk_balance_args *ba) >> 2189 { >> 2190 read_eb_member(eb, bi, struct btrfs_balance_item, data, ba); >> 2191 } >> 2192 >> 2193 static inline void btrfs_set_balance_data(struct extent_buffer *eb, >> 2194 struct btrfs_balance_item *bi, >> 2195 struct btrfs_disk_balance_args *ba) >> 2196 { >> 2197 write_eb_member(eb, bi, struct btrfs_balance_item, data, ba); >> 2198 } >> 2199 >> 2200 static inline void btrfs_balance_meta(struct extent_buffer *eb, >> 2201 struct btrfs_balance_item *bi, >> 2202 struct btrfs_disk_balance_args *ba) >> 2203 { >> 2204 read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); >> 2205 } >> 2206 >> 2207 static inline void btrfs_set_balance_meta(struct extent_buffer *eb, >> 2208 struct btrfs_balance_item *bi, >> 2209 struct btrfs_disk_balance_args *ba) >> 2210 { >> 2211 write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba); >> 2212 } >> 2213 >> 2214 static inline void btrfs_balance_sys(struct extent_buffer *eb, >> 2215 struct btrfs_balance_item *bi, >> 2216 struct btrfs_disk_balance_args *ba) >> 2217 { >> 2218 read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); >> 2219 } >> 2220 >> 2221 static inline void btrfs_set_balance_sys(struct extent_buffer *eb, >> 2222 struct btrfs_balance_item *bi, >> 2223 struct btrfs_disk_balance_args *ba) >> 2224 { >> 2225 write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba); >> 2226 } >> 2227 >> 2228 static inline void >> 2229 btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu, >> 2230 struct btrfs_disk_balance_args *disk) >> 2231 { >> 2232 memset(cpu, 0, sizeof(*cpu)); >> 2233 >> 2234 cpu->profiles = le64_to_cpu(disk->profiles); >> 2235 cpu->usage = le64_to_cpu(disk->usage); >> 2236 cpu->devid = le64_to_cpu(disk->devid); >> 2237 cpu->pstart = le64_to_cpu(disk->pstart); >> 2238 cpu->pend = le64_to_cpu(disk->pend); >> 2239 cpu->vstart = le64_to_cpu(disk->vstart); >> 2240 cpu->vend = le64_to_cpu(disk->vend); >> 2241 cpu->target = le64_to_cpu(disk->target); >> 2242 cpu->flags = le64_to_cpu(disk->flags); >> 2243 cpu->limit = le64_to_cpu(disk->limit); >> 2244 cpu->stripes_min = le32_to_cpu(disk->stripes_min); >> 2245 cpu->stripes_max = le32_to_cpu(disk->stripes_max); >> 2246 } >> 2247 >> 2248 static inline void >> 2249 btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk, >> 2250 struct btrfs_balance_args *cpu) >> 2251 { >> 2252 memset(disk, 0, sizeof(*disk)); >> 2253 >> 2254 disk->profiles = cpu_to_le64(cpu->profiles); >> 2255 disk->usage = cpu_to_le64(cpu->usage); >> 2256 disk->devid = cpu_to_le64(cpu->devid); >> 2257 disk->pstart = cpu_to_le64(cpu->pstart); >> 2258 disk->pend = cpu_to_le64(cpu->pend); >> 2259 disk->vstart = cpu_to_le64(cpu->vstart); >> 2260 disk->vend = cpu_to_le64(cpu->vend); >> 2261 disk->target = cpu_to_le64(cpu->target); >> 2262 disk->flags = cpu_to_le64(cpu->flags); >> 2263 disk->limit = 
cpu_to_le64(cpu->limit); >> 2264 disk->stripes_min = cpu_to_le32(cpu->stripes_min); >> 2265 disk->stripes_max = cpu_to_le32(cpu->stripes_max); >> 2266 } >> 2267 >> 2268 /* struct btrfs_super_block */ >> 2269 BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64); >> 2270 BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64); >> 2271 BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block, >> 2272 generation, 64); >> 2273 BTRFS_SETGET_STACK_FUNCS(super_root, struct btrfs_super_block, root, 64); >> 2274 BTRFS_SETGET_STACK_FUNCS(super_sys_array_size, >> 2275 struct btrfs_super_block, sys_chunk_array_size, 32); >> 2276 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_generation, >> 2277 struct btrfs_super_block, chunk_root_generation, 64); >> 2278 BTRFS_SETGET_STACK_FUNCS(super_root_level, struct btrfs_super_block, >> 2279 root_level, 8); >> 2280 BTRFS_SETGET_STACK_FUNCS(super_chunk_root, struct btrfs_super_block, >> 2281 chunk_root, 64); >> 2282 BTRFS_SETGET_STACK_FUNCS(super_chunk_root_level, struct btrfs_super_block, >> 2283 chunk_root_level, 8); >> 2284 BTRFS_SETGET_STACK_FUNCS(super_log_root, struct btrfs_super_block, >> 2285 log_root, 64); >> 2286 BTRFS_SETGET_STACK_FUNCS(super_log_root_transid, struct btrfs_super_block, >> 2287 log_root_transid, 64); >> 2288 BTRFS_SETGET_STACK_FUNCS(super_log_root_level, struct btrfs_super_block, >> 2289 log_root_level, 8); >> 2290 BTRFS_SETGET_STACK_FUNCS(super_total_bytes, struct btrfs_super_block, >> 2291 total_bytes, 64); >> 2292 BTRFS_SETGET_STACK_FUNCS(super_bytes_used, struct btrfs_super_block, >> 2293 bytes_used, 64); >> 2294 BTRFS_SETGET_STACK_FUNCS(super_sectorsize, struct btrfs_super_block, >> 2295 sectorsize, 32); >> 2296 BTRFS_SETGET_STACK_FUNCS(super_nodesize, struct btrfs_super_block, >> 2297 nodesize, 32); >> 2298 BTRFS_SETGET_STACK_FUNCS(super_stripesize, struct btrfs_super_block, >> 2299 stripesize, 32); >> 2300 BTRFS_SETGET_STACK_FUNCS(super_root_dir, struct btrfs_super_block, >> 2301 root_dir_objectid, 64); >> 2302 BTRFS_SETGET_STACK_FUNCS(super_num_devices, struct btrfs_super_block, >> 2303 num_devices, 64); >> 2304 BTRFS_SETGET_STACK_FUNCS(super_compat_flags, struct btrfs_super_block, >> 2305 compat_flags, 64); >> 2306 BTRFS_SETGET_STACK_FUNCS(super_compat_ro_flags, struct btrfs_super_block, >> 2307 compat_ro_flags, 64); >> 2308 BTRFS_SETGET_STACK_FUNCS(super_incompat_flags, struct btrfs_super_block, >> 2309 incompat_flags, 64); >> 2310 BTRFS_SETGET_STACK_FUNCS(super_csum_type, struct btrfs_super_block, >> 2311 csum_type, 16); >> 2312 BTRFS_SETGET_STACK_FUNCS(super_cache_generation, struct btrfs_super_block, >> 2313 cache_generation, 64); >> 2314 BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64); >> 2315 BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block, >> 2316 uuid_tree_generation, 64); >> 2317 >> 2318 static inline int btrfs_super_csum_size(struct btrfs_super_block *s) >> 2319 { >> 2320 u16 t = btrfs_super_csum_type(s); >> 2321 /* >> 2322 * csum type is validated at mount time >> 2323 */ >> 2324 return btrfs_csum_sizes[t]; >> 2325 } >> 2326 >> 2327 static inline unsigned long btrfs_leaf_data(struct extent_buffer *l) >> 2328 { >> 2329 return offsetof(struct btrfs_leaf, items); >> 2330 } 513 2331 514 /* 2332 /* 515 * Compare two keys, on little-endian the disk !! 2333 * The leaf data grows from end-to-front in the node. 516 * we can avoid the conversion. !! 
2334 * this returns the address of the start of the last item, >> 2335 * which is the stop of the leaf data stack 517 */ 2336 */ 518 static inline int btrfs_comp_keys(const struct !! 2337 static inline unsigned int leaf_data_end(struct btrfs_fs_info *fs_info, 519 const struct !! 2338 struct extent_buffer *leaf) 520 { 2339 { 521 const struct btrfs_key *k1 = (const st !! 2340 u32 nr = btrfs_header_nritems(leaf); >> 2341 >> 2342 if (nr == 0) >> 2343 return BTRFS_LEAF_DATA_SIZE(fs_info); >> 2344 return btrfs_item_offset_nr(leaf, nr - 1); >> 2345 } >> 2346 >> 2347 /* struct btrfs_file_extent_item */ >> 2348 BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8); >> 2349 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr, >> 2350 struct btrfs_file_extent_item, disk_bytenr, 64); >> 2351 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_offset, >> 2352 struct btrfs_file_extent_item, offset, 64); >> 2353 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_generation, >> 2354 struct btrfs_file_extent_item, generation, 64); >> 2355 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_num_bytes, >> 2356 struct btrfs_file_extent_item, num_bytes, 64); >> 2357 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_num_bytes, >> 2358 struct btrfs_file_extent_item, disk_num_bytes, 64); >> 2359 BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression, >> 2360 struct btrfs_file_extent_item, compression, 8); >> 2361 >> 2362 static inline unsigned long >> 2363 btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e) >> 2364 { >> 2365 return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START; >> 2366 } >> 2367 >> 2368 static inline u32 btrfs_file_extent_calc_inline_size(u32 datasize) >> 2369 { >> 2370 return BTRFS_FILE_EXTENT_INLINE_DATA_START + datasize; >> 2371 } >> 2372 >> 2373 BTRFS_SETGET_FUNCS(file_extent_disk_bytenr, struct btrfs_file_extent_item, >> 2374 disk_bytenr, 64); >> 2375 BTRFS_SETGET_FUNCS(file_extent_generation, struct btrfs_file_extent_item, >> 2376 generation, 64); >> 2377 BTRFS_SETGET_FUNCS(file_extent_disk_num_bytes, struct btrfs_file_extent_item, >> 2378 disk_num_bytes, 64); >> 2379 BTRFS_SETGET_FUNCS(file_extent_offset, struct btrfs_file_extent_item, >> 2380 offset, 64); >> 2381 BTRFS_SETGET_FUNCS(file_extent_num_bytes, struct btrfs_file_extent_item, >> 2382 num_bytes, 64); >> 2383 BTRFS_SETGET_FUNCS(file_extent_ram_bytes, struct btrfs_file_extent_item, >> 2384 ram_bytes, 64); >> 2385 BTRFS_SETGET_FUNCS(file_extent_compression, struct btrfs_file_extent_item, >> 2386 compression, 8); >> 2387 BTRFS_SETGET_FUNCS(file_extent_encryption, struct btrfs_file_extent_item, >> 2388 encryption, 8); >> 2389 BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item, >> 2390 other_encoding, 16); 522 2391 523 return btrfs_comp_cpu_keys(k1, k2); !! 2392 /* >> 2393 * this returns the number of bytes used by the item on disk, minus the >> 2394 * size of any extent headers. If a file is compressed on disk, this is >> 2395 * the compressed size >> 2396 */ >> 2397 static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb, >> 2398 struct btrfs_item *e) >> 2399 { >> 2400 return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START; 524 } 2401 } 525 2402 526 #else !! 2403 /* this returns the number of file bytes represented by the inline item. 
>> 2404 * If an item is compressed, this is the uncompressed size >> 2405 */ >> 2406 static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb, >> 2407 int slot, >> 2408 struct btrfs_file_extent_item *fi) >> 2409 { >> 2410 struct btrfs_map_token token; >> 2411 >> 2412 btrfs_init_map_token(&token); >> 2413 /* >> 2414 * return the space used on disk if this item isn't >> 2415 * compressed or encoded >> 2416 */ >> 2417 if (btrfs_token_file_extent_compression(eb, fi, &token) == 0 && >> 2418 btrfs_token_file_extent_encryption(eb, fi, &token) == 0 && >> 2419 btrfs_token_file_extent_other_encoding(eb, fi, &token) == 0) { >> 2420 return btrfs_file_extent_inline_item_len(eb, >> 2421 btrfs_item_nr(slot)); >> 2422 } >> 2423 >> 2424 /* otherwise use the ram bytes field */ >> 2425 return btrfs_token_file_extent_ram_bytes(eb, fi, &token); >> 2426 } >> 2427 >> 2428 >> 2429 /* btrfs_dev_stats_item */ >> 2430 static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb, >> 2431 struct btrfs_dev_stats_item *ptr, >> 2432 int index) >> 2433 { >> 2434 u64 val; >> 2435 >> 2436 read_extent_buffer(eb, &val, >> 2437 offsetof(struct btrfs_dev_stats_item, values) + >> 2438 ((unsigned long)ptr) + (index * sizeof(u64)), >> 2439 sizeof(val)); >> 2440 return val; >> 2441 } >> 2442 >> 2443 static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb, >> 2444 struct btrfs_dev_stats_item *ptr, >> 2445 int index, u64 val) >> 2446 { >> 2447 write_extent_buffer(eb, &val, >> 2448 offsetof(struct btrfs_dev_stats_item, values) + >> 2449 ((unsigned long)ptr) + (index * sizeof(u64)), >> 2450 sizeof(val)); >> 2451 } >> 2452 >> 2453 /* btrfs_qgroup_status_item */ >> 2454 BTRFS_SETGET_FUNCS(qgroup_status_generation, struct btrfs_qgroup_status_item, >> 2455 generation, 64); >> 2456 BTRFS_SETGET_FUNCS(qgroup_status_version, struct btrfs_qgroup_status_item, >> 2457 version, 64); >> 2458 BTRFS_SETGET_FUNCS(qgroup_status_flags, struct btrfs_qgroup_status_item, >> 2459 flags, 64); >> 2460 BTRFS_SETGET_FUNCS(qgroup_status_rescan, struct btrfs_qgroup_status_item, >> 2461 rescan, 64); >> 2462 >> 2463 /* btrfs_qgroup_info_item */ >> 2464 BTRFS_SETGET_FUNCS(qgroup_info_generation, struct btrfs_qgroup_info_item, >> 2465 generation, 64); >> 2466 BTRFS_SETGET_FUNCS(qgroup_info_rfer, struct btrfs_qgroup_info_item, rfer, 64); >> 2467 BTRFS_SETGET_FUNCS(qgroup_info_rfer_cmpr, struct btrfs_qgroup_info_item, >> 2468 rfer_cmpr, 64); >> 2469 BTRFS_SETGET_FUNCS(qgroup_info_excl, struct btrfs_qgroup_info_item, excl, 64); >> 2470 BTRFS_SETGET_FUNCS(qgroup_info_excl_cmpr, struct btrfs_qgroup_info_item, >> 2471 excl_cmpr, 64); >> 2472 >> 2473 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_generation, >> 2474 struct btrfs_qgroup_info_item, generation, 64); >> 2475 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer, struct btrfs_qgroup_info_item, >> 2476 rfer, 64); >> 2477 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_rfer_cmpr, >> 2478 struct btrfs_qgroup_info_item, rfer_cmpr, 64); >> 2479 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl, struct btrfs_qgroup_info_item, >> 2480 excl, 64); >> 2481 BTRFS_SETGET_STACK_FUNCS(stack_qgroup_info_excl_cmpr, >> 2482 struct btrfs_qgroup_info_item, excl_cmpr, 64); >> 2483 >> 2484 /* btrfs_qgroup_limit_item */ >> 2485 BTRFS_SETGET_FUNCS(qgroup_limit_flags, struct btrfs_qgroup_limit_item, >> 2486 flags, 64); >> 2487 BTRFS_SETGET_FUNCS(qgroup_limit_max_rfer, struct btrfs_qgroup_limit_item, >> 2488 max_rfer, 64); >> 2489 BTRFS_SETGET_FUNCS(qgroup_limit_max_excl, struct btrfs_qgroup_limit_item, >> 2490 
max_excl, 64); >> 2491 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_rfer, struct btrfs_qgroup_limit_item, >> 2492 rsv_rfer, 64); >> 2493 BTRFS_SETGET_FUNCS(qgroup_limit_rsv_excl, struct btrfs_qgroup_limit_item, >> 2494 rsv_excl, 64); >> 2495 >> 2496 /* btrfs_dev_replace_item */ >> 2497 BTRFS_SETGET_FUNCS(dev_replace_src_devid, >> 2498 struct btrfs_dev_replace_item, src_devid, 64); >> 2499 BTRFS_SETGET_FUNCS(dev_replace_cont_reading_from_srcdev_mode, >> 2500 struct btrfs_dev_replace_item, cont_reading_from_srcdev_mode, >> 2501 64); >> 2502 BTRFS_SETGET_FUNCS(dev_replace_replace_state, struct btrfs_dev_replace_item, >> 2503 replace_state, 64); >> 2504 BTRFS_SETGET_FUNCS(dev_replace_time_started, struct btrfs_dev_replace_item, >> 2505 time_started, 64); >> 2506 BTRFS_SETGET_FUNCS(dev_replace_time_stopped, struct btrfs_dev_replace_item, >> 2507 time_stopped, 64); >> 2508 BTRFS_SETGET_FUNCS(dev_replace_num_write_errors, struct btrfs_dev_replace_item, >> 2509 num_write_errors, 64); >> 2510 BTRFS_SETGET_FUNCS(dev_replace_num_uncorrectable_read_errors, >> 2511 struct btrfs_dev_replace_item, num_uncorrectable_read_errors, >> 2512 64); >> 2513 BTRFS_SETGET_FUNCS(dev_replace_cursor_left, struct btrfs_dev_replace_item, >> 2514 cursor_left, 64); >> 2515 BTRFS_SETGET_FUNCS(dev_replace_cursor_right, struct btrfs_dev_replace_item, >> 2516 cursor_right, 64); >> 2517 >> 2518 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_src_devid, >> 2519 struct btrfs_dev_replace_item, src_devid, 64); >> 2520 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cont_reading_from_srcdev_mode, >> 2521 struct btrfs_dev_replace_item, >> 2522 cont_reading_from_srcdev_mode, 64); >> 2523 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_replace_state, >> 2524 struct btrfs_dev_replace_item, replace_state, 64); >> 2525 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_started, >> 2526 struct btrfs_dev_replace_item, time_started, 64); >> 2527 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_time_stopped, >> 2528 struct btrfs_dev_replace_item, time_stopped, 64); >> 2529 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_write_errors, >> 2530 struct btrfs_dev_replace_item, num_write_errors, 64); >> 2531 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_num_uncorrectable_read_errors, >> 2532 struct btrfs_dev_replace_item, >> 2533 num_uncorrectable_read_errors, 64); >> 2534 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left, >> 2535 struct btrfs_dev_replace_item, cursor_left, 64); >> 2536 BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right, >> 2537 struct btrfs_dev_replace_item, cursor_right, 64); >> 2538 >> 2539 /* helper function to cast into the data area of the leaf. */ >> 2540 #define btrfs_item_ptr(leaf, slot, type) \ >> 2541 ((type *)(btrfs_leaf_data(leaf) + \ >> 2542 btrfs_item_offset_nr(leaf, slot))) >> 2543 >> 2544 #define btrfs_item_ptr_offset(leaf, slot) \ >> 2545 ((unsigned long)(btrfs_leaf_data(leaf) + \ >> 2546 btrfs_item_offset_nr(leaf, slot))) 527 2547 528 /* Compare two keys in a memcmp fashion. */ !! 2548 static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) 529 static inline int btrfs_comp_keys(const struct << 530 const struct << 531 { 2549 { 532 struct btrfs_key k1; !! 2550 return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) && >> 2551 (space_info->flags & BTRFS_BLOCK_GROUP_DATA)); >> 2552 } 533 2553 534 btrfs_disk_key_to_cpu(&k1, disk); !! 
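/*
 * Editor's note -- illustrative sketch only, not part of the original
 * header.  btrfs_item_ptr() above yields a typed "pointer" (really an
 * offset into the extent buffer) to the data area of a leaf slot, and
 * the generated accessors then read individual fields with the proper
 * endianness.  The caller is assumed to have checked that 'slot' holds
 * a BTRFS_INODE_ITEM_KEY item; the example_* name is made up here.
 */
static inline u64 example_inode_size(struct extent_buffer *leaf, int slot)
{
	struct btrfs_inode_item *ii;

	ii = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
	return btrfs_inode_size(leaf, ii);	/* le64 field, CPU-order result */
}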
2554 static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping) >> 2555 { >> 2556 return mapping_gfp_constraint(mapping, ~__GFP_FS); >> 2557 } >> 2558 >> 2559 /* extent-tree.c */ 535 2560 536 return btrfs_comp_cpu_keys(&k1, k2); !! 2561 u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes); >> 2562 >> 2563 static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info, >> 2564 unsigned num_items) >> 2565 { >> 2566 return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items; 537 } 2567 } 538 2568 539 #endif !! 2569 /* >> 2570 * Doing a truncate won't result in new nodes or leaves, just what we need for >> 2571 * COW. >> 2572 */ >> 2573 static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info, >> 2574 unsigned num_items) >> 2575 { >> 2576 return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items; >> 2577 } >> 2578 >> 2579 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, >> 2580 struct btrfs_fs_info *fs_info); >> 2581 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans, >> 2582 struct btrfs_fs_info *fs_info); >> 2583 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, >> 2584 const u64 start); >> 2585 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg); >> 2586 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); >> 2587 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); >> 2588 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg); >> 2589 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); >> 2590 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, >> 2591 struct btrfs_fs_info *fs_info, unsigned long count); >> 2592 int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info, >> 2593 unsigned long count, u64 transid, int wait); >> 2594 int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len); >> 2595 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, >> 2596 struct btrfs_fs_info *fs_info, u64 bytenr, >> 2597 u64 offset, int metadata, u64 *refs, u64 *flags); >> 2598 int btrfs_pin_extent(struct btrfs_fs_info *fs_info, >> 2599 u64 bytenr, u64 num, int reserved); >> 2600 int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info, >> 2601 u64 bytenr, u64 num_bytes); >> 2602 int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info, >> 2603 struct extent_buffer *eb); >> 2604 int btrfs_cross_ref_exist(struct btrfs_root *root, >> 2605 u64 objectid, u64 offset, u64 bytenr); >> 2606 struct btrfs_block_group_cache *btrfs_lookup_block_group( >> 2607 struct btrfs_fs_info *info, >> 2608 u64 bytenr); >> 2609 void btrfs_get_block_group(struct btrfs_block_group_cache *cache); >> 2610 void btrfs_put_block_group(struct btrfs_block_group_cache *cache); >> 2611 int get_block_group_index(struct btrfs_block_group_cache *cache); >> 2612 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, >> 2613 struct btrfs_root *root, >> 2614 u64 parent, u64 root_objectid, >> 2615 const struct btrfs_disk_key *key, >> 2616 int level, u64 hint, >> 2617 u64 empty_size); >> 2618 void btrfs_free_tree_block(struct btrfs_trans_handle *trans, >> 2619 struct btrfs_root *root, >> 2620 struct extent_buffer *buf, >> 2621 u64 parent, int last_ref); >> 2622 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, >> 2623 u64 root_objectid, u64 owner, >> 2624 u64 offset, u64 ram_bytes, >> 2625 struct btrfs_key 
*ins); >> 2626 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, >> 2627 struct btrfs_fs_info *fs_info, >> 2628 u64 root_objectid, u64 owner, u64 offset, >> 2629 struct btrfs_key *ins); >> 2630 int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes, >> 2631 u64 min_alloc_size, u64 empty_size, u64 hint_byte, >> 2632 struct btrfs_key *ins, int is_data, int delalloc); >> 2633 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, >> 2634 struct extent_buffer *buf, int full_backref); >> 2635 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, >> 2636 struct extent_buffer *buf, int full_backref); >> 2637 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, >> 2638 struct btrfs_fs_info *fs_info, >> 2639 u64 bytenr, u64 num_bytes, u64 flags, >> 2640 int level, int is_data); >> 2641 int btrfs_free_extent(struct btrfs_trans_handle *trans, >> 2642 struct btrfs_fs_info *fs_info, >> 2643 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, >> 2644 u64 owner, u64 offset); >> 2645 >> 2646 int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, >> 2647 u64 start, u64 len, int delalloc); >> 2648 int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info, >> 2649 u64 start, u64 len); >> 2650 void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info); >> 2651 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, >> 2652 struct btrfs_fs_info *fs_info); >> 2653 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, >> 2654 struct btrfs_fs_info *fs_info, >> 2655 u64 bytenr, u64 num_bytes, u64 parent, >> 2656 u64 root_objectid, u64 owner, u64 offset); >> 2657 >> 2658 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans, >> 2659 struct btrfs_fs_info *fs_info); >> 2660 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, >> 2661 struct btrfs_fs_info *fs_info); >> 2662 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans, >> 2663 struct btrfs_fs_info *fs_info); >> 2664 int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr); >> 2665 int btrfs_free_block_groups(struct btrfs_fs_info *info); >> 2666 int btrfs_read_block_groups(struct btrfs_fs_info *info); >> 2667 int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr); >> 2668 int btrfs_make_block_group(struct btrfs_trans_handle *trans, >> 2669 struct btrfs_fs_info *fs_info, u64 bytes_used, >> 2670 u64 type, u64 chunk_objectid, u64 chunk_offset, >> 2671 u64 size); >> 2672 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( >> 2673 struct btrfs_fs_info *fs_info, >> 2674 const u64 chunk_offset); >> 2675 int btrfs_remove_block_group(struct btrfs_trans_handle *trans, >> 2676 struct btrfs_fs_info *fs_info, u64 group_start, >> 2677 struct extent_map *em); >> 2678 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info); >> 2679 void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache); >> 2680 void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache); >> 2681 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, >> 2682 struct btrfs_fs_info *fs_info); >> 2683 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data); >> 2684 void btrfs_clear_space_info_full(struct btrfs_fs_info *info); >> 2685 >> 2686 enum btrfs_reserve_flush_enum { >> 2687 /* If we are in the transaction, we can't flush anything.*/ >> 2688 BTRFS_RESERVE_NO_FLUSH, >> 2689 /* >> 2690 * Flushing delalloc may cause deadlock somewhere, 
in this >> 2691 * case, use FLUSH LIMIT >> 2692 */ >> 2693 BTRFS_RESERVE_FLUSH_LIMIT, >> 2694 BTRFS_RESERVE_FLUSH_ALL, >> 2695 }; >> 2696 >> 2697 enum btrfs_flush_state { >> 2698 FLUSH_DELAYED_ITEMS_NR = 1, >> 2699 FLUSH_DELAYED_ITEMS = 2, >> 2700 FLUSH_DELALLOC = 3, >> 2701 FLUSH_DELALLOC_WAIT = 4, >> 2702 ALLOC_CHUNK = 5, >> 2703 COMMIT_TRANS = 6, >> 2704 }; 540 2705 >> 2706 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len); >> 2707 int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes); >> 2708 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len); >> 2709 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, >> 2710 u64 len); >> 2711 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, >> 2712 struct btrfs_fs_info *fs_info); >> 2713 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans); >> 2714 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, >> 2715 struct btrfs_inode *inode); >> 2716 void btrfs_orphan_release_metadata(struct btrfs_inode *inode); >> 2717 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, >> 2718 struct btrfs_block_rsv *rsv, >> 2719 int nitems, >> 2720 u64 *qgroup_reserved, bool use_global_rsv); >> 2721 void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info, >> 2722 struct btrfs_block_rsv *rsv); >> 2723 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes); >> 2724 void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes); >> 2725 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len); >> 2726 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len); >> 2727 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type); >> 2728 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info, >> 2729 unsigned short type); >> 2730 void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info, >> 2731 struct btrfs_block_rsv *rsv); >> 2732 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv); >> 2733 int btrfs_block_rsv_add(struct btrfs_root *root, >> 2734 struct btrfs_block_rsv *block_rsv, u64 num_bytes, >> 2735 enum btrfs_reserve_flush_enum flush); >> 2736 int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor); >> 2737 int btrfs_block_rsv_refill(struct btrfs_root *root, >> 2738 struct btrfs_block_rsv *block_rsv, u64 min_reserved, >> 2739 enum btrfs_reserve_flush_enum flush); >> 2740 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, >> 2741 struct btrfs_block_rsv *dst_rsv, u64 num_bytes, >> 2742 int update_size); >> 2743 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info, >> 2744 struct btrfs_block_rsv *dest, u64 num_bytes, >> 2745 int min_factor); >> 2746 void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info, >> 2747 struct btrfs_block_rsv *block_rsv, >> 2748 u64 num_bytes); >> 2749 int btrfs_inc_block_group_ro(struct btrfs_fs_info *fs_info, >> 2750 struct btrfs_block_group_cache *cache); >> 2751 void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache); >> 2752 void btrfs_put_block_group_cache(struct btrfs_fs_info *info); >> 2753 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); >> 2754 int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, >> 2755 u64 start, u64 end); >> 2756 int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr, >> 2757 u64 num_bytes, u64 *actual_bytes); >> 2758 int 
btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, >> 2759 struct btrfs_fs_info *fs_info, u64 type); >> 2760 int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range); >> 2761 >> 2762 int btrfs_init_space_info(struct btrfs_fs_info *fs_info); >> 2763 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans, >> 2764 struct btrfs_fs_info *fs_info); >> 2765 int __get_raid_index(u64 flags); >> 2766 int btrfs_start_write_no_snapshoting(struct btrfs_root *root); >> 2767 void btrfs_end_write_no_snapshoting(struct btrfs_root *root); >> 2768 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root); >> 2769 void check_system_chunk(struct btrfs_trans_handle *trans, >> 2770 struct btrfs_fs_info *fs_info, const u64 type); >> 2771 u64 add_new_free_space(struct btrfs_block_group_cache *block_group, >> 2772 struct btrfs_fs_info *info, u64 start, u64 end); >> 2773 >> 2774 /* ctree.c */ >> 2775 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key, >> 2776 int level, int *slot); >> 2777 int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2); 541 int btrfs_previous_item(struct btrfs_root *roo 2778 int btrfs_previous_item(struct btrfs_root *root, 542 struct btrfs_path *pat 2779 struct btrfs_path *path, u64 min_objectid, 543 int type); 2780 int type); 544 int btrfs_previous_extent_item(struct btrfs_ro 2781 int btrfs_previous_extent_item(struct btrfs_root *root, 545 struct btrfs_path *pat 2782 struct btrfs_path *path, u64 min_objectid); 546 void btrfs_set_item_key_safe(struct btrfs_tran !! 2783 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info, 547 const struct btrf !! 2784 struct btrfs_path *path, 548 const struct btrf 2785 const struct btrfs_key *new_key); 549 struct extent_buffer *btrfs_root_node(struct b 2786 struct extent_buffer *btrfs_root_node(struct btrfs_root *root); >> 2787 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root); 550 int btrfs_find_next_key(struct btrfs_root *roo 2788 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path, 551 struct btrfs_key *key, 2789 struct btrfs_key *key, int lowest_level, 552 u64 min_trans); 2790 u64 min_trans); 553 int btrfs_search_forward(struct btrfs_root *ro 2791 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key, 554 struct btrfs_path *pa 2792 struct btrfs_path *path, 555 u64 min_trans); 2793 u64 min_trans); 556 struct extent_buffer *btrfs_read_node_slot(str !! 2794 enum btrfs_compare_tree_result { 557 int !! 2795 BTRFS_COMPARE_TREE_NEW, 558 !! 2796 BTRFS_COMPARE_TREE_DELETED, >> 2797 BTRFS_COMPARE_TREE_CHANGED, >> 2798 BTRFS_COMPARE_TREE_SAME, >> 2799 }; >> 2800 typedef int (*btrfs_changed_cb_t)(struct btrfs_root *left_root, >> 2801 struct btrfs_root *right_root, >> 2802 struct btrfs_path *left_path, >> 2803 struct btrfs_path *right_path, >> 2804 struct btrfs_key *key, >> 2805 enum btrfs_compare_tree_result result, >> 2806 void *ctx); >> 2807 int btrfs_compare_trees(struct btrfs_root *left_root, >> 2808 struct btrfs_root *right_root, >> 2809 btrfs_changed_cb_t cb, void *ctx); 559 int btrfs_cow_block(struct btrfs_trans_handle 2810 int btrfs_cow_block(struct btrfs_trans_handle *trans, 560 struct btrfs_root *root, s 2811 struct btrfs_root *root, struct extent_buffer *buf, 561 struct extent_buffer *pare 2812 struct extent_buffer *parent, int parent_slot, 562 struct extent_buffer **cow !! 
2813 struct extent_buffer **cow_ret); 563 enum btrfs_lock_nesting ne << 564 int btrfs_force_cow_block(struct btrfs_trans_h << 565 struct btrfs_root *r << 566 struct extent_buffer << 567 struct extent_buffer << 568 struct extent_buffer << 569 u64 search_start, u6 << 570 enum btrfs_lock_nest << 571 int btrfs_copy_root(struct btrfs_trans_handle 2814 int btrfs_copy_root(struct btrfs_trans_handle *trans, 572 struct btrfs_root *root, 2815 struct btrfs_root *root, 573 struct extent_buffer *bu 2816 struct extent_buffer *buf, 574 struct extent_buffer **c 2817 struct extent_buffer **cow_ret, u64 new_root_objectid); 575 bool btrfs_block_can_be_shared(struct btrfs_tr !! 2818 int btrfs_block_can_be_shared(struct btrfs_root *root, 576 struct btrfs_ro !! 2819 struct extent_buffer *buf); 577 struct extent_b !! 2820 void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path, 578 int btrfs_del_ptr(struct btrfs_trans_handle *t !! 2821 u32 data_size); 579 struct btrfs_path *path, int !! 2822 void btrfs_truncate_item(struct btrfs_fs_info *fs_info, 580 void btrfs_extend_item(struct btrfs_trans_hand !! 2823 struct btrfs_path *path, u32 new_size, int from_end); 581 const struct btrfs_path << 582 void btrfs_truncate_item(struct btrfs_trans_ha << 583 const struct btrfs_pa << 584 int btrfs_split_item(struct btrfs_trans_handle 2824 int btrfs_split_item(struct btrfs_trans_handle *trans, 585 struct btrfs_root *root, 2825 struct btrfs_root *root, 586 struct btrfs_path *path, 2826 struct btrfs_path *path, 587 const struct btrfs_key *n 2827 const struct btrfs_key *new_key, 588 unsigned long split_offse 2828 unsigned long split_offset); 589 int btrfs_duplicate_item(struct btrfs_trans_ha 2829 int btrfs_duplicate_item(struct btrfs_trans_handle *trans, 590 struct btrfs_root *ro 2830 struct btrfs_root *root, 591 struct btrfs_path *pa 2831 struct btrfs_path *path, 592 const struct btrfs_ke 2832 const struct btrfs_key *new_key); 593 int btrfs_find_item(struct btrfs_root *fs_root 2833 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, 594 u64 inum, u64 ioff, u8 key_typ 2834 u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key); 595 int btrfs_search_slot(struct btrfs_trans_handl 2835 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, 596 const struct btrfs_key * 2836 const struct btrfs_key *key, struct btrfs_path *p, 597 int ins_len, int cow); 2837 int ins_len, int cow); 598 int btrfs_search_old_slot(struct btrfs_root *r 2838 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key, 599 struct btrfs_path *p 2839 struct btrfs_path *p, u64 time_seq); 600 int btrfs_search_slot_for_read(struct btrfs_ro 2840 int btrfs_search_slot_for_read(struct btrfs_root *root, 601 const struct bt 2841 const struct btrfs_key *key, 602 struct btrfs_pa 2842 struct btrfs_path *p, int find_higher, 603 int return_any) 2843 int return_any); >> 2844 int btrfs_realloc_node(struct btrfs_trans_handle *trans, >> 2845 struct btrfs_root *root, struct extent_buffer *parent, >> 2846 int start_slot, u64 *last_ret, >> 2847 struct btrfs_key *progress); 604 void btrfs_release_path(struct btrfs_path *p); 2848 void btrfs_release_path(struct btrfs_path *p); 605 struct btrfs_path *btrfs_alloc_path(void); 2849 struct btrfs_path *btrfs_alloc_path(void); 606 void btrfs_free_path(struct btrfs_path *p); 2850 void btrfs_free_path(struct btrfs_path *p); 607 DEFINE_FREE(btrfs_free_path, struct btrfs_path !! 
2851 void btrfs_set_path_blocking(struct btrfs_path *p); >> 2852 void btrfs_clear_path_blocking(struct btrfs_path *p, >> 2853 struct extent_buffer *held, int held_rw); >> 2854 void btrfs_unlock_up_safe(struct btrfs_path *p, int level); 608 2855 609 int btrfs_del_items(struct btrfs_trans_handle 2856 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, 610 struct btrfs_path *path, in 2857 struct btrfs_path *path, int slot, int nr); 611 static inline int btrfs_del_item(struct btrfs_ 2858 static inline int btrfs_del_item(struct btrfs_trans_handle *trans, 612 struct btrfs_ 2859 struct btrfs_root *root, 613 struct btrfs_ 2860 struct btrfs_path *path) 614 { 2861 { 615 return btrfs_del_items(trans, root, pa 2862 return btrfs_del_items(trans, root, path, path->slots[0], 1); 616 } 2863 } 617 2864 618 /* !! 2865 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path, 619 * Describes a batch of items to insert in a b !! 2866 const struct btrfs_key *cpu_key, u32 *data_size, 620 * btrfs_insert_empty_items(). !! 2867 u32 total_data, u32 total_size, int nr); 621 */ << 622 struct btrfs_item_batch { << 623 /* << 624 * Pointer to an array containing the << 625 * sorted order). << 626 */ << 627 const struct btrfs_key *keys; << 628 /* Pointer to an array containing the << 629 const u32 *data_sizes; << 630 /* << 631 * The sum of data sizes for all items << 632 * setting up the data_sizes array, so << 633 * than having btrfs_insert_empty_item << 634 * doing it, as it would avoid an extr << 635 * array, and in the case of setup_ite << 636 * it while holding a write lock on a << 637 * too, unnecessarily increasing the s << 638 */ << 639 u32 total_data_size; << 640 /* Size of the keys and data_sizes arr << 641 int nr; << 642 }; << 643 << 644 void btrfs_setup_item_for_insert(struct btrfs_ << 645 struct btrfs_ << 646 struct btrfs_ << 647 const struct << 648 u32 data_size << 649 int btrfs_insert_item(struct btrfs_trans_handl 2868 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, 650 const struct btrfs_key * 2869 const struct btrfs_key *key, void *data, u32 data_size); 651 int btrfs_insert_empty_items(struct btrfs_tran 2870 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, 652 struct btrfs_root 2871 struct btrfs_root *root, 653 struct btrfs_path 2872 struct btrfs_path *path, 654 const struct btrf !! 2873 const struct btrfs_key *cpu_key, u32 *data_size, >> 2874 int nr); 655 2875 656 static inline int btrfs_insert_empty_item(stru 2876 static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans, 657 stru 2877 struct btrfs_root *root, 658 stru 2878 struct btrfs_path *path, 659 cons 2879 const struct btrfs_key *key, 660 u32 2880 u32 data_size) 661 { 2881 { 662 struct btrfs_item_batch batch; !! 
2882 return btrfs_insert_empty_items(trans, root, path, key, &data_size, 1); 663 << 664 batch.keys = key; << 665 batch.data_sizes = &data_size; << 666 batch.total_data_size = data_size; << 667 batch.nr = 1; << 668 << 669 return btrfs_insert_empty_items(trans, << 670 } 2883 } 671 2884 >> 2885 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); >> 2886 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); 672 int btrfs_next_old_leaf(struct btrfs_root *roo 2887 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path, 673 u64 time_seq); 2888 u64 time_seq); >> 2889 static inline int btrfs_next_old_item(struct btrfs_root *root, >> 2890 struct btrfs_path *p, u64 time_seq) >> 2891 { >> 2892 ++p->slots[0]; >> 2893 if (p->slots[0] >= btrfs_header_nritems(p->nodes[0])) >> 2894 return btrfs_next_old_leaf(root, p, time_seq); >> 2895 return 0; >> 2896 } >> 2897 static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) >> 2898 { >> 2899 return btrfs_next_old_item(root, p, 0); >> 2900 } >> 2901 int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info, >> 2902 struct extent_buffer *leaf); >> 2903 int __must_check btrfs_drop_snapshot(struct btrfs_root *root, >> 2904 struct btrfs_block_rsv *block_rsv, >> 2905 int update_ref, int for_reloc); >> 2906 int btrfs_drop_subtree(struct btrfs_trans_handle *trans, >> 2907 struct btrfs_root *root, >> 2908 struct extent_buffer *node, >> 2909 struct extent_buffer *parent); >> 2910 static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info) >> 2911 { >> 2912 /* >> 2913 * Do it this way so we only ever do one test_bit in the normal case. >> 2914 */ >> 2915 if (test_bit(BTRFS_FS_CLOSING_START, &fs_info->flags)) { >> 2916 if (test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags)) >> 2917 return 2; >> 2918 return 1; >> 2919 } >> 2920 return 0; >> 2921 } >> 2922 >> 2923 /* >> 2924 * If we remount the fs to be R/O or umount the fs, the cleaner needn't do >> 2925 * anything except sleeping. This function is used to check the status of >> 2926 * the fs. 
>> 2927 */ >> 2928 static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info) >> 2929 { >> 2930 return fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info); >> 2931 } >> 2932 >> 2933 static inline void free_fs_info(struct btrfs_fs_info *fs_info) >> 2934 { >> 2935 kfree(fs_info->balance_ctl); >> 2936 kfree(fs_info->delayed_root); >> 2937 kfree(fs_info->extent_root); >> 2938 kfree(fs_info->tree_root); >> 2939 kfree(fs_info->chunk_root); >> 2940 kfree(fs_info->dev_root); >> 2941 kfree(fs_info->csum_root); >> 2942 kfree(fs_info->quota_root); >> 2943 kfree(fs_info->uuid_root); >> 2944 kfree(fs_info->free_space_root); >> 2945 kfree(fs_info->super_copy); >> 2946 kfree(fs_info->super_for_commit); >> 2947 security_free_mnt_opts(&fs_info->security_opts); >> 2948 kfree(fs_info); >> 2949 } >> 2950 >> 2951 /* tree mod log functions from ctree.c */ >> 2952 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info, >> 2953 struct seq_list *elem); >> 2954 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, >> 2955 struct seq_list *elem); >> 2956 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq); >> 2957 >> 2958 /* root-item.c */ >> 2959 int btrfs_add_root_ref(struct btrfs_trans_handle *trans, >> 2960 struct btrfs_fs_info *fs_info, >> 2961 u64 root_id, u64 ref_id, u64 dirid, u64 sequence, >> 2962 const char *name, int name_len); >> 2963 int btrfs_del_root_ref(struct btrfs_trans_handle *trans, >> 2964 struct btrfs_fs_info *fs_info, >> 2965 u64 root_id, u64 ref_id, u64 dirid, u64 *sequence, >> 2966 const char *name, int name_len); >> 2967 int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, >> 2968 const struct btrfs_key *key); >> 2969 int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, >> 2970 const struct btrfs_key *key, >> 2971 struct btrfs_root_item *item); >> 2972 int __must_check btrfs_update_root(struct btrfs_trans_handle *trans, >> 2973 struct btrfs_root *root, >> 2974 struct btrfs_key *key, >> 2975 struct btrfs_root_item *item); >> 2976 int btrfs_find_root(struct btrfs_root *root, const struct btrfs_key *search_key, >> 2977 struct btrfs_path *path, struct btrfs_root_item *root_item, >> 2978 struct btrfs_key *root_key); >> 2979 int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info); >> 2980 void btrfs_set_root_node(struct btrfs_root_item *item, >> 2981 struct extent_buffer *node); >> 2982 void btrfs_check_and_init_root_item(struct btrfs_root_item *item); >> 2983 void btrfs_update_root_times(struct btrfs_trans_handle *trans, >> 2984 struct btrfs_root *root); >> 2985 >> 2986 /* uuid-tree.c */ >> 2987 int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, >> 2988 struct btrfs_fs_info *fs_info, u8 *uuid, u8 type, >> 2989 u64 subid); >> 2990 int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans, >> 2991 struct btrfs_fs_info *fs_info, u8 *uuid, u8 type, >> 2992 u64 subid); >> 2993 int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info, >> 2994 int (*check_func)(struct btrfs_fs_info *, u8 *, u8, >> 2995 u64)); >> 2996 >> 2997 /* dir-item.c */ >> 2998 int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, >> 2999 const char *name, int name_len); >> 3000 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, >> 3001 struct btrfs_root *root, const char *name, >> 3002 int name_len, struct btrfs_inode *dir, >> 3003 struct btrfs_key *location, u8 type, u64 index); >> 3004 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, >> 3005 struct btrfs_root *root, >> 
3006 struct btrfs_path *path, u64 dir, >> 3007 const char *name, int name_len, >> 3008 int mod); >> 3009 struct btrfs_dir_item * >> 3010 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans, >> 3011 struct btrfs_root *root, >> 3012 struct btrfs_path *path, u64 dir, >> 3013 u64 objectid, const char *name, int name_len, >> 3014 int mod); >> 3015 struct btrfs_dir_item * >> 3016 btrfs_search_dir_index_item(struct btrfs_root *root, >> 3017 struct btrfs_path *path, u64 dirid, >> 3018 const char *name, int name_len); >> 3019 int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans, >> 3020 struct btrfs_root *root, >> 3021 struct btrfs_path *path, >> 3022 struct btrfs_dir_item *di); >> 3023 int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, >> 3024 struct btrfs_root *root, >> 3025 struct btrfs_path *path, u64 objectid, >> 3026 const char *name, u16 name_len, >> 3027 const void *data, u16 data_len); >> 3028 struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans, >> 3029 struct btrfs_root *root, >> 3030 struct btrfs_path *path, u64 dir, >> 3031 const char *name, u16 name_len, >> 3032 int mod); >> 3033 int verify_dir_item(struct btrfs_fs_info *fs_info, >> 3034 struct extent_buffer *leaf, >> 3035 struct btrfs_dir_item *dir_item); >> 3036 struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info, >> 3037 struct btrfs_path *path, >> 3038 const char *name, >> 3039 int name_len); >> 3040 >> 3041 /* orphan.c */ >> 3042 int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans, >> 3043 struct btrfs_root *root, u64 offset); >> 3044 int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, >> 3045 struct btrfs_root *root, u64 offset); >> 3046 int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); >> 3047 >> 3048 /* inode-item.c */ >> 3049 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, >> 3050 struct btrfs_root *root, >> 3051 const char *name, int name_len, >> 3052 u64 inode_objectid, u64 ref_objectid, u64 index); >> 3053 int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, >> 3054 struct btrfs_root *root, >> 3055 const char *name, int name_len, >> 3056 u64 inode_objectid, u64 ref_objectid, u64 *index); >> 3057 int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans, >> 3058 struct btrfs_root *root, >> 3059 struct btrfs_path *path, u64 objectid); >> 3060 int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root >> 3061 *root, struct btrfs_path *path, >> 3062 struct btrfs_key *location, int mod); >> 3063 >> 3064 struct btrfs_inode_extref * >> 3065 btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans, >> 3066 struct btrfs_root *root, >> 3067 struct btrfs_path *path, >> 3068 const char *name, int name_len, >> 3069 u64 inode_objectid, u64 ref_objectid, int ins_len, >> 3070 int cow); >> 3071 >> 3072 int btrfs_find_name_in_ext_backref(struct btrfs_path *path, >> 3073 u64 ref_objectid, const char *name, >> 3074 int name_len, >> 3075 struct btrfs_inode_extref **extref_ret); >> 3076 >> 3077 /* file-item.c */ >> 3078 struct btrfs_dio_private; >> 3079 int btrfs_del_csums(struct btrfs_trans_handle *trans, >> 3080 struct btrfs_fs_info *fs_info, u64 bytenr, u64 len); >> 3081 int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst); >> 3082 int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, >> 3083 u64 logical_offset); >> 3084 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, >> 3085 struct btrfs_root *root, >> 3086 u64 objectid, u64 pos, >> 3087 u64 
disk_offset, u64 disk_num_bytes, >> 3088 u64 num_bytes, u64 offset, u64 ram_bytes, >> 3089 u8 compression, u8 encryption, u16 other_encoding); >> 3090 int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans, >> 3091 struct btrfs_root *root, >> 3092 struct btrfs_path *path, u64 objectid, >> 3093 u64 bytenr, int mod); >> 3094 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, >> 3095 struct btrfs_root *root, >> 3096 struct btrfs_ordered_sum *sums); >> 3097 int btrfs_csum_one_bio(struct inode *inode, struct bio *bio, >> 3098 u64 file_start, int contig); >> 3099 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, >> 3100 struct list_head *list, int search_commit); >> 3101 void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, >> 3102 const struct btrfs_path *path, >> 3103 struct btrfs_file_extent_item *fi, >> 3104 const bool new_inline, >> 3105 struct extent_map *em); >> 3106 >> 3107 /* inode.c */ >> 3108 struct btrfs_delalloc_work { >> 3109 struct inode *inode; >> 3110 int delay_iput; >> 3111 struct completion completion; >> 3112 struct list_head list; >> 3113 struct btrfs_work work; >> 3114 }; 674 3115 675 int btrfs_search_backwards(struct btrfs_root * !! 3116 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, 676 struct btrfs_path * !! 3117 int delay_iput); >> 3118 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work); >> 3119 >> 3120 struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, >> 3121 struct page *page, size_t pg_offset, u64 start, >> 3122 u64 len, int create); >> 3123 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, >> 3124 u64 *orig_start, u64 *orig_block_len, >> 3125 u64 *ram_bytes); >> 3126 >> 3127 /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */ >> 3128 #if defined(ClearPageFsMisc) && !defined(ClearPageChecked) >> 3129 #define ClearPageChecked ClearPageFsMisc >> 3130 #define SetPageChecked SetPageFsMisc >> 3131 #define PageChecked PageFsMisc >> 3132 #endif 677 3133 678 int btrfs_get_next_valid_item(struct btrfs_roo !! 3134 /* This forces readahead on a given range of bytes in an inode */ 679 struct btrfs_pat !! 
3135 static inline void btrfs_force_ra(struct address_space *mapping, >> 3136 struct file_ra_state *ra, struct file *file, >> 3137 pgoff_t offset, unsigned long req_size) >> 3138 { >> 3139 page_cache_sync_readahead(mapping, ra, file, offset, req_size); >> 3140 } >> 3141 >> 3142 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); >> 3143 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index); >> 3144 int btrfs_unlink_inode(struct btrfs_trans_handle *trans, >> 3145 struct btrfs_root *root, >> 3146 struct btrfs_inode *dir, struct btrfs_inode *inode, >> 3147 const char *name, int name_len); >> 3148 int btrfs_add_link(struct btrfs_trans_handle *trans, >> 3149 struct btrfs_inode *parent_inode, struct btrfs_inode *inode, >> 3150 const char *name, int name_len, int add_backref, u64 index); >> 3151 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, >> 3152 struct btrfs_root *root, >> 3153 struct inode *dir, u64 objectid, >> 3154 const char *name, int name_len); >> 3155 int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, >> 3156 int front); >> 3157 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, >> 3158 struct btrfs_root *root, >> 3159 struct inode *inode, u64 new_size, >> 3160 u32 min_type); >> 3161 >> 3162 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); >> 3163 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput, >> 3164 int nr); >> 3165 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, >> 3166 struct extent_state **cached_state, int dedupe); >> 3167 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, >> 3168 struct btrfs_root *new_root, >> 3169 struct btrfs_root *parent_root, >> 3170 u64 new_dirid); >> 3171 int btrfs_merge_bio_hook(struct page *page, unsigned long offset, >> 3172 size_t size, struct bio *bio, >> 3173 unsigned long bio_flags); >> 3174 int btrfs_page_mkwrite(struct vm_fault *vmf); >> 3175 int btrfs_readpage(struct file *file, struct page *page); >> 3176 void btrfs_evict_inode(struct inode *inode); >> 3177 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); >> 3178 struct inode *btrfs_alloc_inode(struct super_block *sb); >> 3179 void btrfs_destroy_inode(struct inode *inode); >> 3180 int btrfs_drop_inode(struct inode *inode); >> 3181 int btrfs_init_cachep(void); >> 3182 void btrfs_destroy_cachep(void); >> 3183 long btrfs_ioctl_trans_end(struct file *file); >> 3184 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, >> 3185 struct btrfs_root *root, int *was_new); >> 3186 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, >> 3187 struct page *page, size_t pg_offset, >> 3188 u64 start, u64 end, int create); >> 3189 int btrfs_update_inode(struct btrfs_trans_handle *trans, >> 3190 struct btrfs_root *root, >> 3191 struct inode *inode); >> 3192 int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, >> 3193 struct btrfs_root *root, struct inode *inode); >> 3194 int btrfs_orphan_add(struct btrfs_trans_handle *trans, >> 3195 struct btrfs_inode *inode); >> 3196 int btrfs_orphan_cleanup(struct btrfs_root *root); >> 3197 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, >> 3198 struct btrfs_root *root); >> 3199 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size); >> 3200 void btrfs_invalidate_inodes(struct btrfs_root *root); >> 3201 void btrfs_add_delayed_iput(struct inode *inode); >> 3202 void btrfs_run_delayed_iputs(struct btrfs_fs_info 
*fs_info); >> 3203 int btrfs_prealloc_file_range(struct inode *inode, int mode, >> 3204 u64 start, u64 num_bytes, u64 min_size, >> 3205 loff_t actual_len, u64 *alloc_hint); >> 3206 int btrfs_prealloc_file_range_trans(struct inode *inode, >> 3207 struct btrfs_trans_handle *trans, int mode, >> 3208 u64 start, u64 num_bytes, u64 min_size, >> 3209 loff_t actual_len, u64 *alloc_hint); >> 3210 extern const struct dentry_operations btrfs_dentry_operations; >> 3211 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS >> 3212 void btrfs_test_inode_set_ops(struct inode *inode); >> 3213 #endif >> 3214 >> 3215 /* ioctl.c */ >> 3216 long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); >> 3217 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); >> 3218 int btrfs_ioctl_get_supported_features(void __user *arg); >> 3219 void btrfs_update_iflags(struct inode *inode); >> 3220 void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); >> 3221 int btrfs_is_empty_uuid(u8 *uuid); >> 3222 int btrfs_defrag_file(struct inode *inode, struct file *file, >> 3223 struct btrfs_ioctl_defrag_range_args *range, >> 3224 u64 newer_than, unsigned long max_pages); >> 3225 void btrfs_get_block_group_info(struct list_head *groups_list, >> 3226 struct btrfs_ioctl_space_info *space); >> 3227 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock, >> 3228 struct btrfs_ioctl_balance_args *bargs); >> 3229 ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen, >> 3230 struct file *dst_file, u64 dst_loff); >> 3231 >> 3232 /* file.c */ >> 3233 int btrfs_auto_defrag_init(void); >> 3234 void btrfs_auto_defrag_exit(void); >> 3235 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, >> 3236 struct btrfs_inode *inode); >> 3237 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); >> 3238 void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info); >> 3239 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); >> 3240 void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, >> 3241 int skip_pinned); >> 3242 extern const struct file_operations btrfs_file_operations; >> 3243 int __btrfs_drop_extents(struct btrfs_trans_handle *trans, >> 3244 struct btrfs_root *root, struct inode *inode, >> 3245 struct btrfs_path *path, u64 start, u64 end, >> 3246 u64 *drop_end, int drop_cache, >> 3247 int replace_extent, >> 3248 u32 extent_item_size, >> 3249 int *key_inserted); >> 3250 int btrfs_drop_extents(struct btrfs_trans_handle *trans, >> 3251 struct btrfs_root *root, struct inode *inode, u64 start, >> 3252 u64 end, int drop_cache); >> 3253 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, >> 3254 struct btrfs_inode *inode, u64 start, u64 end); >> 3255 int btrfs_release_file(struct inode *inode, struct file *file); >> 3256 int btrfs_dirty_pages(struct inode *inode, struct page **pages, >> 3257 size_t num_pages, loff_t pos, size_t write_bytes, >> 3258 struct extent_state **cached); >> 3259 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end); >> 3260 int btrfs_clone_file_range(struct file *file_in, loff_t pos_in, >> 3261 struct file *file_out, loff_t pos_out, u64 len); >> 3262 >> 3263 /* tree-defrag.c */ >> 3264 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, >> 3265 struct btrfs_root *root); >> 3266 >> 3267 /* sysfs.c */ >> 3268 int btrfs_init_sysfs(void); >> 3269 void btrfs_exit_sysfs(void); >> 3270 int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info); >> 3271 void 
btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info); >> 3272 >> 3273 /* xattr.c */ >> 3274 ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); >> 3275 >> 3276 /* super.c */ >> 3277 int btrfs_parse_options(struct btrfs_fs_info *info, char *options, >> 3278 unsigned long new_flags); >> 3279 int btrfs_sync_fs(struct super_block *sb, int wait); >> 3280 >> 3281 static inline __printf(2, 3) >> 3282 void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...) >> 3283 { >> 3284 } >> 3285 >> 3286 #ifdef CONFIG_PRINTK >> 3287 __printf(2, 3) >> 3288 void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...); >> 3289 #else >> 3290 #define btrfs_printk(fs_info, fmt, args...) \ >> 3291 btrfs_no_printk(fs_info, fmt, ##args) >> 3292 #endif >> 3293 >> 3294 #define btrfs_emerg(fs_info, fmt, args...) \ >> 3295 btrfs_printk(fs_info, KERN_EMERG fmt, ##args) >> 3296 #define btrfs_alert(fs_info, fmt, args...) \ >> 3297 btrfs_printk(fs_info, KERN_ALERT fmt, ##args) >> 3298 #define btrfs_crit(fs_info, fmt, args...) \ >> 3299 btrfs_printk(fs_info, KERN_CRIT fmt, ##args) >> 3300 #define btrfs_err(fs_info, fmt, args...) \ >> 3301 btrfs_printk(fs_info, KERN_ERR fmt, ##args) >> 3302 #define btrfs_warn(fs_info, fmt, args...) \ >> 3303 btrfs_printk(fs_info, KERN_WARNING fmt, ##args) >> 3304 #define btrfs_notice(fs_info, fmt, args...) \ >> 3305 btrfs_printk(fs_info, KERN_NOTICE fmt, ##args) >> 3306 #define btrfs_info(fs_info, fmt, args...) \ >> 3307 btrfs_printk(fs_info, KERN_INFO fmt, ##args) 680 3308 681 /* 3309 /* 682 * Search in @root for a given @key, and store !! 3310 * Wrappers that use printk_in_rcu 683 * << 684 * @root: The root node of the tree. << 685 * @key: The key we are looking for. << 686 * @found_key: Will hold the found item. << 687 * @path: Holds the current slot/leaf. << 688 * @iter_ret: Contains the value returned fr << 689 * btrfs_get_next_valid_item, whi << 690 * << 691 * The @iter_ret is an output variable that wi << 692 * btrfs_search_slot, if it encountered an err << 693 * btrfs_get_next_valid_item otherwise. That r << 694 * slot was found, 1 if there were no more lea << 695 * << 696 * It's recommended to use a separate variable << 697 * set the function return value so there's no << 698 * values stemming from btrfs_search_slot. << 699 */ 3311 */ 700 #define btrfs_for_each_slot(root, key, found_k !! 3312 #define btrfs_emerg_in_rcu(fs_info, fmt, args...) \ 701 for (iter_ret = btrfs_search_slot(NULL !! 3313 btrfs_printk_in_rcu(fs_info, KERN_EMERG fmt, ##args) 702 (iter_ret) >= 0 && !! 3314 #define btrfs_alert_in_rcu(fs_info, fmt, args...) \ 703 (iter_ret = btrfs_get_next_val !! 3315 btrfs_printk_in_rcu(fs_info, KERN_ALERT fmt, ##args) 704 (path)->slots[0]++ !! 3316 #define btrfs_crit_in_rcu(fs_info, fmt, args...) \ 705 ) !! 3317 btrfs_printk_in_rcu(fs_info, KERN_CRIT fmt, ##args) >> 3318 #define btrfs_err_in_rcu(fs_info, fmt, args...) \ >> 3319 btrfs_printk_in_rcu(fs_info, KERN_ERR fmt, ##args) >> 3320 #define btrfs_warn_in_rcu(fs_info, fmt, args...) \ >> 3321 btrfs_printk_in_rcu(fs_info, KERN_WARNING fmt, ##args) >> 3322 #define btrfs_notice_in_rcu(fs_info, fmt, args...) \ >> 3323 btrfs_printk_in_rcu(fs_info, KERN_NOTICE fmt, ##args) >> 3324 #define btrfs_info_in_rcu(fs_info, fmt, args...) \ >> 3325 btrfs_printk_in_rcu(fs_info, KERN_INFO fmt, ##args) 706 3326 707 int btrfs_next_old_item(struct btrfs_root *roo !! 
3327 /* >> 3328 * Wrappers that use a ratelimited printk_in_rcu >> 3329 */ >> 3330 #define btrfs_emerg_rl_in_rcu(fs_info, fmt, args...) \ >> 3331 btrfs_printk_rl_in_rcu(fs_info, KERN_EMERG fmt, ##args) >> 3332 #define btrfs_alert_rl_in_rcu(fs_info, fmt, args...) \ >> 3333 btrfs_printk_rl_in_rcu(fs_info, KERN_ALERT fmt, ##args) >> 3334 #define btrfs_crit_rl_in_rcu(fs_info, fmt, args...) \ >> 3335 btrfs_printk_rl_in_rcu(fs_info, KERN_CRIT fmt, ##args) >> 3336 #define btrfs_err_rl_in_rcu(fs_info, fmt, args...) \ >> 3337 btrfs_printk_rl_in_rcu(fs_info, KERN_ERR fmt, ##args) >> 3338 #define btrfs_warn_rl_in_rcu(fs_info, fmt, args...) \ >> 3339 btrfs_printk_rl_in_rcu(fs_info, KERN_WARNING fmt, ##args) >> 3340 #define btrfs_notice_rl_in_rcu(fs_info, fmt, args...) \ >> 3341 btrfs_printk_rl_in_rcu(fs_info, KERN_NOTICE fmt, ##args) >> 3342 #define btrfs_info_rl_in_rcu(fs_info, fmt, args...) \ >> 3343 btrfs_printk_rl_in_rcu(fs_info, KERN_INFO fmt, ##args) 708 3344 709 /* 3345 /* 710 * Search the tree again to find a leaf with g !! 3346 * Wrappers that use a ratelimited printk 711 * << 712 * Returns 0 if it found something or 1 if the << 713 * Returns < 0 on error. << 714 */ 3347 */ 715 static inline int btrfs_next_leaf(struct btrfs !! 3348 #define btrfs_emerg_rl(fs_info, fmt, args...) \ 716 { !! 3349 btrfs_printk_ratelimited(fs_info, KERN_EMERG fmt, ##args) 717 return btrfs_next_old_leaf(root, path, !! 3350 #define btrfs_alert_rl(fs_info, fmt, args...) \ >> 3351 btrfs_printk_ratelimited(fs_info, KERN_ALERT fmt, ##args) >> 3352 #define btrfs_crit_rl(fs_info, fmt, args...) \ >> 3353 btrfs_printk_ratelimited(fs_info, KERN_CRIT fmt, ##args) >> 3354 #define btrfs_err_rl(fs_info, fmt, args...) \ >> 3355 btrfs_printk_ratelimited(fs_info, KERN_ERR fmt, ##args) >> 3356 #define btrfs_warn_rl(fs_info, fmt, args...) \ >> 3357 btrfs_printk_ratelimited(fs_info, KERN_WARNING fmt, ##args) >> 3358 #define btrfs_notice_rl(fs_info, fmt, args...) \ >> 3359 btrfs_printk_ratelimited(fs_info, KERN_NOTICE fmt, ##args) >> 3360 #define btrfs_info_rl(fs_info, fmt, args...) \ >> 3361 btrfs_printk_ratelimited(fs_info, KERN_INFO fmt, ##args) >> 3362 >> 3363 #if defined(CONFIG_DYNAMIC_DEBUG) >> 3364 #define btrfs_debug(fs_info, fmt, args...) \ >> 3365 do { \ >> 3366 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ >> 3367 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ >> 3368 btrfs_printk(fs_info, KERN_DEBUG fmt, ##args); \ >> 3369 } while (0) >> 3370 #define btrfs_debug_in_rcu(fs_info, fmt, args...) \ >> 3371 do { \ >> 3372 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ >> 3373 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ >> 3374 btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args); \ >> 3375 } while (0) >> 3376 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ >> 3377 do { \ >> 3378 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ >> 3379 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ >> 3380 btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, \ >> 3381 ##args);\ >> 3382 } while (0) >> 3383 #define btrfs_debug_rl(fs_info, fmt, args...) \ >> 3384 do { \ >> 3385 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ >> 3386 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ >> 3387 btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, \ >> 3388 ##args); \ >> 3389 } while (0) >> 3390 #elif defined(DEBUG) >> 3391 #define btrfs_debug(fs_info, fmt, args...) \ >> 3392 btrfs_printk(fs_info, KERN_DEBUG fmt, ##args) >> 3393 #define btrfs_debug_in_rcu(fs_info, fmt, args...) 
\ >> 3394 btrfs_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args) >> 3395 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ >> 3396 btrfs_printk_rl_in_rcu(fs_info, KERN_DEBUG fmt, ##args) >> 3397 #define btrfs_debug_rl(fs_info, fmt, args...) \ >> 3398 btrfs_printk_ratelimited(fs_info, KERN_DEBUG fmt, ##args) >> 3399 #else >> 3400 #define btrfs_debug(fs_info, fmt, args...) \ >> 3401 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) >> 3402 #define btrfs_debug_in_rcu(fs_info, fmt, args...) \ >> 3403 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) >> 3404 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \ >> 3405 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) >> 3406 #define btrfs_debug_rl(fs_info, fmt, args...) \ >> 3407 btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args) >> 3408 #endif >> 3409 >> 3410 #define btrfs_printk_in_rcu(fs_info, fmt, args...) \ >> 3411 do { \ >> 3412 rcu_read_lock(); \ >> 3413 btrfs_printk(fs_info, fmt, ##args); \ >> 3414 rcu_read_unlock(); \ >> 3415 } while (0) >> 3416 >> 3417 #define btrfs_printk_ratelimited(fs_info, fmt, args...) \ >> 3418 do { \ >> 3419 static DEFINE_RATELIMIT_STATE(_rs, \ >> 3420 DEFAULT_RATELIMIT_INTERVAL, \ >> 3421 DEFAULT_RATELIMIT_BURST); \ >> 3422 if (__ratelimit(&_rs)) \ >> 3423 btrfs_printk(fs_info, fmt, ##args); \ >> 3424 } while (0) >> 3425 >> 3426 #define btrfs_printk_rl_in_rcu(fs_info, fmt, args...) \ >> 3427 do { \ >> 3428 rcu_read_lock(); \ >> 3429 btrfs_printk_ratelimited(fs_info, fmt, ##args); \ >> 3430 rcu_read_unlock(); \ >> 3431 } while (0) >> 3432 >> 3433 #ifdef CONFIG_BTRFS_ASSERT >> 3434 >> 3435 __cold >> 3436 static inline void assfail(char *expr, char *file, int line) >> 3437 { >> 3438 pr_err("assertion failed: %s, file: %s, line: %d\n", >> 3439 expr, file, line); >> 3440 BUG(); 718 } 3441 } 719 3442 720 static inline int btrfs_next_item(struct btrfs !! 3443 #define ASSERT(expr) \ >> 3444 (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) >> 3445 #else >> 3446 #define ASSERT(expr) ((void)0) >> 3447 #endif >> 3448 >> 3449 __printf(5, 6) >> 3450 __cold >> 3451 void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function, >> 3452 unsigned int line, int errno, const char *fmt, ...); >> 3453 >> 3454 const char *btrfs_decode_error(int errno); >> 3455 >> 3456 __cold >> 3457 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, >> 3458 const char *function, >> 3459 unsigned int line, int errno); >> 3460 >> 3461 /* >> 3462 * Call btrfs_abort_transaction as early as possible when an error condition is >> 3463 * detected, that way the exact line number is reported. >> 3464 */ >> 3465 #define btrfs_abort_transaction(trans, errno) \ >> 3466 do { \ >> 3467 /* Report first abort since mount */ \ >> 3468 if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \ >> 3469 &((trans)->fs_info->fs_state))) { \ >> 3470 if ((errno) != -EIO) { \ >> 3471 WARN(1, KERN_DEBUG \ >> 3472 "BTRFS: Transaction aborted (error %d)\n", \ >> 3473 (errno)); \ >> 3474 } else { \ >> 3475 btrfs_debug((trans)->fs_info, \ >> 3476 "Transaction aborted (error %d)", \ >> 3477 (errno)); \ >> 3478 } \ >> 3479 } \ >> 3480 __btrfs_abort_transaction((trans), __func__, \ >> 3481 __LINE__, (errno)); \ >> 3482 } while (0) >> 3483 >> 3484 #define btrfs_handle_fs_error(fs_info, errno, fmt, args...) 
\ >> 3485 do { \ >> 3486 __btrfs_handle_fs_error((fs_info), __func__, __LINE__, \ >> 3487 (errno), fmt, ##args); \ >> 3488 } while (0) >> 3489 >> 3490 __printf(5, 6) >> 3491 __cold >> 3492 void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, >> 3493 unsigned int line, int errno, const char *fmt, ...); >> 3494 /* >> 3495 * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic >> 3496 * will panic(). Otherwise we BUG() here. >> 3497 */ >> 3498 #define btrfs_panic(fs_info, errno, fmt, args...) \ >> 3499 do { \ >> 3500 __btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \ >> 3501 BUG(); \ >> 3502 } while (0) >> 3503 >> 3504 >> 3505 /* compatibility and incompatibility defines */ >> 3506 >> 3507 #define btrfs_set_fs_incompat(__fs_info, opt) \ >> 3508 __btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt) >> 3509 >> 3510 static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info, >> 3511 u64 flag) >> 3512 { >> 3513 struct btrfs_super_block *disk_super; >> 3514 u64 features; >> 3515 >> 3516 disk_super = fs_info->super_copy; >> 3517 features = btrfs_super_incompat_flags(disk_super); >> 3518 if (!(features & flag)) { >> 3519 spin_lock(&fs_info->super_lock); >> 3520 features = btrfs_super_incompat_flags(disk_super); >> 3521 if (!(features & flag)) { >> 3522 features |= flag; >> 3523 btrfs_set_super_incompat_flags(disk_super, features); >> 3524 btrfs_info(fs_info, "setting %llu feature flag", >> 3525 flag); >> 3526 } >> 3527 spin_unlock(&fs_info->super_lock); >> 3528 } >> 3529 } >> 3530 >> 3531 #define btrfs_clear_fs_incompat(__fs_info, opt) \ >> 3532 __btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt) >> 3533 >> 3534 static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info, >> 3535 u64 flag) >> 3536 { >> 3537 struct btrfs_super_block *disk_super; >> 3538 u64 features; >> 3539 >> 3540 disk_super = fs_info->super_copy; >> 3541 features = btrfs_super_incompat_flags(disk_super); >> 3542 if (features & flag) { >> 3543 spin_lock(&fs_info->super_lock); >> 3544 features = btrfs_super_incompat_flags(disk_super); >> 3545 if (features & flag) { >> 3546 features &= ~flag; >> 3547 btrfs_set_super_incompat_flags(disk_super, features); >> 3548 btrfs_info(fs_info, "clearing %llu feature flag", >> 3549 flag); >> 3550 } >> 3551 spin_unlock(&fs_info->super_lock); >> 3552 } >> 3553 } >> 3554 >> 3555 #define btrfs_fs_incompat(fs_info, opt) \ >> 3556 __btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt) >> 3557 >> 3558 static inline bool __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag) >> 3559 { >> 3560 struct btrfs_super_block *disk_super; >> 3561 disk_super = fs_info->super_copy; >> 3562 return !!(btrfs_super_incompat_flags(disk_super) & flag); >> 3563 } >> 3564 >> 3565 #define btrfs_set_fs_compat_ro(__fs_info, opt) \ >> 3566 __btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt) >> 3567 >> 3568 static inline void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info, >> 3569 u64 flag) >> 3570 { >> 3571 struct btrfs_super_block *disk_super; >> 3572 u64 features; >> 3573 >> 3574 disk_super = fs_info->super_copy; >> 3575 features = btrfs_super_compat_ro_flags(disk_super); >> 3576 if (!(features & flag)) { >> 3577 spin_lock(&fs_info->super_lock); >> 3578 features = btrfs_super_compat_ro_flags(disk_super); >> 3579 if (!(features & flag)) { >> 3580 features |= flag; >> 3581 btrfs_set_super_compat_ro_flags(disk_super, features); >> 3582 btrfs_info(fs_info, "setting %llu ro feature flag", 
>> 3583 flag); >> 3584 } >> 3585 spin_unlock(&fs_info->super_lock); >> 3586 } >> 3587 } >> 3588 >> 3589 #define btrfs_clear_fs_compat_ro(__fs_info, opt) \ >> 3590 __btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt) >> 3591 >> 3592 static inline void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info, >> 3593 u64 flag) >> 3594 { >> 3595 struct btrfs_super_block *disk_super; >> 3596 u64 features; >> 3597 >> 3598 disk_super = fs_info->super_copy; >> 3599 features = btrfs_super_compat_ro_flags(disk_super); >> 3600 if (features & flag) { >> 3601 spin_lock(&fs_info->super_lock); >> 3602 features = btrfs_super_compat_ro_flags(disk_super); >> 3603 if (features & flag) { >> 3604 features &= ~flag; >> 3605 btrfs_set_super_compat_ro_flags(disk_super, features); >> 3606 btrfs_info(fs_info, "clearing %llu ro feature flag", >> 3607 flag); >> 3608 } >> 3609 spin_unlock(&fs_info->super_lock); >> 3610 } >> 3611 } >> 3612 >> 3613 #define btrfs_fs_compat_ro(fs_info, opt) \ >> 3614 __btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt) >> 3615 >> 3616 static inline int __btrfs_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag) >> 3617 { >> 3618 struct btrfs_super_block *disk_super; >> 3619 disk_super = fs_info->super_copy; >> 3620 return !!(btrfs_super_compat_ro_flags(disk_super) & flag); >> 3621 } >> 3622 >> 3623 /* acl.c */ >> 3624 #ifdef CONFIG_BTRFS_FS_POSIX_ACL >> 3625 struct posix_acl *btrfs_get_acl(struct inode *inode, int type); >> 3626 int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type); >> 3627 int btrfs_init_acl(struct btrfs_trans_handle *trans, >> 3628 struct inode *inode, struct inode *dir); >> 3629 #else >> 3630 #define btrfs_get_acl NULL >> 3631 #define btrfs_set_acl NULL >> 3632 static inline int btrfs_init_acl(struct btrfs_trans_handle *trans, >> 3633 struct inode *inode, struct inode *dir) 721 { 3634 { 722 return btrfs_next_old_item(root, p, 0) !! 3635 return 0; 723 } 3636 } 724 int btrfs_leaf_free_space(const struct extent_ !! 
3637 #endif >> 3638 >> 3639 /* relocation.c */ >> 3640 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start); >> 3641 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans, >> 3642 struct btrfs_root *root); >> 3643 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, >> 3644 struct btrfs_root *root); >> 3645 int btrfs_recover_relocation(struct btrfs_root *root); >> 3646 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len); >> 3647 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, >> 3648 struct btrfs_root *root, struct extent_buffer *buf, >> 3649 struct extent_buffer *cow); >> 3650 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending, >> 3651 u64 *bytes_to_reserve); >> 3652 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, >> 3653 struct btrfs_pending_snapshot *pending); >> 3654 >> 3655 /* scrub.c */ >> 3656 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, >> 3657 u64 end, struct btrfs_scrub_progress *progress, >> 3658 int readonly, int is_dev_replace); >> 3659 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info); >> 3660 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info); >> 3661 int btrfs_scrub_cancel(struct btrfs_fs_info *info); >> 3662 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info, >> 3663 struct btrfs_device *dev); >> 3664 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid, >> 3665 struct btrfs_scrub_progress *progress); >> 3666 static inline void btrfs_init_full_stripe_locks_tree( >> 3667 struct btrfs_full_stripe_locks_tree *locks_root) >> 3668 { >> 3669 locks_root->root = RB_ROOT; >> 3670 mutex_init(&locks_root->lock); >> 3671 } >> 3672 >> 3673 /* dev-replace.c */ >> 3674 void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info); >> 3675 void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info); >> 3676 void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount); >> 3677 >> 3678 static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info) >> 3679 { >> 3680 btrfs_bio_counter_sub(fs_info, 1); >> 3681 } >> 3682 >> 3683 /* reada.c */ >> 3684 struct reada_control { >> 3685 struct btrfs_fs_info *fs_info; /* tree to prefetch */ >> 3686 struct btrfs_key key_start; >> 3687 struct btrfs_key key_end; /* exclusive */ >> 3688 atomic_t elems; >> 3689 struct kref refcnt; >> 3690 wait_queue_head_t wait; >> 3691 }; >> 3692 struct reada_control *btrfs_reada_add(struct btrfs_root *root, >> 3693 struct btrfs_key *start, struct btrfs_key *end); >> 3694 int btrfs_reada_wait(void *handle); >> 3695 void btrfs_reada_detach(void *handle); >> 3696 int btree_readahead_hook(struct extent_buffer *eb, int err); 725 3697 726 static inline int is_fstree(u64 rootid) 3698 static inline int is_fstree(u64 rootid) 727 { 3699 { 728 if (rootid == BTRFS_FS_TREE_OBJECTID | 3700 if (rootid == BTRFS_FS_TREE_OBJECTID || 729 ((s64)rootid >= (s64)BTRFS_FIRST_F 3701 ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID && 730 !btrfs_qgroup_level(rootid))) 3702 !btrfs_qgroup_level(rootid))) 731 return 1; 3703 return 1; 732 return 0; 3704 return 0; 733 } 3705 } 734 3706 735 static inline bool btrfs_is_data_reloc_root(co !! 3707 static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info) 736 { 3708 { 737 return root->root_key.objectid == BTRF !! 3709 return signal_pending(current); 738 } 3710 } 739 3711 740 u16 btrfs_csum_type_size(u16 type); !! 3712 /* Sanity test specific functions */ 741 int btrfs_super_csum_size(const struct btrfs_s !! 
3713 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 742 const char *btrfs_super_csum_name(u16 csum_typ !! 3714 void btrfs_test_destroy_inode(struct inode *inode); 743 const char *btrfs_super_csum_driver(u16 csum_t !! 3715 #endif 744 size_t __attribute_const__ btrfs_get_num_csums << 745 << 746 /* << 747 * We use page status Private2 to indicate the << 748 * unfinished IO. << 749 * << 750 * Rename the Private2 accessors to Ordered, t << 751 */ << 752 #define PageOrdered(page) PagePr << 753 #define SetPageOrdered(page) SetPag << 754 #define ClearPageOrdered(page) ClearP << 755 #define folio_test_ordered(folio) folio_ << 756 #define folio_set_ordered(folio) folio_ << 757 #define folio_clear_ordered(folio) folio_ << 758 3716 >> 3717 static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info) >> 3718 { >> 3719 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS >> 3720 if (unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, >> 3721 &fs_info->fs_state))) >> 3722 return 1; >> 3723 #endif >> 3724 return 0; >> 3725 } 759 #endif 3726 #endif 760 3727
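For orientation, a minimal sketch of how the search API declared above is typically driven for a read-only lookup: allocate a path, call btrfs_search_slot() without a transaction, check the result, then free the path. This is not part of ctree.h; the helper name, the choice of BTRFS_INODE_ITEM_KEY and the reduced error handling are illustrative assumptions only.

/*
 * Illustrative sketch: look up an inode item by objectid.
 * Assumes "root" is a valid fs tree root handed in by the caller.
 */
static int example_lookup_inode_item(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* trans == NULL, ins_len == 0, cow == 0: plain read-only search */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret > 0)
		ret = -ENOENT;	/* exact key not found */
	/* on ret == 0 the item sits in path->nodes[0] at slot path->slots[0] */

	btrfs_free_path(path);	/* drops the locks/references held by the path */
	return ret;
}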