Linux/fs/btrfs/ctree.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_CTREE_H
#define BTRFS_CTREE_H

#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <linux/refcount.h>
#include <uapi/linux/btrfs_tree.h>
#include "locking.h"
#include "fs.h"
#include "accessors.h"
#include "extent-io-tree.h"

struct extent_buffer;
struct btrfs_block_rsv;
struct btrfs_trans_handle;
struct btrfs_block_group;

/* Read ahead values for struct btrfs_path.reada */
enum {
        READA_NONE,
        READA_BACK,
        READA_FORWARD,
        /*
         * Similar to READA_FORWARD but unlike it:
         *
         * 1) It will trigger readahead even for leaves that are not close to
         *    each other on disk;
         * 2) It also triggers readahead for nodes;
         * 3) During a search, even when a node or leaf is already in memory, it
         *    will still trigger readahead for other nodes and leaves that follow
         *    it.
         *
         * This is meant to be used only when we know we are iterating over the
         * entire tree or a very large part of it.
         */
        READA_FORWARD_ALWAYS,
};
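
/*
 * Usage sketch (illustrative only, with hypothetical local variables): a
 * caller that is about to iterate a whole tree can opt in to the aggressive
 * readahead mode by setting the reada field of its path before searching.
 * btrfs_search_slot() and the path helpers are declared later in this file.
 *
 *        path->reada = READA_FORWARD_ALWAYS;
 *        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 */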

/*
 * A btrfs_path remembers the path taken from the root down to the leaf.
 * Level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
 * to any other levels that are present.
 *
 * The slots array records the index of the item or block pointer
 * used while walking the tree.
 */
struct btrfs_path {
        struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
        int slots[BTRFS_MAX_LEVEL];
        /* if there is real range locking, this locks field will change */
        u8 locks[BTRFS_MAX_LEVEL];
        u8 reada;
        /* keep some upper locks as we walk down */
        u8 lowest_level;

        /*
         * set by btrfs_split_item, tells search_slot to keep all locks
         * and to force calls to keep space in the nodes
         */
        unsigned int search_for_split:1;
        unsigned int keep_locks:1;
        unsigned int skip_locking:1;
        unsigned int search_commit_root:1;
        unsigned int need_commit_sem:1;
        unsigned int skip_release_on_error:1;
        /*
         * Indicates that the new item (btrfs_search_slot) is extending an
         * already existing item and ins_len contains only the data size, not
         * the item header (ie. sizeof(struct btrfs_item) is not included).
         */
        unsigned int search_for_extension:1;
        /* Stop search if any locks need to be taken (for read) */
        unsigned int nowait:1;
};
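
/*
 * Minimal lifecycle sketch for a btrfs_path (illustrative only, error
 * handling trimmed): allocate, search, read the slot the search landed on,
 * then free.  btrfs_alloc_path(), btrfs_search_slot() and btrfs_free_path()
 * are declared later in this file.
 *
 *        struct btrfs_path *path = btrfs_alloc_path();
 *
 *        if (!path)
 *                return -ENOMEM;
 *        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *        if (ret == 0) {
 *                struct extent_buffer *leaf = path->nodes[0];
 *                int slot = path->slots[0];
 *                ... read the item with the accessors from accessors.h ...
 *        }
 *        btrfs_free_path(path);
 */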

/*
 * The state of a btrfs root.
 */
enum {
        /*
         * btrfs_record_root_in_trans() is a multi-step process, and it can
         * race with the balancing code.  But the race is very small, and it
         * only happens the first time the root is added to each transaction.
         * So IN_TRANS_SETUP is used to tell us when more checks are required.
         */
        BTRFS_ROOT_IN_TRANS_SETUP,

        /*
         * Set if tree blocks of this root can be shared by other roots.
         * Only subvolume trees and their reloc trees have this bit set.
         * Conflicts with the TRACK_DIRTY bit.
         *
         * This affects two things:
         *
         * - How balance works
         *   For shareable roots, we need to use the reloc tree and do path
         *   replacement for balance, and need various pre/post hooks for
         *   snapshot creation to handle them.
         *
         *   For non-shareable trees, we simply do a tree search with COW.
         *
         * - How dirty roots are tracked
         *   For shareable roots, btrfs_record_root_in_trans() is needed to
         *   track them, while non-subvolume roots have the TRACK_DIRTY bit
         *   and don't need to be tracked manually.
         */
        BTRFS_ROOT_SHAREABLE,
        BTRFS_ROOT_TRACK_DIRTY,
        BTRFS_ROOT_IN_RADIX,
        BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
        BTRFS_ROOT_DEFRAG_RUNNING,
        BTRFS_ROOT_FORCE_COW,
        BTRFS_ROOT_MULTI_LOG_TASKS,
        BTRFS_ROOT_DIRTY,
        BTRFS_ROOT_DELETING,

        /*
         * The reloc tree is orphaned, and only kept here for the qgroup
         * delayed subtree scan.
         *
         * Set for the subvolume tree owning the reloc tree.
         */
        BTRFS_ROOT_DEAD_RELOC_TREE,
        /* Mark a dead root stored on the device whose cleanup needs to be resumed */
        BTRFS_ROOT_DEAD_TREE,
        /* The root has a log tree. Used for subvolume roots and the tree root. */
        BTRFS_ROOT_HAS_LOG_TREE,
        /* Qgroup flushing is in progress */
        BTRFS_ROOT_QGROUP_FLUSHING,
        /* We started the orphan cleanup for this root. */
        BTRFS_ROOT_ORPHAN_CLEANUP,
        /* This root has a drop operation that was started previously. */
        BTRFS_ROOT_UNFINISHED_DROP,
        /* This reloc root needs to have its buffers' lockdep class reset. */
        BTRFS_ROOT_RESET_LOCKDEP_CLASS,
};

/*
 * Record swapped tree blocks of a subvolume tree for delayed subtree trace
 * code. For details check the comment in fs/btrfs/qgroup.c.
 */
struct btrfs_qgroup_swapped_blocks {
        spinlock_t lock;
        /* RB_EMPTY_ROOT() of the above blocks[] */
        bool swapped;
        struct rb_root blocks[BTRFS_MAX_LEVEL];
};

/*
 * In-RAM representation of a tree. The extent_root is used for all
 * allocations and for the extent tree's own root.
 */
struct btrfs_root {
        struct rb_node rb_node;

        struct extent_buffer *node;

        struct extent_buffer *commit_root;
        struct btrfs_root *log_root;
        struct btrfs_root *reloc_root;

        unsigned long state;
        struct btrfs_root_item root_item;
        struct btrfs_key root_key;
        struct btrfs_fs_info *fs_info;
        struct extent_io_tree dirty_log_pages;

        struct mutex objectid_mutex;

        spinlock_t accounting_lock;
        struct btrfs_block_rsv *block_rsv;

        struct mutex log_mutex;
        wait_queue_head_t log_writer_wait;
        wait_queue_head_t log_commit_wait[2];
        struct list_head log_ctxs[2];
        /* Used only for log trees of subvolumes, not for the log root tree */
        atomic_t log_writers;
        atomic_t log_commit[2];
        /* Used only for log trees of subvolumes, not for the log root tree */
        atomic_t log_batch;
        /*
         * Protected by the 'log_mutex' lock but can be read without holding
         * that lock to avoid unnecessary lock contention, in which case it
         * should be read using btrfs_get_root_log_transid() except if it's a
         * log tree in which case it can be directly accessed. Updates to this
         * field should always use btrfs_set_root_log_transid(), except for log
         * trees where the field can be updated directly.
         */
        int log_transid;
        /* Updated no matter whether the commit succeeds or not. */
        int log_transid_committed;
        /*
         * Only updated when the commit succeeds. Use
         * btrfs_get_root_last_log_commit() and btrfs_set_root_last_log_commit()
         * to access this field.
         */
        int last_log_commit;
        pid_t log_start_pid;

        u64 last_trans;

        u64 free_objectid;

        struct btrfs_key defrag_progress;
        struct btrfs_key defrag_max;

        /* The dirty list is only used by non-shareable roots */
        struct list_head dirty_list;

        struct list_head root_list;

        /*
         * Xarray that keeps track of in-memory inodes, protected by the lock
         * @inode_lock.
         */
        struct xarray inodes;

        /*
         * Xarray that keeps track of delayed nodes of every inode, protected
         * by @inode_lock.
         */
        struct xarray delayed_nodes;
        /*
         * Right now this just gets used so that a root has its own devid
         * for stat. It may be used for more later.
         */
        dev_t anon_dev;

        spinlock_t root_item_lock;
        refcount_t refs;

        struct mutex delalloc_mutex;
        spinlock_t delalloc_lock;
        /*
         * All of the inodes that have delalloc bytes. It is possible for
         * this list to be empty even when there are still dirty data=ordered
         * extents waiting to finish IO.
         */
        struct list_head delalloc_inodes;
        struct list_head delalloc_root;
        u64 nr_delalloc_inodes;

        struct mutex ordered_extent_mutex;
        /*
         * This is used by the balancing code to wait for all the pending
         * ordered extents.
         */
        spinlock_t ordered_extent_lock;

        /*
         * All of the data=ordered extents pending writeback. These can span
         * multiple transactions and basically include every dirty data page
         * that isn't from nodatacow.
         */
        struct list_head ordered_extents;
        struct list_head ordered_root;
        u64 nr_ordered_extents;

        /*
         * Not empty if this subvolume root has gone through tree block swap
         * (relocation).
         *
         * Will be used by reloc_control::dirty_subvol_roots.
         */
        struct list_head reloc_dirty_list;

        /*
         * Number of currently running SEND ioctls to prevent
         * manipulation with the read-only status via SUBVOL_SETFLAGS.
         */
        int send_in_progress;
        /*
         * Number of currently running deduplication operations that have a
         * destination inode belonging to this root. Protected by the lock
         * root_item_lock.
         */
        int dedupe_in_progress;
        /* For exclusion of snapshot creation and nocow writes */
        struct btrfs_drew_lock snapshot_lock;

        atomic_t snapshot_force_cow;

        /* For qgroup metadata reserved space */
        spinlock_t qgroup_meta_rsv_lock;
        u64 qgroup_meta_rsv_pertrans;
        u64 qgroup_meta_rsv_prealloc;
        wait_queue_head_t qgroup_flush_wait;

        /* Number of active swapfiles */
        atomic_t nr_swapfiles;

        /* Record pairs of swapped blocks for qgroup */
        struct btrfs_qgroup_swapped_blocks swapped_blocks;

        /* Used only by log trees, when logging csum items */
        struct extent_io_tree log_csum_range;

        /* Used in simple quotas, track root during relocation. */
        u64 relocation_src_root;

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
        u64 alloc_bytenr;
#endif

#ifdef CONFIG_BTRFS_DEBUG
        struct list_head leak_list;
#endif
};

static inline bool btrfs_root_readonly(const struct btrfs_root *root)
{
        /* Byte-swap the constant at compile time, root_item::flags is LE */
        return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}

static inline bool btrfs_root_dead(const struct btrfs_root *root)
{
        /* Byte-swap the constant at compile time, root_item::flags is LE */
        return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}

static inline u64 btrfs_root_id(const struct btrfs_root *root)
{
        return root->root_key.objectid;
}

static inline int btrfs_get_root_log_transid(const struct btrfs_root *root)
{
        return READ_ONCE(root->log_transid);
}

static inline void btrfs_set_root_log_transid(struct btrfs_root *root, int log_transid)
{
        WRITE_ONCE(root->log_transid, log_transid);
}

static inline int btrfs_get_root_last_log_commit(const struct btrfs_root *root)
{
        return READ_ONCE(root->last_log_commit);
}

static inline void btrfs_set_root_last_log_commit(struct btrfs_root *root, int commit_id)
{
        WRITE_ONCE(root->last_log_commit, commit_id);
}

static inline u64 btrfs_get_root_last_trans(const struct btrfs_root *root)
{
        return READ_ONCE(root->last_trans);
}

static inline void btrfs_set_root_last_trans(struct btrfs_root *root, u64 transid)
{
        WRITE_ONCE(root->last_trans, transid);
}

/*
 * Structure that conveys information about an extent that is going to replace
 * all the extents in a file range.
 */
struct btrfs_replace_extent_info {
        u64 disk_offset;
        u64 disk_len;
        u64 data_offset;
        u64 data_len;
        u64 file_offset;
        /* Pointer to a file extent item of type regular or prealloc. */
        char *extent_buf;
        /*
         * Set to true when attempting to replace a file range with a new extent
         * described by this structure, set to false when attempting to clone an
         * existing extent into a file range.
         */
        bool is_new_extent;
        /* Indicate if we should update the inode's mtime and ctime. */
        bool update_times;
        /* Meaningful only if is_new_extent is true. */
        int qgroup_reserved;
        /*
         * Meaningful only if is_new_extent is true.
         * Used to track how many extent items we have already inserted in a
         * subvolume tree that refer to the extent described by this structure,
         * so that we know when to create a new delayed ref or update an existing
         * one.
         */
        int insertions;
};

/* Arguments for btrfs_drop_extents() */
struct btrfs_drop_extents_args {
        /* Input parameters */

        /*
         * If NULL, btrfs_drop_extents() will allocate and free its own path.
         * If 'replace_extent' is true, this must not be NULL. Also the path
         * is always released except if 'replace_extent' is true and
         * btrfs_drop_extents() sets 'extent_inserted' to true, in which case
         * the path is kept locked.
         */
        struct btrfs_path *path;
        /* Start offset of the range to drop extents from */
        u64 start;
        /* End (exclusive, last byte + 1) of the range to drop extents from */
        u64 end;
        /* If true drop all the extent maps in the range */
        bool drop_cache;
        /*
         * If true it means we want to insert a new extent after dropping all
         * the extents in the range. If this is true, the 'extent_item_size'
         * parameter must be set as well and the 'extent_inserted' field will
         * be set to true by btrfs_drop_extents() if it could insert the new
         * extent.
         * Note: when this is set to true the path must not be NULL.
         */
        bool replace_extent;
        /*
         * Used if 'replace_extent' is true. Size of the file extent item to
         * insert after dropping all existing extents in the range
         */
        u32 extent_item_size;

        /* Output parameters */

        /*
         * Set to the minimum between the input parameter 'end' and the end
         * (exclusive, last byte + 1) of the last dropped extent. This is always
         * set even if btrfs_drop_extents() returns an error.
         */
        u64 drop_end;
        /*
         * The number of allocated bytes found in the range. This can be smaller
         * than the range's length when there are holes in the range.
         */
        u64 bytes_found;
        /*
         * Only set if 'replace_extent' is true. Set to true if we were able
         * to insert a replacement extent after dropping all extents in the
         * range, otherwise set to false by btrfs_drop_extents().
         * Also, if btrfs_drop_extents() has set this to true it means it
         * returned with the path locked, otherwise if it has set this to
         * false it has returned with the path released.
         */
        bool extent_inserted;
};
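
/*
 * Setup sketch for the args above (assumes the btrfs_drop_extents() prototype
 * from the file I/O code; the local variables are hypothetical): drop every
 * file extent item in [start, start + len) without inserting a replacement
 * extent.
 *
 *        struct btrfs_drop_extents_args drop_args = { 0 };
 *
 *        drop_args.start = start;
 *        drop_args.end = start + len;
 *        drop_args.drop_cache = true;
 *        ret = btrfs_drop_extents(trans, root, inode, &drop_args);
 *        if (ret == 0)
 *                bytes = drop_args.bytes_found;
 */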

struct btrfs_file_private {
        void *filldir_buf;
        u64 last_index;
        struct extent_state *llseek_cached_state;
        bool fsync_skip_inode_lock;
};

static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
{
        return info->nodesize - sizeof(struct btrfs_header);
}

static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
{
        return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
}

static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
{
        return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
}

static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
{
        return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}
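
/*
 * Worked example for the helpers above, assuming the common 16 KiB nodesize
 * and the on-disk structure sizes (btrfs_header 101, btrfs_item 25,
 * btrfs_key_ptr 33, btrfs_dir_item 30 bytes):
 *
 *        BTRFS_LEAF_DATA_SIZE     = 16384 - 101 = 16283 bytes
 *        BTRFS_MAX_ITEM_SIZE      = 16283 - 25  = 16258 bytes
 *        BTRFS_NODEPTRS_PER_BLOCK = 16283 / 33  = 493 pointers
 *        BTRFS_MAX_XATTR_SIZE     = 16258 - 30  = 16228 bytes
 */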

#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
                                ((bytes) >> (fs_info)->sectorsize_bits)

static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
{
        return mapping_gfp_constraint(mapping, ~__GFP_FS);
}

void btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
                         u64 num_bytes, u64 *actual_bytes);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);

/* ctree.c */
int __init btrfs_ctree_init(void);
void __cold btrfs_ctree_exit(void);

int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
                     const struct btrfs_key *key, int *slot);

int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);

#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys, on little-endian the disk order is the same as the CPU
 * order and we can avoid the conversion.
 */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk_key,
                                  const struct btrfs_key *k2)
{
        const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

        return btrfs_comp_cpu_keys(k1, k2);
}

#else

/* Compare two keys in a memcmp fashion. */
static inline int btrfs_comp_keys(const struct btrfs_disk_key *disk,
                                  const struct btrfs_key *k2)
{
        struct btrfs_key k1;

        btrfs_disk_key_to_cpu(&k1, disk);

        return btrfs_comp_cpu_keys(&k1, k2);
}

#endif
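
/*
 * Usage sketch (hypothetical variables): comparing the on-disk key of a leaf
 * slot against a CPU-order target key, e.g. in a manual scan.  The key is
 * copied out of the extent buffer with btrfs_item_key() from accessors.h.
 *
 *        struct btrfs_disk_key disk_key;
 *
 *        btrfs_item_key(leaf, &disk_key, slot);
 *        if (btrfs_comp_keys(&disk_key, &target_key) >= 0)
 *                break;
 */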

int btrfs_previous_item(struct btrfs_root *root,
                        struct btrfs_path *path, u64 min_objectid,
                        int type);
int btrfs_previous_extent_item(struct btrfs_root *root,
                        struct btrfs_path *path, u64 min_objectid);
void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
                             struct btrfs_path *path,
                             const struct btrfs_key *new_key);
struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
                        struct btrfs_key *key, int lowest_level,
                        u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
                         struct btrfs_path *path,
                         u64 min_trans);
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
                                           int slot);

int btrfs_cow_block(struct btrfs_trans_handle *trans,
                    struct btrfs_root *root, struct extent_buffer *buf,
                    struct extent_buffer *parent, int parent_slot,
                    struct extent_buffer **cow_ret,
                    enum btrfs_lock_nesting nest);
int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root,
                          struct extent_buffer *buf,
                          struct extent_buffer *parent, int parent_slot,
                          struct extent_buffer **cow_ret,
                          u64 search_start, u64 empty_size,
                          enum btrfs_lock_nesting nest);
int btrfs_copy_root(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root,
                      struct extent_buffer *buf,
                      struct extent_buffer **cow_ret, u64 new_root_objectid);
bool btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct extent_buffer *buf);
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct btrfs_path *path, int level, int slot);
void btrfs_extend_item(struct btrfs_trans_handle *trans,
                       struct btrfs_path *path, u32 data_size);
void btrfs_truncate_item(struct btrfs_trans_handle *trans,
                         struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
                     struct btrfs_root *root,
                     struct btrfs_path *path,
                     const struct btrfs_key *new_key,
                     unsigned long split_offset);
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         struct btrfs_path *path,
                         const struct btrfs_key *new_key);
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
                u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      const struct btrfs_key *key, struct btrfs_path *p,
                      int ins_len, int cow);
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
                          struct btrfs_path *p, u64 time_seq);
int btrfs_search_slot_for_read(struct btrfs_root *root,
                               const struct btrfs_key *key,
                               struct btrfs_path *p, int find_higher,
                               int return_any);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);

int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                   struct btrfs_path *path, int slot, int nr);
static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path)
{
        return btrfs_del_items(trans, root, path, path->slots[0], 1);
}

/*
 * Describes a batch of items to insert in a btree. This is used by
 * btrfs_insert_empty_items().
 */
struct btrfs_item_batch {
        /*
         * Pointer to an array containing the keys of the items to insert (in
         * sorted order).
         */
        const struct btrfs_key *keys;
        /* Pointer to an array containing the data size for each item to insert. */
        const u32 *data_sizes;
        /*
         * The sum of data sizes for all items. The caller can compute this while
         * setting up the data_sizes array, so it ends up being more efficient
         * than having btrfs_insert_empty_items() or setup_item_for_insert()
         * doing it, as it would avoid an extra loop over a potentially large
         * array, and in the case of setup_item_for_insert(), we would be doing
         * it while holding a write lock on a leaf and often on upper level nodes
         * too, unnecessarily increasing the size of a critical section.
         */
        u32 total_data_size;
        /* Size of the keys and data_sizes arrays (number of items in the batch). */
        int nr;
};

void btrfs_setup_item_for_insert(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 const struct btrfs_key *key,
                                 u32 data_size);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      const struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path,
                             const struct btrfs_item_batch *batch);

static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          const struct btrfs_key *key,
                                          u32 data_size)
{
        struct btrfs_item_batch batch;

        batch.keys = key;
        batch.data_sizes = &data_size;
        batch.total_data_size = data_size;
        batch.nr = 1;

        return btrfs_insert_empty_items(trans, root, path, &batch);
}
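
/*
 * Sketch of the general multi-item form of the helper above (sizes and keys
 * are illustrative): the caller provides sorted keys, per-item data sizes and
 * the precomputed total, then calls btrfs_insert_empty_items().
 *
 *        struct btrfs_item_batch batch;
 *        struct btrfs_key keys[2];
 *        u32 sizes[2] = { size0, size1 };
 *
 *        ... fill keys[0] and keys[1] in sorted order ...
 *        batch.keys = keys;
 *        batch.data_sizes = sizes;
 *        batch.total_data_size = size0 + size1;
 *        batch.nr = 2;
 *        ret = btrfs_insert_empty_items(trans, root, path, &batch);
 */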

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
                        u64 time_seq);

int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
                           struct btrfs_path *path);

int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
                              struct btrfs_path *path);

/*
 * Search in @root for a given @key, and store the slot found in @found_key.
 *
 * @root:       The root node of the tree.
 * @key:        The key we are looking for.
 * @found_key:  Will hold the found item.
 * @path:       Holds the current slot/leaf.
 * @iter_ret:   Contains the value returned from btrfs_search_slot or
 *              btrfs_get_next_valid_item, whichever was executed last.
 *
 * The @iter_ret is an output variable that will contain the return value of
 * btrfs_search_slot, if it encountered an error, or the value returned from
 * btrfs_get_next_valid_item otherwise. That return value can be 0, if a valid
 * slot was found, 1 if there were no more leaves, and <0 if there was an error.
 *
 * It's recommended to use a separate variable for iter_ret and then use it to
 * set the function return value so there's no confusion of the 0/1/errno
 * values stemming from btrfs_search_slot.
 */
#define btrfs_for_each_slot(root, key, found_key, path, iter_ret)               \
        for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0);   \
                (iter_ret) >= 0 &&                                              \
                (iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \
                (path)->slots[0]++                                              \
        )

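/*
 * Usage sketch for btrfs_for_each_slot(), following the recommendation above
 * to keep the iterator's return value in its own variable (the starting key
 * and the processing step are placeholders):
 *
 *        struct btrfs_key key = { 0 };
 *        struct btrfs_key found_key;
 *        int iter_ret = 0;
 *        int ret = 0;
 *
 *        btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
 *                ... process path->nodes[0], slot path->slots[0] ...
 *        }
 *        if (iter_ret < 0)
 *                ret = iter_ret;
 */
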
int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq);

/*
 * Search the tree again to find a leaf with greater keys.
 *
 * Returns 0 if it found something or 1 if there are no greater leaves.
 * Returns < 0 on error.
 */
static inline int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
        return btrfs_next_old_leaf(root, path, 0);
}

static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
        return btrfs_next_old_item(root, p, 0);
}
int btrfs_leaf_free_space(const struct extent_buffer *leaf);

static inline int is_fstree(u64 rootid)
{
        if (rootid == BTRFS_FS_TREE_OBJECTID ||
            ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
              !btrfs_qgroup_level(rootid)))
                return 1;
        return 0;
}

static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
{
        return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
}

u16 btrfs_csum_type_size(u16 type);
int btrfs_super_csum_size(const struct btrfs_super_block *s);
const char *btrfs_super_csum_name(u16 csum_type);
const char *btrfs_super_csum_driver(u16 csum_type);
size_t __attribute_const__ btrfs_get_num_csums(void);

/*
 * We use page status Private2 to indicate there is an ordered extent with
 * unfinished IO.
 *
 * Rename the Private2 accessors to Ordered, to improve readability.
 */
#define PageOrdered(page)               PagePrivate2(page)
#define SetPageOrdered(page)            SetPagePrivate2(page)
#define ClearPageOrdered(page)          ClearPagePrivate2(page)
#define folio_test_ordered(folio)       folio_test_private_2(folio)
#define folio_set_ordered(folio)        folio_set_private_2(folio)
#define folio_clear_ordered(folio)      folio_clear_private_2(folio)

#endif
