/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#ifndef BTRFS_LOCKING_H
#define BTRFS_LOCKING_H

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu_counter.h>
#include "extent_io.h"

struct extent_buffer;
struct btrfs_path;
struct btrfs_root;

#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2

/*
 * We are limited in number of subclasses by MAX_LOCKDEP_SUBCLASSES, which at
 * the time of this patch is 8, which is how many we use. Keep this in mind if
 * you decide you want to add another subclass.
 */
enum btrfs_lock_nesting {
        BTRFS_NESTING_NORMAL,

        /*
         * When we COW a block we are holding the lock on the original block,
         * and since our lockdep maps are rootid+level, this confuses lockdep
         * when we lock the newly allocated COW'd block. Handle this by having
         * a subclass for COW'ed blocks so that lockdep doesn't complain.
         */
        BTRFS_NESTING_COW,

        /*
         * Oftentimes we need to lock adjacent nodes on the same level while
         * still holding the lock on the original node we searched to, such as
         * for searching forward or for split/balance.
         *
         * Because of this we need to indicate to lockdep that this is
         * acceptable by having a different subclass for each of these
         * operations.
         */
        BTRFS_NESTING_LEFT,
        BTRFS_NESTING_RIGHT,

        /*
         * When splitting we will be holding a lock on the left/right node when
         * we need to cow that node, thus we need a new set of subclasses for
         * these two operations.
         */
        BTRFS_NESTING_LEFT_COW,
        BTRFS_NESTING_RIGHT_COW,

        /*
         * When splitting we may push nodes to the left or right, but still use
         * the subsequent nodes in our path, keeping our locks on those adjacent
         * blocks. Thus when we go to allocate a new split block we've already
         * used up all of our available subclasses, so this subclass exists to
         * handle this case where we need to allocate a new split block.
         */
        BTRFS_NESTING_SPLIT,

        /*
         * When promoting a new block to a root we need to have a special
         * subclass so we don't confuse lockdep, as it will appear that we are
         * locking a higher level node before a lower level one. Copying also
         * has this problem as it appears we're locking the same block again
         * when we make a snapshot of an existing root.
         */
        BTRFS_NESTING_NEW_ROOT,

        /*
         * We are limited to MAX_LOCKDEP_SUBCLASSES subclasses, so add this
         * sentinel here together with a static_assert to keep us from going
         * over the limit. As of this writing we're limited to 8, and we're
         * definitely using 8, hence this check to keep us from messing up in
         * the future.
         */
        BTRFS_NESTING_MAX,
};
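
/*
 * Illustrative sketch (not part of the original header): when COWing a node,
 * the caller already holds the lock on the source block, so the freshly
 * allocated copy is locked with the dedicated COW subclass via
 * btrfs_tree_lock_nested() (declared below) to keep lockdep happy. The
 * variable names and the allocation step here are hypothetical:
 *
 *        btrfs_tree_lock(eb);                    source block, NORMAL subclass
 *        cow = btrfs_alloc_tree_block(...);      hypothetical allocation step
 *        btrfs_tree_lock_nested(cow, BTRFS_NESTING_COW);
 *        ...
 *        btrfs_tree_unlock(cow);
 *        btrfs_tree_unlock(eb);
 */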

enum btrfs_lockdep_trans_states {
        BTRFS_LOCKDEP_TRANS_COMMIT_PREP,
        BTRFS_LOCKDEP_TRANS_UNBLOCKED,
        BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED,
        BTRFS_LOCKDEP_TRANS_COMPLETED,
};

/*
 * Lockdep annotation for wait events.
 *
 * @owner: The struct where the lockdep map is defined
 * @lock:  The lockdep map corresponding to a wait event
 *
 * This macro is used to annotate a wait event. In this case a thread acquires
 * the lockdep map as writer (exclusive lock) because it has to block until all
 * the threads that hold the lock as readers signal the condition for the wait
 * event and release their locks.
 */
#define btrfs_might_wait_for_event(owner, lock)                                \
        do {                                                                   \
                rwsem_acquire(&owner->lock##_map, 0, 0, _THIS_IP_);            \
                rwsem_release(&owner->lock##_map, _THIS_IP_);                  \
        } while (0)

/*
 * Protection for the resource/condition of a wait event.
 *
 * @owner: The struct where the lockdep map is defined
 * @lock:  The lockdep map corresponding to a wait event
 *
 * Many threads can modify the condition for the wait event at the same time
 * and signal the threads that block on the wait event. The threads that modify
 * the condition and do the signaling acquire the lock as readers (shared
 * lock).
 */
#define btrfs_lockdep_acquire(owner, lock)                                     \
        rwsem_acquire_read(&owner->lock##_map, 0, 0, _THIS_IP_)

/*
 * Used after signaling the condition for a wait event to release the lockdep
 * map held by a reader thread.
 */
#define btrfs_lockdep_release(owner, lock)                                     \
        rwsem_release(&owner->lock##_map, _THIS_IP_)

/*
 * Macros for the transaction states wait events, similar to the generic wait
 * event macros.
 */
#define btrfs_might_wait_for_state(owner, i)                                   \
        do {                                                                   \
                rwsem_acquire(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_); \
                rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_);   \
        } while (0)

#define btrfs_trans_state_lockdep_acquire(owner, i)                            \
        rwsem_acquire_read(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_)

#define btrfs_trans_state_lockdep_release(owner, i)                            \
        rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_)

/* Initialization of the lockdep map */
#define btrfs_lockdep_init_map(owner, lock)                                    \
        do {                                                                   \
                static struct lock_class_key lock##_key;                       \
                lockdep_init_map(&owner->lock##_map, #lock, &lock##_key, 0);   \
        } while (0)

/* Initialization of the transaction states lockdep maps. */
#define btrfs_state_lockdep_init_map(owner, lock, state)                       \
        do {                                                                   \
                static struct lock_class_key lock##_key;                       \
                lockdep_init_map(&owner->btrfs_state_change_map[state], #lock, \
                                 &lock##_key, 0);                              \
        } while (0)
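
/*
 * Illustrative sketch (not part of the original header) of how the macros
 * above pair up around a wait event, following the ordering their comments
 * describe. The struct foo with an `ordered` lockdep map suffix (i.e. a
 * `struct lockdep_map ordered_map` member), a `num_ordered` counter and an
 * `ordered_wait` wait queue are all made up for the example:
 *
 *        setup, once:
 *        btrfs_lockdep_init_map(foo, ordered);
 *
 *        updater thread: modify the condition as a reader, signal, release
 *        btrfs_lockdep_acquire(foo, ordered);
 *        atomic_dec(&foo->num_ordered);
 *        wake_up(&foo->ordered_wait);
 *        btrfs_lockdep_release(foo, ordered);
 *
 *        waiter thread: annotate as a writer before blocking
 *        btrfs_might_wait_for_event(foo, ordered);
 *        wait_event(foo->ordered_wait, atomic_read(&foo->num_ordered) == 0);
 *
 * With this annotation lockdep can flag the deadlock where a thread that
 * still holds the map as a reader goes to sleep on the same wait event.
 */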

static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
              "too many lock subclasses defined");

void btrfs_tree_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest);

static inline void btrfs_tree_lock(struct extent_buffer *eb)
{
        btrfs_tree_lock_nested(eb, BTRFS_NESTING_NORMAL);
}

void btrfs_tree_unlock(struct extent_buffer *eb);

void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest);

static inline void btrfs_tree_read_lock(struct extent_buffer *eb)
{
        btrfs_tree_read_lock_nested(eb, BTRFS_NESTING_NORMAL);
}

void btrfs_tree_read_unlock(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root);

#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb)
{
        lockdep_assert_held_write(&eb->lock);
}
#else
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { }
#endif

void btrfs_unlock_up_safe(struct btrfs_path *path, int level);

static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
        if (rw == BTRFS_WRITE_LOCK)
                btrfs_tree_unlock(eb);
        else if (rw == BTRFS_READ_LOCK)
                btrfs_tree_read_unlock(eb);
        else
                BUG();
}

/*
 * DREW ("double reader-writer exclusion") lock: any number of readers or any
 * number of writers may hold it at once, but readers and writers exclude each
 * other. See the usage sketch at the end of this file.
 */
struct btrfs_drew_lock {
        atomic_t readers;
        atomic_t writers;
        wait_queue_head_t pending_writers;
        wait_queue_head_t pending_readers;
};

void btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level);
void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb);
#else
static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
                                                  struct extent_buffer *eb, int level)
{
}
static inline void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root,
                                                   struct extent_buffer *eb)
{
}
#endif

#endif /* BTRFS_LOCKING_H */
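
/*
 * Illustrative sketch (not part of the original header): a typical DREW lock
 * pairing. Either side may be held by many tasks at once, but the two sides
 * exclude each other; in btrfs this primitive guards, for example, snapshot
 * creation against concurrent nocow writers via root->snapshot_lock. The
 * `ctx` struct and its field below are hypothetical:
 *
 *        btrfs_drew_lock_init(&ctx->lock);
 *
 *        task group A: shared among writers, excludes all readers
 *        if (btrfs_drew_try_write_lock(&ctx->lock)) {
 *                ...
 *                btrfs_drew_write_unlock(&ctx->lock);
 *        }
 *
 *        task group B: shared among readers, blocks while writers are active
 *        btrfs_drew_read_lock(&ctx->lock);
 *        ...
 *        btrfs_drew_read_unlock(&ctx->lock);
 */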