Linux/fs/bcachefs/btree_locking.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include "btree_iter.h"
#include "six.h"

void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);

void bch2_trans_unlock_noassert(struct btree_trans *);

static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
        return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}

static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
{
        return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
                ? &trans->c->btree_transaction_stats[trans->fn_idx]
                : NULL;
}

/* matches six lock types */
enum btree_node_locked_type {
        BTREE_NODE_UNLOCKED             = -1,
        BTREE_NODE_READ_LOCKED          = SIX_LOCK_read,
        BTREE_NODE_INTENT_LOCKED        = SIX_LOCK_intent,
        BTREE_NODE_WRITE_LOCKED         = SIX_LOCK_write,
};

static inline int btree_node_locked_type(struct btree_path *path,
                                         unsigned level)
{
        return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
}

static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
{
        return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
}

static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
{
        return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
{
        return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
        return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
}

static inline void mark_btree_node_locked_noreset(struct btree_path *path,
                                                  unsigned level,
                                                  enum btree_node_locked_type type)
{
        /* relying on this to avoid a branch */
        BUILD_BUG_ON(SIX_LOCK_read   != 0);
        BUILD_BUG_ON(SIX_LOCK_intent != 1);

        path->nodes_locked &= ~(3U << (level << 1));
        path->nodes_locked |= (type + 1) << (level << 1);
}
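
/*
 * Worked example (illustrative sketch, not part of the upstream header):
 * each level owns two bits of path->nodes_locked, storing "lock type + 1"
 * so that BTREE_NODE_UNLOCKED (-1) encodes as 0 without a branch:
 *
 *      mark_btree_node_locked_noreset(path, 1, BTREE_NODE_INTENT_LOCKED);
 *      // nodes_locked |= (SIX_LOCK_intent + 1) << (1 << 1), i.e. 2 << 2 = 0x8
 *
 *      btree_node_locked_type(path, 1);
 *      // -1 + ((0x8 >> 2) & 3) == -1 + 2 == SIX_LOCK_intent
 *      btree_node_locked_type(path, 0);
 *      // -1 + ((0x8 >> 0) & 3) == -1 + 0 == BTREE_NODE_UNLOCKED
 */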

static inline void mark_btree_node_unlocked(struct btree_path *path,
                                            unsigned level)
{
        EBUG_ON(btree_node_write_locked(path, level));
        mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
}

static inline void mark_btree_node_locked(struct btree_trans *trans,
                                          struct btree_path *path,
                                          unsigned level,
                                          enum btree_node_locked_type type)
{
        mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
        path->l[level].lock_taken_time = local_clock();
#endif
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
        return level < path->locks_want
                ? SIX_LOCK_intent
                : SIX_LOCK_read;
}

static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
        if (level < path->level)
                return BTREE_NODE_UNLOCKED;
        if (level < path->locks_want)
                return BTREE_NODE_INTENT_LOCKED;
        if (level == path->level)
                return BTREE_NODE_READ_LOCKED;
        return BTREE_NODE_UNLOCKED;
}
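
/*
 * Illustrative sketch (not upstream code): for a path iterating at level 1
 * with locks_want == 3, the wanted lock per level works out as:
 *
 *      // path->level == 1, path->locks_want == 3
 *      btree_lock_want(path, 0); // 0 < path->level -> BTREE_NODE_UNLOCKED
 *      btree_lock_want(path, 1); // 1 < locks_want  -> BTREE_NODE_INTENT_LOCKED
 *      btree_lock_want(path, 2); // 2 < locks_want  -> BTREE_NODE_INTENT_LOCKED
 *      btree_lock_want(path, 3); // past locks_want, not path->level -> UNLOCKED
 */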

static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
                                              struct btree_path *path, unsigned level)
{
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
        __bch2_time_stats_update(&btree_trans_stats(trans)->lock_hold_times,
                                 path->l[level].lock_taken_time,
                                 local_clock());
#endif
}

/* unlock: */

static inline void btree_node_unlock(struct btree_trans *trans,
                                     struct btree_path *path, unsigned level)
{
        int lock_type = btree_node_locked_type(path, level);

        EBUG_ON(level >= BTREE_MAX_DEPTH);
        EBUG_ON(lock_type == BTREE_NODE_WRITE_LOCKED);

        if (lock_type != BTREE_NODE_UNLOCKED) {
                six_unlock_type(&path->l[level].b->c.lock, lock_type);
                btree_trans_lock_hold_time_update(trans, path, level);
        }
        mark_btree_node_unlocked(path, level);
}

static inline int btree_path_lowest_level_locked(struct btree_path *path)
{
        return __ffs(path->nodes_locked) >> 1;
}

static inline int btree_path_highest_level_locked(struct btree_path *path)
{
        return __fls(path->nodes_locked) >> 1;
}
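
/*
 * Illustrative sketch (not upstream code): because each level occupies two
 * bits of nodes_locked, halving the index of the lowest/highest set bit
 * recovers the level number:
 *
 *      // level 0 intent locked (bits 0-1 = 0b10),
 *      // level 1 read locked   (bits 2-3 = 0b01):
 *      // nodes_locked == 0b0110 == 0x6
 *      __ffs(0x6) == 1, 1 >> 1 == 0;   // lowest locked level  == 0
 *      __fls(0x6) == 2, 2 >> 1 == 1;   // highest locked level == 1
 */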

static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
                                            struct btree_path *path)
{
        btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

        while (path->nodes_locked)
                btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
                                     struct btree *b)
{
        struct btree_path *linked;
        unsigned i;

        EBUG_ON(path->l[b->c.level].b != b);
        EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
        EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);

        mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);

        trans_for_each_path_with_node(trans, b, linked, i)
                linked->l[b->c.level].lock_seq++;

        six_unlock_write(&b->c.lock);
}
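
/*
 * Illustrative sketch (not upstream code): a six lock bumps its sequence
 * number when the write lock is dropped, and relocking only succeeds while
 * the saved sequence still matches - hence the lock_seq++ on every linked
 * path before the unlock above, roughly:
 *
 *      // before: linked->l[level].lock_seq == six_lock_seq(&b->c.lock)
 *      linked->l[level].lock_seq++;    // anticipate the bump
 *      six_unlock_write(&b->c.lock);   // seq increments inside the six lock
 *      // after: saved seq still matches, so bch2_btree_node_relock() succeeds
 */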

void bch2_btree_node_unlock_write(struct btree_trans *,
                        struct btree_path *, struct btree *);

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);

/* lock: */

static inline void trans_set_locked(struct btree_trans *trans)
{
        if (!trans->locked) {
                lock_acquire_exclusive(&trans->dep_map, 0, 0, NULL, _THIS_IP_);
                trans->locked = true;
                trans->last_unlock_ip = 0;

                trans->pf_memalloc_nofs = (current->flags & PF_MEMALLOC_NOFS) != 0;
                current->flags |= PF_MEMALLOC_NOFS;
        }
}

static inline void trans_set_unlocked(struct btree_trans *trans)
{
        if (trans->locked) {
                lock_release(&trans->dep_map, _THIS_IP_);
                trans->locked = false;
                trans->last_unlock_ip = _RET_IP_;

                if (!trans->pf_memalloc_nofs)
                        current->flags &= ~PF_MEMALLOC_NOFS;
        }
}
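
/*
 * Illustrative sketch (not upstream code): while a transaction holds btree
 * locks, memory reclaim must not recurse into the filesystem, so the pair
 * above saves and restores PF_MEMALLOC_NOFS rather than clearing it blindly:
 *
 *      current->flags |= PF_MEMALLOC_NOFS; // e.g. set by an outer context
 *      trans_set_locked(trans);      // pf_memalloc_nofs = true, flag stays set
 *      trans_set_unlocked(trans);    // flag left set: the outer context owns it
 */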

static inline int __btree_node_lock_nopath(struct btree_trans *trans,
                                         struct btree_bkey_cached_common *b,
                                         enum six_lock_type type,
                                         bool lock_may_not_fail,
                                         unsigned long ip)
{
        int ret;

        trans->lock_may_not_fail = lock_may_not_fail;
        trans->lock_must_abort  = false;
        trans->locking          = b;

        ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
                                 bch2_six_check_for_deadlock, trans, ip);
        WRITE_ONCE(trans->locking, NULL);
        WRITE_ONCE(trans->locking_wait.start_time, 0);
        return ret;
}

static inline int __must_check
btree_node_lock_nopath(struct btree_trans *trans,
                       struct btree_bkey_cached_common *b,
                       enum six_lock_type type,
                       unsigned long ip)
{
        return __btree_node_lock_nopath(trans, b, type, false, ip);
}

static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
                                         struct btree_bkey_cached_common *b,
                                         enum six_lock_type type)
{
        int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);

        BUG_ON(ret);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
                                             struct btree_bkey_cached_common *b,
                                             unsigned level,
                                             enum btree_node_locked_type want)
{
        struct btree_path *path;
        unsigned i;

        trans_for_each_path(trans, path, i)
                if (&path->l[level].b->c == b &&
                    btree_node_locked_type(path, level) >= want) {
                        six_lock_increment(&b->lock, (enum six_lock_type) want);
                        return true;
                }

        return false;
}
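
/*
 * Illustrative sketch (not upstream code): six locks are recursive within a
 * transaction, so if any path in this transaction already holds the node at
 * least as strongly as @want, we just take another reference instead of
 * going through the lock's wait path:
 *
 *      // path A holds node b intent locked at some level;
 *      // a second path in the same trans now wants a read lock on b:
 *      btree_node_lock_increment(trans, b, level, BTREE_NODE_READ_LOCKED);
 *      // finds A's intent lock (intent >= read), calls
 *      // six_lock_increment(&b->lock, SIX_LOCK_read) and returns true
 */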

static inline int btree_node_lock(struct btree_trans *trans,
                        struct btree_path *path,
                        struct btree_bkey_cached_common *b,
                        unsigned level,
                        enum six_lock_type type,
                        unsigned long ip)
{
        int ret = 0;

        EBUG_ON(level >= BTREE_MAX_DEPTH);

        if (likely(six_trylock_type(&b->lock, type)) ||
            btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
            !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
                path->l[b->level].lock_taken_time = local_clock();
#endif
        }

        return ret;
}
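
/*
 * Illustrative note (not upstream code): the condition above is a three
 * stage fallthrough, where only the last stage can fail:
 *
 *      if (six_trylock_type(...))               // 1: uncontended fast path
 *      else if (btree_node_lock_increment(...)) // 2: held by this trans already
 *      else ret = btree_node_lock_nopath(...);  // 3: slow path, runs the
 *                                               //    deadlock cycle detector
 */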

int __bch2_btree_node_lock_write(struct btree_trans *, struct btree_path *,
                                 struct btree_bkey_cached_common *b, bool);

static inline int __btree_node_lock_write(struct btree_trans *trans,
                                          struct btree_path *path,
                                          struct btree_bkey_cached_common *b,
                                          bool lock_may_not_fail)
{
        EBUG_ON(&path->l[b->level].b->c != b);
        EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
        EBUG_ON(!btree_node_intent_locked(path, b->level));

        /*
         * six locks are unfair, and read locks block while a thread wants a
         * write lock: thus, we need to tell the cycle detector we have a write
         * lock _before_ taking the lock:
         */
        mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);

        return likely(six_trylock_write(&b->lock))
                ? 0
                : __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
}

static inline int __must_check
bch2_btree_node_lock_write(struct btree_trans *trans,
                           struct btree_path *path,
                           struct btree_bkey_cached_common *b)
{
        return __btree_node_lock_write(trans, path, b, false);
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *,
                                       struct btree_path *,
                                       struct btree_bkey_cached_common *);

/* relock: */

bool bch2_btree_path_relock_norestart(struct btree_trans *, struct btree_path *);
int __bch2_btree_path_relock(struct btree_trans *,
                             struct btree_path *, unsigned long);

static inline int bch2_btree_path_relock(struct btree_trans *trans,
                                struct btree_path *path, unsigned long trace_ip)
{
        return btree_node_locked(path, path->level)
                ? 0
                : __bch2_btree_path_relock(trans, path, trace_ip);
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);

static inline bool bch2_btree_node_relock(struct btree_trans *trans,
                                          struct btree_path *path, unsigned level)
{
        EBUG_ON(btree_node_locked(path, level) &&
                !btree_node_write_locked(path, level) &&
                btree_node_locked_type(path, level) != __btree_lock_want(path, level));

        return likely(btree_node_locked(path, level)) ||
                (!IS_ERR_OR_NULL(path->l[level].b) &&
                 __bch2_btree_node_relock(trans, path, level, true));
}

static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
                                                  struct btree_path *path, unsigned level)
{
        EBUG_ON(btree_node_locked(path, level) &&
                !btree_node_write_locked(path, level) &&
                btree_node_locked_type(path, level) != __btree_lock_want(path, level));

        return likely(btree_node_locked(path, level)) ||
                (!IS_ERR_OR_NULL(path->l[level].b) &&
                 __bch2_btree_node_relock(trans, path, level, false));
}

/* upgrade */

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
                               struct btree_path *, unsigned,
                               struct get_locks_fail *);

bool __bch2_btree_path_upgrade(struct btree_trans *,
                               struct btree_path *, unsigned,
                               struct get_locks_fail *);

static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
                                          struct btree_path *path,
                                          unsigned new_locks_want)
{
        struct get_locks_fail f = {};
        unsigned old_locks_want = path->locks_want;

        new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

        if (path->locks_want < new_locks_want
            ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
            : path->nodes_locked)
                return 0;

        trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
                        old_locks_want, new_locks_want, &f);
        return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
}
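
/*
 * Illustrative note (not upstream code): the ternary condition above unpacks
 * to two success cases, with a transaction restart on failure:
 *
 *      if (path->locks_want < new_locks_want) {
 *              if (__bch2_btree_path_upgrade(trans, path, new_locks_want, &f))
 *                      return 0;       // taking more intent locks worked
 *      } else {
 *              if (path->nodes_locked)
 *                      return 0;       // already want enough; still locked
 *      }
 *      // otherwise: trace the failure and restart the transaction
 */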

/* misc: */

static inline void btree_path_set_should_be_locked(struct btree_path *path)
{
        EBUG_ON(!btree_node_locked(path, path->level));
        EBUG_ON(path->uptodate);

        path->should_be_locked = true;
}

static inline void __btree_path_set_level_up(struct btree_trans *trans,
                                      struct btree_path *path,
                                      unsigned l)
{
        btree_node_unlock(trans, path, l);
        path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
}

static inline void btree_path_set_level_up(struct btree_trans *trans,
                                    struct btree_path *path)
{
        __btree_path_set_level_up(trans, path, path->level++);
        btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
}

/* debug */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
                                struct btree_path *,
                                struct btree_bkey_cached_common *b,
                                unsigned);

int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_path_verify_locks(struct btree_path *);
void bch2_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
#endif

#endif /* _BCACHEFS_BTREE_LOCKING_H */
