// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "replicas.h"
#include "snapshot.h"
#include "trace.h"

#include <linux/random.h>
#include <linux/prefetch.h>

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *,
			btree_path_idx_t, btree_path_idx_t);

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
#ifdef TRACK_PATH_ALLOCATED
	return iter->ip_allocated;
#else
	return 0;
#endif
}

static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
static void bch2_trans_srcu_lock(struct btree_trans *);

static inline int __btree_path_cmp(const struct btree_path *l,
				   enum btree_id r_btree_id,
				   bool r_cached,
				   struct bpos r_pos,
				   unsigned r_level)
{
	/*
	 * Must match lock ordering as defined by __bch2_btree_node_lock:
	 */
	return  cmp_int(l->btree_id,	r_btree_id) ?:
		cmp_int((int) l->cached,	(int) r_cached) ?:
		bpos_cmp(l->pos,	r_pos) ?:
	       -cmp_int(l->level,	r_level);
}

static inline int btree_path_cmp(const struct btree_path *l,
				 const struct btree_path *r)
{
	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
}

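/*
 * Illustrative aside: __btree_path_cmp() above orders paths by
 * (btree_id, cached, pos, level) so that path order matches lock order.
 * The GNU "a ?: b" chaining (used throughout the kernel) works because
 * cmp_int() returns 0 for equal fields, so ties fall through to the next,
 * less significant field.  A minimal standalone userspace sketch of the
 * same idiom follows; the struct and names are hypothetical, and the code
 * is compiled out here.
 */
#if 0
#include <stdio.h>

#define cmp_int(a, b)	((a) > (b) ? 1 : (a) < (b) ? -1 : 0)

struct pathkey {
	int	btree_id;
	int	cached;
	long	pos;
	int	level;
};

/* Equal fields yield 0, so ?: falls through to the next field. */
static int pathkey_cmp(const struct pathkey *l, const struct pathkey *r)
{
	return  cmp_int(l->btree_id, r->btree_id) ?:
		cmp_int(l->cached,   r->cached)   ?:
		cmp_int(l->pos,      r->pos)      ?:
	       -cmp_int(l->level,    r->level);	/* higher level sorts first */
}

int main(void)
{
	struct pathkey a = { 1, 0, 100, 0 }, b = { 1, 0, 100, 1 };

	/* first three fields tie; b has the higher level, so it sorts before a */
	printf("%d\n", pathkey_cmp(&a, &b));	/* prints 1 */
	return 0;
}
#endif
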
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_all_snapshots) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_all_snapshots) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_is_extents) &&
	    !bkey_eq(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
	return pos;
}

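/*
 * Illustrative aside: bkey_successor()/bkey_predecessor() above step a
 * position either across all snapshots or within one snapshot, pinning the
 * snapshot field afterwards.  A rough standalone sketch of that distinction
 * on a (inode, offset, snapshot) tuple follows; the struct and helpers are
 * hypothetical approximations, compiled out here.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for struct bpos: ordered by (inode, offset, snapshot). */
struct pos {
	uint64_t inode;
	uint64_t offset;
	uint32_t snapshot;
};

/* Successor over the full tuple: bump snapshot first, carrying upwards. */
static struct pos pos_successor(struct pos p)
{
	if (++p.snapshot)
		return p;
	if (++p.offset)
		return p;
	++p.inode;
	return p;
}

/* Successor ignoring snapshots, then pin the iterator's snapshot. */
static struct pos pos_nosnap_successor(struct pos p, uint32_t iter_snapshot)
{
	if (!++p.offset)
		++p.inode;
	p.snapshot = iter_snapshot;
	return p;
}

int main(void)
{
	struct pos p = { 1, 10, 3 };
	struct pos a = pos_successor(p);
	struct pos b = pos_nosnap_successor(p, 3);

	printf("all snapshots: %llu:%llu:%u\n",
	       (unsigned long long) a.inode, (unsigned long long) a.offset, a.snapshot);
	printf("one snapshot:  %llu:%llu:%u\n",
	       (unsigned long long) b.inode, (unsigned long long) b.offset, b.snapshot);
	return 0;
}
#endif
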
static inline bool btree_path_pos_before_node(struct btree_path *path,
					      struct btree *b)
{
	return bpos_lt(path->pos, b->data->min_key);
}

static inline bool btree_path_pos_after_node(struct btree_path *path,
					     struct btree *b)
{
	return bpos_gt(path->pos, b->key.k.p);
}

static inline bool btree_path_pos_in_node(struct btree_path *path,
					  struct btree *b)
{
	return path->btree_id == b->c.btree_id &&
		!btree_path_pos_before_node(path, b) &&
		!btree_path_pos_after_node(path, b);
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_cached(struct btree_trans *trans,
					  struct btree_path *path)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(path, 0);

	if (!bch2_btree_node_relock(trans, path, 0))
		return;

	ck = (void *) path->l[0].b;
	BUG_ON(ck->key.btree_id != path->btree_id ||
	       !bkey_eq(ck->key.pos, path->pos));

	if (!locked)
		btree_node_unlock(trans, path, 0);
}

static void bch2_btree_path_verify_level(struct btree_trans *trans,
				struct btree_path *path, unsigned level)
{
	struct btree_path_level *l;
	struct btree_node_iter tmp;
	bool locked;
	struct bkey_packed *p, *k;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	struct printbuf buf3 = PRINTBUF;
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	l	= &path->l[level];
	tmp	= l->iter;
	locked	= btree_node_locked(path, level);

	if (path->cached) {
		if (!level)
			bch2_btree_path_verify_cached(trans, path);
		return;
	}

	if (!btree_path_node(path, level))
		return;

	if (!bch2_btree_node_relock_notrace(trans, path, level))
		return;

	BUG_ON(!btree_path_pos_in_node(path, l->b));

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past deleted keys:
	 */
	p = level
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		msg = "after";
		goto err;
	}

	if (!locked)
		btree_node_unlock(trans, path, level);
	return;
err:
	bch2_bpos_to_text(&buf1, path->pos);

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);

		bch2_bkey_to_text(&buf2, &uk);
	} else {
		prt_printf(&buf2, "(none)");
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);

		bch2_bkey_to_text(&buf3, &uk);
	} else {
		prt_printf(&buf3, "(none)");
	}

	panic("path should be %s key at level %u:\n"
	      "path pos %s\n"
	      "prev key %s\n"
	      "cur key %s\n",
	      msg, level, buf1.buf, buf2.buf, buf3.buf);
}

static void bch2_btree_path_verify(struct btree_trans *trans,
				   struct btree_path *path)
{
	struct bch_fs *c = trans->c;

	for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
		if (!path->l[i].b) {
			BUG_ON(!path->cached &&
			       bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
			break;
		}

		bch2_btree_path_verify_level(trans, path, i);
	}

	bch2_btree_path_verify_locks(path);
}

void bch2_trans_verify_paths(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned iter;

	trans_for_each_path(trans, path, iter)
		bch2_btree_path_verify(trans, path);
}

static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);

	BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
	       (iter->flags & BTREE_ITER_all_snapshots));

	BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
	       (iter->flags & BTREE_ITER_all_snapshots) &&
	       !btree_type_has_snapshot_field(iter->btree_id));

	if (iter->update_path)
		bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
	bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
}

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
	BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
	       !iter->pos.snapshot);

	BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
	       bkey_gt(iter->pos, iter->k.p));
}

static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter copy;
	struct bkey_s_c prev;
	int ret = 0;

	if (!bch2_debug_check_iterators)
		return 0;

	if (!(iter->flags & BTREE_ITER_filter_snapshots))
		return 0;

	if (bkey_err(k) || !k.k)
		return 0;

	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
					  iter->snapshot,
					  k.k->p.snapshot));

	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
			     BTREE_ITER_nopreserve|
			     BTREE_ITER_all_snapshots);
	prev = bch2_btree_iter_prev(&copy);
	if (!prev.k)
		goto out;

	ret = bkey_err(prev);
	if (ret)
		goto out;

	if (bkey_eq(prev.k->p, k.k->p) &&
	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
				      prev.k->p.snapshot) > 0) {
		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

		bch2_bkey_to_text(&buf1, k.k);
		bch2_bkey_to_text(&buf2, prev.k);

		panic("iter snap %u\n"
		      "k %s\n"
		      "prev %s\n",
		      iter->snapshot,
		      buf1.buf, buf2.buf);
	}
out:
	bch2_trans_iter_exit(trans, &copy);
	return ret;
}

void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
			    struct bpos pos, bool key_cache)
{
	bch2_trans_verify_not_unlocked(trans);

	struct btree_path *path;
	struct trans_for_each_path_inorder_iter iter;
	struct printbuf buf = PRINTBUF;

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder(trans, path, iter) {
		int cmp = cmp_int(path->btree_id, id) ?:
			cmp_int(path->cached, key_cache);

		if (cmp > 0)
			break;
		if (cmp < 0)
			continue;

		if (!btree_node_locked(path, 0) ||
		    !path->should_be_locked)
			continue;

		if (!key_cache) {
			if (bkey_ge(pos, path->l[0].b->data->min_key) &&
			    bkey_le(pos, path->l[0].b->key.k.p))
				return;
		} else {
			if (bkey_eq(pos, path->pos))
				return;
		}
	}

	bch2_dump_trans_paths_updates(trans);
	bch2_bpos_to_text(&buf, pos);

	panic("not locked: %s %s%s\n",
	      bch2_btree_id_str(id), buf.buf,
	      key_cache ? " cached" : "");
}

#else

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
						struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
					  struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

#endif

/* Btree path: fixups after btree updates */

static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_path_level *l = &path->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);
}

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path_with_node(trans, b, path, i) {
		__bch2_btree_path_fix_key_modified(path, b, where);
		bch2_btree_path_verify_level(trans, path, b->c.level);
	}
}

static void __bch2_btree_node_iter_fix(struct btree_path *path,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to readd it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		return;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    b->c.level) {
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}
}

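/*
 * Illustrative aside: the core bookkeeping in __bch2_btree_node_iter_fix()
 * is shifting an offset-based iterator position when clobber_u64s words at
 * "offset" are replaced by new_u64s words.  A tiny standalone sketch of
 * that arithmetic on a plain word offset follows (names hypothetical,
 * compiled out here).
 */
#if 0
#include <stdio.h>

/*
 * An iterator position stored as a word offset into a buffer must be
 * adjusted when [offset, offset + clobber_u64s) is replaced by new_u64s words.
 */
static unsigned fix_offset(unsigned it, unsigned offset,
			   unsigned clobber_u64s, unsigned new_u64s)
{
	int shift = (int) new_u64s - (int) clobber_u64s;

	if (it < offset)			/* before the edit: unchanged */
		return it;
	if (it < offset + clobber_u64s)		/* inside the clobbered range: snap to its end */
		return offset + new_u64s;
	return (unsigned) ((int) it + shift);	/* after the edit: shift by the size delta */
}

int main(void)
{
	/* replace 2 words at offset 4 with 5 words; iterators at 3, 5 and 8 */
	printf("%u %u %u\n",
	       fix_offset(3, 4, 2, 5),
	       fix_offset(5, 4, 2, 5),
	       fix_offset(8, 4, 2, 5));		/* prints: 3 9 11 */
	return 0;
}
#endif
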
void bch2_btree_node_iter_fix(struct btree_trans *trans,
			      struct btree_path *path,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
	struct btree_path *linked;
	unsigned i;

	if (node_iter != &path->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(path, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_path_with_node(trans, b, linked, i) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_path_verify_level(trans, linked, b->c.level);
	}
}

/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
						  struct btree_path_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	return bkey_disassemble(l->b, k, u);
}

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
							struct btree_path_level *l,
							struct bkey *u)
{
	return __btree_iter_unpack(c, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->key.k.p;
	trans->paths_sorted = false;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->data->min_key;
	trans->paths_sorted = false;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

static inline bool btree_path_advance_to_pos(struct btree_path *path,
					     struct btree_path_level *l,
					     int max_advance)
{
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}

static inline void __btree_path_level_init(struct btree_path *path,
					   unsigned level)
{
	struct btree_path_level *l = &path->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 * whiteout:
	 */
	if (level)
		bch2_btree_node_iter_peek(&l->iter, l->b);
}

void bch2_btree_path_level_init(struct btree_trans *trans,
				struct btree_path *path,
				struct btree *b)
{
	BUG_ON(path->cached);

	EBUG_ON(!btree_path_pos_in_node(path, b));

	path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
	path->l[b->c.level].b = b;
	__btree_path_level_init(path, b->c.level);
}

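/*
 * Illustrative aside: btree_path_advance_to_pos() above steps the node
 * iterator forward while the current key still sorts before the search
 * position, but gives up after max_advance steps so a badly positioned
 * iterator is reinitialized instead of walked linearly.  A standalone
 * sketch of that bounded-advance pattern over a sorted array follows
 * (names hypothetical, compiled out here).
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

/* Advance *idx until keys[*idx] >= target, or give up after max_advance steps. */
static bool advance_to_pos(const int *keys, unsigned nr, unsigned *idx,
			   int target, int max_advance)
{
	int nr_advanced = 0;

	while (*idx < nr && keys[*idx] < target) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;	/* too far: caller should reinit from scratch */
		(*idx)++;
		nr_advanced++;
	}
	return true;
}

int main(void)
{
	int keys[] = { 1, 3, 5, 7, 9, 11 };
	unsigned idx = 0;
	bool ok;

	ok = advance_to_pos(keys, 6, &idx, 8, 8);
	printf("%d idx=%u\n", ok, idx);		/* 1 idx=4 */

	idx = 0;
	ok = advance_to_pos(keys, 6, &idx, 8, 2);
	printf("%d idx=%u\n", ok, idx);		/* 0 idx=2: gave up, reinit instead */
	return 0;
}
#endif
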
/* Btree path: fixups after btree node updates: */

static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;

	trans_for_each_update(trans, i)
		if (!i->cached &&
		    i->level	== b->c.level &&
		    i->btree_id	== b->c.btree_id &&
		    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
		    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
			i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;

			if (unlikely(trans->journal_replay_not_finished)) {
				struct bkey_i *j_k =
					bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
								    i->k->k.p);

				if (j_k) {
					i->old_k = j_k->k;
					i->old_v = &j_k->v;
				}
			}
		}
}

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans,
			 struct btree_path *path,
			 struct btree *b)
{
	struct btree_path *prev;

	BUG_ON(!btree_path_pos_in_node(path, b));

	while ((prev = prev_btree_path(trans, path)) &&
	       btree_path_pos_in_node(prev, b))
		path = prev;

	for (;
	     path && btree_path_pos_in_node(path, b);
	     path = next_btree_path(trans, path))
		if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
			enum btree_node_locked_type t =
				btree_lock_want(path, b->c.level);

			if (t != BTREE_NODE_UNLOCKED) {
				btree_node_unlock(trans, path, b->c.level);
				six_lock_increment(&b->c.lock, (enum six_lock_type) t);
				mark_btree_node_locked(trans, path, b->c.level, t);
			}

			bch2_btree_path_level_init(trans, path, b);
		}

	bch2_trans_revalidate_updates_in_node(trans, b);
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path_with_node(trans, b, path, i)
		__btree_path_level_init(path, b->c.level);

	bch2_trans_revalidate_updates_in_node(trans, b);
}

/* Btree path: traverse, set_pos: */

static inline int btree_path_lock_root(struct btree_trans *trans,
				       struct btree_path *path,
				       unsigned depth_want,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
	enum six_lock_type lock_type;
	unsigned i;
	int ret;

	EBUG_ON(path->nodes_locked);

	while (1) {
		b = READ_ONCE(*rootp);
		path->level = READ_ONCE(b->c.level);

		if (unlikely(path->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			path->level = depth_want;
			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(path, path->level);
		ret = btree_node_lock(trans, path, &b->c,
				      path->level, lock_type, trace_ip);
		if (unlikely(ret)) {
			if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
				continue;
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				return ret;
			BUG();
		}

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == path->level &&
			   !race_fault())) {
			for (i = 0; i < path->level; i++)
				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
			path->l[path->level].b = b;
			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;

			mark_btree_node_locked(trans, path, path->level,
					       (enum btree_node_locked_type) lock_type);
			bch2_btree_path_level_init(trans, path, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}

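/*
 * Illustrative aside: btree_path_lock_root() reads the root pointer without
 * a lock, takes the node lock, then re-checks that the root has not been
 * replaced underneath it, retrying if it has.  A standalone pthread sketch
 * of that read / lock / re-check pattern follows (all names hypothetical,
 * compiled out here; build with -pthread).
 */
#if 0
#include <pthread.h>
#include <stdio.h>

struct node {
	pthread_mutex_t	lock;
	int		level;
};

static struct node *root;	/* replaced by concurrent splits in the real code */

/* Lock whatever the root currently is, retrying if it changes before we lock it. */
static struct node *lock_root(void)
{
	for (;;) {
		struct node *b = __atomic_load_n(&root, __ATOMIC_RELAXED);

		pthread_mutex_lock(&b->lock);
		if (b == __atomic_load_n(&root, __ATOMIC_RELAXED))
			return b;	/* still the root: caller holds the lock */

		pthread_mutex_unlock(&b->lock);	/* root moved: drop and retry */
	}
}

int main(void)
{
	static struct node n = { PTHREAD_MUTEX_INITIALIZER, 1 };

	root = &n;
	struct node *b = lock_root();
	printf("locked root at level %d\n", b->level);
	pthread_mutex_unlock(&b->lock);
	return 0;
}
#endif
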
noinline
static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (path->level > 1 ? 0 : 2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
				 struct btree_and_journal_iter *jiter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (path->level > 1 ? 0 : 2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_and_journal_iter_advance(jiter);
		k = bch2_btree_and_journal_iter_peek(jiter);
		if (!k.k)
			break;

		bch2_bkey_buf_reassemble(&tmp, c, k);
		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned plevel, struct btree *b)
{
	struct btree_path_level *l = &path->l[plevel];
	bool locked = btree_node_locked(path, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(trans, path, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(trans, path, plevel);
}

static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
						      struct btree_path *path,
						      unsigned flags,
						      struct bkey_buf *out)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_and_journal_iter jiter;
	struct bkey_s_c k;
	int ret = 0;

	__bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);

	k = bch2_btree_and_journal_iter_peek(&jiter);

	bch2_bkey_buf_reassemble(out, c, k);

	if ((flags & BTREE_ITER_prefetch) &&
	    c->opts.btree_node_prefetch)
		ret = btree_path_prefetch_j(trans, path, &jiter);

	bch2_btree_and_journal_iter_exit(&jiter);
	return ret;
}

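/*
 * Illustrative aside: while journal replay has not finished, a key may live
 * in the btree node, in the journal, or in both, so lookups peek both
 * sources and prefer the journal entry on a tie (it is newer).  A standalone
 * sketch of merging two sorted sources by position follows (names and key
 * representation are hypothetical, compiled out here).
 */
#if 0
#include <stdio.h>

struct src {
	const int	*keys;
	unsigned	nr, idx;
};

static const int *src_peek(struct src *s)
{
	return s->idx < s->nr ? &s->keys[s->idx] : NULL;
}

/* Peek the smaller of the two current keys; on a tie the journal (newer) wins. */
static const int *merged_peek(struct src *btree, struct src *journal)
{
	const int *b = src_peek(btree);
	const int *j = src_peek(journal);

	if (b && j)
		return *j <= *b ? j : b;
	return b ? b : j;
}

int main(void)
{
	int bkeys[] = { 1, 4, 9 }, jkeys[] = { 4, 6 };
	struct src b = { bkeys, 3, 0 }, j = { jkeys, 2, 0 };

	printf("%d\n", *merged_peek(&b, &j));	/* 1: smallest position across both */
	return 0;
}
#endif
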
static __always_inline int btree_path_down(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned flags,
					   unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree *b;
	unsigned level = path->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(path, level);
	struct bkey_buf tmp;
	int ret;

	EBUG_ON(!btree_node_locked(path, path->level));

	bch2_bkey_buf_init(&tmp);

	if (unlikely(trans->journal_replay_not_finished)) {
		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
		if (ret)
			goto err;
	} else {
		struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
		if (!k) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "node not found at pos ");
			bch2_bpos_to_text(&buf, path->pos);
			prt_str(&buf, " within parent node ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));

			bch2_fs_fatal_error(c, "%s", buf.buf);
			printbuf_exit(&buf);
			ret = -BCH_ERR_btree_need_topology_repair;
			goto err;
		}

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);

		if ((flags & BTREE_ITER_prefetch) &&
		    c->opts.btree_node_prefetch) {
			ret = btree_path_prefetch(trans, path);
			if (ret)
				goto err;
		}
	}

	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);
	if (unlikely(ret))
		goto err;

	if (likely(!trans->journal_replay_not_finished &&
		   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(trans, path, level + 1, b);

	if (btree_node_read_locked(path, level + 1))
		btree_node_unlock(trans, path, level + 1);

	mark_btree_node_locked(trans, path, level,
			       (enum btree_node_locked_type) lock_type);
	path->level = level;
	bch2_btree_path_level_init(trans, path, b);

	bch2_btree_path_verify_locks(path);
err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	unsigned long trace_ip = _RET_IP_;
	unsigned i;
	int ret = 0;

	if (trans->in_traverse_all)
		return -BCH_ERR_transaction_restart_in_traverse_all;

	trans->in_traverse_all = true;
retry_all:
	trans->restarted = 0;
	trans->last_restarted_ip = 0;

	trans_for_each_path(trans, path, i)
		path->should_be_locked = false;

	btree_trans_sort_paths(trans);

	bch2_trans_unlock(trans);
	cond_resched();
	trans_set_locked(trans);

	if (unlikely(trans->memory_allocation_failure)) {
		struct closure cl;

		closure_init_stack(&cl);
		do {
			ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	/* Now, redo traversals in correct order: */
	i = 0;
	while (i < trans->nr_sorted) {
		btree_path_idx_t idx = trans->sorted[i];

		/*
		 * Traversing a path can cause another path to be added at about
		 * the same position:
		 */
		if (trans->paths[idx].uptodate) {
			__btree_path_get(&trans->paths[idx], false);
			ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
			__btree_path_put(&trans->paths[idx], false);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
			    bch2_err_matches(ret, ENOMEM))
				goto retry_all;
			if (ret)
				goto err;
		} else {
			i++;
		}
	}

	/*
	 * We used to assert that all paths had been traversed here
	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
	 * path->should_be_locked is not set yet, we might have unlocked and
	 * then failed to relock a path - that's fine.
	 */
err:
	bch2_btree_cache_cannibalize_unlock(trans);

	trans->in_traverse_all = false;

	trace_and_count(c, trans_traverse_all, trans, trace_ip);
	return ret;
}

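/*
 * Illustrative aside: this code is built around restartable transactions -
 * almost any operation may fail with a "transaction restart" error, after
 * which the caller unwinds (all locks dropped) and retries from the top, as
 * bch2_btree_path_traverse_all() does with its retry_all label.  A standalone
 * sketch of that control flow with a stand-in error code follows
 * (names hypothetical, compiled out here).
 */
#if 0
#include <stdio.h>

#define ERR_TRANSACTION_RESTART	(-1000)	/* stand-in for a restart error code */

static int attempts;

/* Pretend the first attempt hits lock contention and asks for a restart. */
static int do_btree_work(void)
{
	return ++attempts < 2 ? ERR_TRANSACTION_RESTART : 0;
}

static int run_transaction(void)
{
	int ret;

	do {
		/* a real implementation would drop locks and reset state here */
		ret = do_btree_work();
	} while (ret == ERR_TRANSACTION_RESTART);

	return ret;
}

int main(void)
{
	int ret = run_transaction();

	printf("ret %d after %d attempts\n", ret, attempts);	/* ret 0 after 2 attempts */
	return 0;
}
#endif
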
static inline bool btree_path_check_pos_in_node(struct btree_path *path,
						unsigned l, int check_pos)
{
	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
		return false;
	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
		return false;
	return true;
}

static inline bool btree_path_good_node(struct btree_trans *trans,
					struct btree_path *path,
					unsigned l, int check_pos)
{
	return is_btree_node(path, l) &&
		bch2_btree_node_relock(trans, path, l) &&
		btree_path_check_pos_in_node(path, l, check_pos);
}

static void btree_path_set_level_down(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned new_level)
{
	unsigned l;

	path->level = new_level;

	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, l);

	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
	bch2_btree_path_verify(trans, path);
}

static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
							  struct btree_path *path,
							  int check_pos)
{
	unsigned i, l = path->level;
again:
	while (btree_path_node(path, l) &&
	       !btree_path_good_node(trans, path, l, check_pos))
		__btree_path_set_level_up(trans, path, l++);

	/* If we need intent locks, take them too: */
	for (i = l + 1;
	     i < path->locks_want && btree_path_node(path, i);
	     i++)
		if (!bch2_btree_node_relock(trans, path, i)) {
			while (l <= i)
				__btree_path_set_level_up(trans, path, l++);
			goto again;
		}

	return l;
}

static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
						     struct btree_path *path,
						     int check_pos)
{
	return likely(btree_node_locked(path, path->level) &&
		      btree_path_check_pos_in_node(path, path->level, check_pos))
		? path->level
		: __btree_path_up_until_good_node(trans, path, check_pos);
}

/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
int bch2_btree_path_traverse_one(struct btree_trans *trans,
				 btree_path_idx_t path_idx,
				 unsigned flags,
				 unsigned long trace_ip)
{
	struct btree_path *path = &trans->paths[path_idx];
	unsigned depth_want = path->level;
	int ret = -((int) trans->restarted);

	if (unlikely(ret))
		goto out;

	if (unlikely(!trans->srcu_held))
		bch2_trans_srcu_lock(trans);

	/*
	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
	 * and re-traverse the path without a transaction restart:
	 */
	if (path->should_be_locked) {
		ret = bch2_btree_path_relock(trans, path, trace_ip);
		goto out;
	}

	if (path->cached) {
		ret = bch2_btree_path_traverse_cached(trans, path, flags);
		goto out;
	}

	path = &trans->paths[path_idx];

	if (unlikely(path->level >= BTREE_MAX_DEPTH))
		goto out_uptodate;

	path->level = btree_path_up_until_good_node(trans, path, 0);
	unsigned max_level = path->level;

	EBUG_ON(btree_path_node(path, path->level) &&
		!btree_node_locked(path, path->level));

	/*
	 * Note: path->nodes[path->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_path_lock_root() comes next and that it can't fail
	 */
	while (path->level > depth_want) {
		ret = btree_path_node(path, path->level)
			? btree_path_down(trans, path, flags, trace_ip)
			: btree_path_lock_root(trans, path, depth_want, trace_ip);
		if (unlikely(ret)) {
			if (ret == 1) {
				/*
				 * No nodes at this level - got to the end of
				 * the btree:
				 */
				ret = 0;
				goto out;
			}

			__bch2_btree_path_unlock(trans, path);
			path->level = depth_want;
			path->l[path->level].b = ERR_PTR(ret);
			goto out;
		}
	}

	if (unlikely(max_level > path->level)) {
		struct btree_path *linked;
		unsigned iter;

		trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
			for (unsigned j = path->level + 1; j < max_level; j++)
				linked->l[j] = path->l[j];
	}

out_uptodate:
	path->uptodate = BTREE_ITER_UPTODATE;
out:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
		panic("ret %s (%i) trans->restarted %s (%i)\n",
		      bch2_err_str(ret), ret,
		      bch2_err_str(trans->restarted), trans->restarted);
	bch2_btree_path_verify(trans, path);
	return ret;
}

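/*
 * Illustrative aside: bch2_btree_path_traverse_one() first walks *up* past
 * levels it can no longer use (btree_path_up_until_good_node()), then walks
 * back *down* to the requested depth (btree_path_down()).  A very small
 * standalone sketch of that up-then-down state machine over per-level
 * "still usable" flags follows (entirely hypothetical, compiled out here).
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPTH 4

/* Walk up past levels that are no longer usable, then descend to depth_want. */
static unsigned traverse(bool good[MAX_DEPTH], unsigned level, unsigned depth_want)
{
	while (level < MAX_DEPTH && !good[level])
		level++;		/* drop a bad level, like __btree_path_set_level_up() */

	while (level > depth_want) {
		level--;		/* lock and descend one child, like btree_path_down() */
		good[level] = true;
	}
	return level;
}

int main(void)
{
	bool good[MAX_DEPTH] = { false, false, true, true };

	/* walks up to level 2 (first usable level), then back down to 0 */
	printf("ended at level %u\n", traverse(good, 0, 0));
	return 0;
}
#endif
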
static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
				   struct btree_path *src)
{
	unsigned i, offset = offsetof(struct btree_path, pos);

	memcpy((void *) dst + offset,
	       (void *) src + offset,
	       sizeof(struct btree_path) - offset);

	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
		unsigned t = btree_node_locked_type(dst, i);

		if (t != BTREE_NODE_UNLOCKED)
			six_lock_increment(&dst->l[i].b->c.lock, t);
	}
}

static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
					 bool intent, unsigned long ip)
{
	btree_path_idx_t new = btree_path_alloc(trans, src);
	btree_path_copy(trans, trans->paths + new, trans->paths + src);
	__btree_path_get(trans, trans->paths + new, intent);
#ifdef TRACK_PATH_ALLOCATED
	trans->paths[new].ip_allocated = ip;
#endif
	return new;
}

__flatten
btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
			btree_path_idx_t path, bool intent, unsigned long ip)
{
	struct btree_path *old = trans->paths + path;
	__btree_path_put(trans, trans->paths + path, intent);
	path = btree_path_clone(trans, path, intent, ip);
	trace_btree_path_clone(trans, old, trans->paths + path);
	trans->paths[path].preserve = false;
	return path;
}

btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *trans,
			  btree_path_idx_t path_idx, struct bpos new_pos,
			  bool intent, unsigned long ip)
{
	int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);

	bch2_trans_verify_not_in_restart(trans);
	EBUG_ON(!trans->paths[path_idx].ref);

	trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);

	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);

	struct btree_path *path = trans->paths + path_idx;
	path->pos = new_pos;
	trans->paths_sorted = false;

	if (unlikely(path->cached)) {
		btree_node_unlock(trans, path, 0);
		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		goto out;
	}

	unsigned level = btree_path_up_until_good_node(trans, path, cmp);

	if (btree_path_node(path, level)) {
		struct btree_path_level *l = &path->l[level];

		BUG_ON(!btree_node_locked(path, level));
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive).
		 */
		if (cmp < 0 ||
		    !btree_path_advance_to_pos(path, l, 8))
			bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

		/*
		 * Iterators to interior nodes should always be pointed at the
		 * first non-whiteout:
		 */
		if (unlikely(level))
			bch2_btree_node_iter_peek(&l->iter, l->b);
	}

	if (unlikely(level != path->level)) {
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		__bch2_btree_path_unlock(trans, path);
	}
out:
	bch2_btree_path_verify(trans, path);
	return path_idx;
}
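/*
 * Editor's note: __bch2_btree_path_make_mut() gives the caller a private copy
 * of a path before it is mutated - the caller's reference on the original is
 * dropped, the path is cloned (btree_path_copy() bumps the six-lock counts so
 * the clone owns its own lock references), and the clone is marked !preserve.
 * __bch2_btree_path_set_pos() relies on this so that repositioning one
 * iterator never moves a path that another iterator is still using.
 */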
/* Btree path: main interface: */

static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
{
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))
		return sib;

	sib = next_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))
		return sib;

	return NULL;
}

static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
{
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
		return sib;

	sib = next_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
		return sib;

	return NULL;
}

static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
{
	__bch2_btree_path_unlock(trans, trans->paths + path);
	btree_path_list_remove(trans, trans->paths + path);
	__clear_bit(path, trans->paths_allocated);
}

static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
{
	unsigned l = path->level;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!is_btree_node(path, l))
			return false;

		if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
			return false;

		l++;
	} while (l < path->locks_want);

	return true;
}

void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
{
	struct btree_path *path = trans->paths + path_idx, *dup;

	if (!__btree_path_put(trans, path, intent))
		return;

	dup = path->preserve
		? have_path_at_pos(trans, path)
		: have_node_at_pos(trans, path);

	trace_btree_path_free(trans, path_idx, dup);

	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
		return;

	if (path->should_be_locked && !trans->restarted) {
		if (!dup)
			return;

		if (!(trans->locked
		      ? bch2_btree_path_relock_norestart(trans, dup)
		      : bch2_btree_path_can_relock(trans, dup)))
			return;
	}

	if (dup) {
		dup->preserve		|= path->preserve;
		dup->should_be_locked	|= path->should_be_locked;
	}

	__bch2_path_free(trans, path_idx);
}

static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
				 bool intent)
{
	if (!__btree_path_put(trans, trans->paths + path, intent))
		return;

	__bch2_path_free(trans, path);
}
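/*
 * Editor's note: paths are reference counted; bch2_path_get() (below) takes a
 * reference and bch2_path_put() drops it, only freeing the path when a
 * duplicate at the same position can take over its preserve/should_be_locked
 * duties (or none are needed). A hedged usage sketch, error handling elided:
 *
 *	btree_path_idx_t idx = bch2_path_get(trans, btree_id, pos,
 *					     locks_want, level, flags, _THIS_IP_);
 *	...
 *	bch2_path_put(trans, idx, flags & BTREE_ITER_intent);
 */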
void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
{
	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
	      trans->restart_count, restart_count,
	      (void *) trans->last_begin_ip);
}

void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
{
	panic("in transaction restart: %s, last restarted by %pS\n",
	      bch2_err_str(trans->restarted),
	      (void *) trans->last_restarted_ip);
}

void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
{
	panic("trans should be locked, unlocked by %pS\n",
	      (void *) trans->last_unlock_ip);
}

noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
	prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
		   trans->nr_updates, trans->fn, trans->journal_res.seq);
	printbuf_indent_add(buf, 2);

	trans_for_each_update(trans, i) {
		struct bkey_s_c old = { &i->old_k, i->old_v };

		prt_printf(buf, "update: btree=%s cached=%u %pS\n",
			   bch2_btree_id_str(i->btree_id),
			   i->cached,
			   (void *) i->ip_allocated);

		prt_printf(buf, "  old ");
		bch2_bkey_val_to_text(buf, trans->c, old);
		prt_newline(buf);

		prt_printf(buf, "  new ");
		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
		prt_newline(buf);
	}

	for (struct jset_entry *e = trans->journal_entries;
	     e != btree_trans_journal_entries_top(trans);
	     e = vstruct_next(e))
		bch2_journal_entry_to_text(buf, trans->c, e);

	printbuf_indent_sub(buf, 2);
}

noinline __cold
void bch2_dump_trans_updates(struct btree_trans *trans)
{
	struct printbuf buf = PRINTBUF;

	bch2_trans_updates_to_text(&buf, trans);
	bch2_print_str(trans->c, buf.buf);
	printbuf_exit(&buf);
}

static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
	struct btree_path *path = trans->paths + path_idx;

	prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ",
		   path_idx, path->ref, path->intent_ref,
		   path->preserve ? 'P' : ' ',
		   path->should_be_locked ? 'S' : ' ',
		   path->cached ? 'C' : 'B',
		   bch2_btree_id_str(path->btree_id),
		   path->level);
	bch2_bpos_to_text(out, path->pos);

	if (!path->cached && btree_node_locked(path, path->level)) {
		prt_char(out, ' ');
		struct btree *b = path_l(path)->b;
		bch2_bpos_to_text(out, b->data->min_key);
		prt_char(out, '-');
		bch2_bpos_to_text(out, b->key.k.p);
	}

#ifdef TRACK_PATH_ALLOCATED
	prt_printf(out, " %pS", (void *) path->ip_allocated);
#endif
}

static const char *btree_node_locked_str(enum btree_node_locked_type t)
{
	switch (t) {
	case BTREE_NODE_UNLOCKED:
		return "unlocked";
	case BTREE_NODE_READ_LOCKED:
		return "read";
	case BTREE_NODE_INTENT_LOCKED:
		return "intent";
	case BTREE_NODE_WRITE_LOCKED:
		return "write";
	default:
		return NULL;
	}
}

void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
	bch2_btree_path_to_text_short(out, trans, path_idx);

	struct btree_path *path = trans->paths + path_idx;

	prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
	prt_newline(out);

	printbuf_indent_add(out, 2);
	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
		prt_printf(out, "l=%u locks %s seq %u node ", l,
			   btree_node_locked_str(btree_node_locked_type(path, l)),
			   path->l[l].lock_seq);

		int ret = PTR_ERR_OR_ZERO(path->l[l].b);
		if (ret)
			prt_str(out, bch2_err_str(ret));
		else
			prt_printf(out, "%px", path->l[l].b);
		prt_newline(out);
	}
	printbuf_indent_sub(out, 2);
}
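/*
 * Editor's note: the helpers above are debug output only. The short form
 * prints one line per path - index, refcounts, P/S/C flags, btree id, level
 * and position - and the long form appends per-level lock state. The field
 * values in this sample line are purely illustrative:
 *
 *	path: idx   3 ref 1:0 P   B btree=extents l=0 pos 4099:128:0
 */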
static noinline __cold
void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
				bool nosort)
{
	struct trans_for_each_path_inorder_iter iter;

	if (!nosort)
		btree_trans_sort_paths(trans);

	trans_for_each_path_idx_inorder(trans, iter) {
		bch2_btree_path_to_text_short(out, trans, iter.path_idx);
		prt_newline(out);
	}
}

noinline __cold
void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
{
	__bch2_trans_paths_to_text(out, trans, false);
}

static noinline __cold
void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
{
	struct printbuf buf = PRINTBUF;

	__bch2_trans_paths_to_text(&buf, trans, nosort);
	bch2_trans_updates_to_text(&buf, trans);

	bch2_print_str(trans->c, buf.buf);
	printbuf_exit(&buf);
}

noinline __cold
void bch2_dump_trans_paths_updates(struct btree_trans *trans)
{
	__bch2_dump_trans_paths_updates(trans, false);
}

noinline __cold
static void bch2_trans_update_max_paths(struct btree_trans *trans)
{
	struct btree_transaction_stats *s = btree_trans_stats(trans);
	struct printbuf buf = PRINTBUF;
	size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);

	bch2_trans_paths_to_text(&buf, trans);

	if (!buf.allocation_failure) {
		mutex_lock(&s->lock);
		if (nr > s->nr_max_paths) {
			s->nr_max_paths = nr;
			swap(s->max_paths_text, buf.buf);
		}
		mutex_unlock(&s->lock);
	}

	printbuf_exit(&buf);

	trans->nr_paths_max = nr;
}

noinline __cold
int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (trace_trans_restart_too_many_iters_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_trans_paths_to_text(&buf, trans);
		trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
		printbuf_exit(&buf);
	}

	count_event(trans->c, trans_restart_too_many_iters);

	return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
}

static noinline void btree_path_overflow(struct btree_trans *trans)
{
	bch2_dump_trans_paths_updates(trans);
	bch_err(trans->c, "trans path overflow");
}

static noinline void btree_paths_realloc(struct btree_trans *trans)
{
	unsigned nr = trans->nr_paths * 2;

	void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
			   sizeof(struct btree_trans_paths) +
			   nr * sizeof(struct btree_path) +
			   nr * sizeof(btree_path_idx_t) + 8 +
			   nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);

	unsigned long *paths_allocated = p;
	memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
	p += BITS_TO_LONGS(nr) * sizeof(unsigned long);

	p += sizeof(struct btree_trans_paths);
	struct btree_path *paths = p;
	*trans_paths_nr(paths) = nr;
	memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
	p += nr * sizeof(struct btree_path);

	btree_path_idx_t *sorted = p;
	memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
	p += nr * sizeof(btree_path_idx_t) + 8;

	struct btree_insert_entry *updates = p;
	memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));

	unsigned long *old = trans->paths_allocated;

	rcu_assign_pointer(trans->paths_allocated,	paths_allocated);
	rcu_assign_pointer(trans->paths,		paths);
	rcu_assign_pointer(trans->sorted,		sorted);
	rcu_assign_pointer(trans->updates,		updates);

	trans->nr_paths = nr;

	if (old != trans->_paths_allocated)
		kfree_rcu_mightsleep(old);
}
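/*
 * Editor's note: btree_paths_realloc() doubles the path tables using a single
 * allocation laid out, in order, as:
 *
 *	[ paths_allocated bitmap ][ struct btree_trans_paths ]
 *	[ nr * struct btree_path ][ nr * btree_path_idx_t + 8 ]
 *	[ nr * struct btree_insert_entry ]
 *
 * and publishes the new arrays with rcu_assign_pointer(), freeing the old
 * allocation with kfree_rcu_mightsleep() so lockless readers never observe a
 * torn update.
 */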
static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
						btree_path_idx_t pos)
{
	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);

	if (unlikely(idx == trans->nr_paths)) {
		if (trans->nr_paths == BTREE_ITER_MAX) {
			btree_path_overflow(trans);
			return 0;
		}

		btree_paths_realloc(trans);
	}

	/*
	 * Do this before marking the new path as allocated, since it won't be
	 * initialized yet:
	 */
	if (unlikely(idx > trans->nr_paths_max))
		bch2_trans_update_max_paths(trans);

	__set_bit(idx, trans->paths_allocated);

	struct btree_path *path = &trans->paths[idx];
	path->ref		= 0;
	path->intent_ref	= 0;
	path->nodes_locked	= 0;

	btree_path_list_add(trans, pos, idx);
	trans->paths_sorted = false;
	return idx;
}

btree_path_idx_t bch2_path_get(struct btree_trans *trans,
			       enum btree_id btree_id, struct bpos pos,
			       unsigned locks_want, unsigned level,
			       unsigned flags, unsigned long ip)
{
	struct btree_path *path;
	bool cached = flags & BTREE_ITER_cached;
	bool intent = flags & BTREE_ITER_intent;
	struct trans_for_each_path_inorder_iter iter;
	btree_path_idx_t path_pos = 0, path_idx;

	bch2_trans_verify_not_unlocked(trans);
	bch2_trans_verify_not_in_restart(trans);
	bch2_trans_verify_locks(trans);

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder(trans, path, iter) {
		if (__btree_path_cmp(path,
				     btree_id,
				     cached,
				     pos,
				     level) > 0)
			break;

		path_pos = iter.path_idx;
	}

	if (path_pos &&
	    trans->paths[path_pos].cached	== cached &&
	    trans->paths[path_pos].btree_id	== btree_id &&
	    trans->paths[path_pos].level	== level) {
		trace_btree_path_get(trans, trans->paths + path_pos, &pos);

		__btree_path_get(trans, trans->paths + path_pos, intent);
		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
		path = trans->paths + path_idx;
	} else {
		path_idx = btree_path_alloc(trans, path_pos);
		path = trans->paths + path_idx;

		__btree_path_get(trans, path, intent);
		path->pos			= pos;
		path->btree_id			= btree_id;
		path->cached			= cached;
		path->uptodate			= BTREE_ITER_NEED_TRAVERSE;
		path->should_be_locked		= false;
		path->level			= level;
		path->locks_want		= locks_want;
		path->nodes_locked		= 0;
		for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
			path->l[i].b		= ERR_PTR(-BCH_ERR_no_btree_node_init);
#ifdef TRACK_PATH_ALLOCATED
		path->ip_allocated		= ip;
#endif
		trans->paths_sorted		= false;

		trace_btree_path_alloc(trans, path);
	}

	if (!(flags & BTREE_ITER_nopreserve))
		path->preserve = true;

	if (path->intent_ref)
		locks_want = max(locks_want, level + 1);

	/*
	 * If the path has locks_want greater than requested, we don't downgrade
	 * it here - on transaction restart because btree node split needs to
	 * upgrade locks, we might be putting/getting the iterator again.
	 * Downgrading iterators only happens via bch2_trans_downgrade(), after
	 * a successful transaction commit.
	 */

	locks_want = min(locks_want, BTREE_MAX_DEPTH);
	if (locks_want > path->locks_want)
		bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);

	return path_idx;
}
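/*
 * Editor's note: bch2_path_get() first searches the sorted path list for an
 * existing path with matching btree/cached/level and reuses it (taking a new
 * reference and repositioning it) before falling back to allocating a fresh
 * one; this reuse is what keeps the number of live paths per transaction
 * within the BTREE_ITER_MAX limit enforced in btree_path_alloc().
 */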
btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
					    enum btree_id btree_id,
					    unsigned level,
					    struct bpos pos)
{
	btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
			     BTREE_ITER_nopreserve|
			     BTREE_ITER_intent, _RET_IP_);
	path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);

	struct btree_path *path = trans->paths + path_idx;
	bch2_btree_path_downgrade(trans, path);
	__bch2_btree_path_unlock(trans, path);
	return path_idx;
}

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
{
	struct btree_path_level *l = path_l(path);
	struct bkey_packed *_k;
	struct bkey_s_c k;

	if (unlikely(!l->b))
		return bkey_s_c_null;

	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
	EBUG_ON(!btree_node_locked(path, path->level));

	if (!path->cached) {
		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;

		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));

		if (!k.k || !bpos_eq(path->pos, k.k->p))
			goto hole;
	} else {
		struct bkey_cached *ck = (void *) path->l[0].b;
		if (!ck)
			return bkey_s_c_null;

		EBUG_ON(path->btree_id != ck->key.btree_id ||
			!bkey_eq(path->pos, ck->key.pos));

		*u = ck->k->k;
		k = bkey_i_to_s_c(ck->k);
	}

	return k;
hole:
	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}

void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	if (!iter->path || trans->restarted)
		return;

	struct btree_path *path = btree_iter_path(trans, iter);
	path->preserve		= false;
	if (path->ref == 1)
		path->should_be_locked	= false;
}

/* Btree iterators: */

int __must_check
__bch2_btree_iter_traverse(struct btree_iter *iter)
{
	return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
}

int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	int ret;

	bch2_trans_verify_not_unlocked(trans);

	iter->path = bch2_btree_path_set_pos(trans, iter->path,
					btree_iter_search_key(iter),
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
	if (ret)
		return ret;

	struct btree_path *path = btree_iter_path(trans, iter);
	if (btree_path_node(path, path->level))
		btree_path_set_should_be_locked(trans, path);
	return 0;
}
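/*
 * Editor's note: note the "hole" convention in bch2_btree_path_peek_slot()
 * above - when nothing exists at path->pos it does not return an error, it
 * returns a synthesized deleted key at that position (bkey_init() plus
 * u->p = path->pos) with a NULL value, which is what lets slot iteration
 * report every position, including empty ones.
 */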
/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct btree *b = NULL;
	int ret;

	EBUG_ON(trans->paths[iter->path].cached);
	bch2_btree_iter_verify(iter);

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (ret)
		goto err;

	struct btree_path *path = btree_iter_path(trans, iter);
	b = btree_path_node(path, path->level);
	if (!b)
		goto out;

	BUG_ON(bpos_lt(b->key.k.p, iter->pos));

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return b;
err:
	b = ERR_PTR(ret);
	goto out;
}

/* Only kept for -tools */
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
{
	struct btree *b;

	while (b = bch2_btree_iter_peek_node(iter),
	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
		bch2_trans_begin(iter->trans);

	return b;
}

struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct btree *b = NULL;
	int ret;

	EBUG_ON(trans->paths[iter->path].cached);
	bch2_trans_verify_not_in_restart(trans);
	bch2_btree_iter_verify(iter);

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (ret)
		goto err;

	struct btree_path *path = btree_iter_path(trans, iter);

	/* already at end? */
	if (!btree_path_node(path, path->level))
		return NULL;

	/* got to end? */
	if (!btree_path_node(path, path->level + 1)) {
		btree_path_set_level_up(trans, path);
		return NULL;
	}

	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
		__bch2_btree_path_unlock(trans, path);
		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		goto err;
	}

	b = btree_path_node(path, path->level + 1);

	if (bpos_eq(iter->pos, b->key.k.p)) {
		__btree_path_set_level_up(trans, path, path->level++);
	} else {
		if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, path->level + 1);

		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */
		iter->path = bch2_btree_path_set_pos(trans, iter->path,
					bpos_successor(iter->pos),
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

		path = btree_iter_path(trans, iter);
		btree_path_set_level_down(trans, path, iter->min_depth);

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (ret)
			goto err;

		path = btree_iter_path(trans, iter);
		b = path->l[path->level].b;
	}

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return b;
err:
	b = ERR_PTR(ret);
	goto out;
}
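/*
 * Editor's note: bch2_btree_iter_peek_node()/bch2_btree_iter_next_node() are
 * the building blocks used by the node-walking helpers in btree_iter.h; a
 * hedged sketch of the pattern, restart and error handling elided:
 *
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     b && !IS_ERR(b);
 *	     b = bch2_btree_iter_next_node(&iter))
 *		process(b);
 */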
/* Iterate across keys (in leaf nodes only) */

inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
	struct bpos pos = iter->k.p;
	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
		     ? bpos_eq(pos, SPOS_MAX)
		     : bkey_eq(pos, SPOS_MAX));

	if (ret && !(iter->flags & BTREE_ITER_is_extents))
		pos = bkey_successor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
	struct bpos pos = bkey_start_pos(&iter->k);
	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
		     ? bpos_eq(pos, POS_MIN)
		     : bkey_eq(pos, POS_MIN));

	if (ret && !(iter->flags & BTREE_ITER_is_extents))
		pos = bkey_predecessor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

static noinline
void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
					struct bkey_s_c *k)
{
	struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;

	trans_for_each_update(trans, i)
		if (!i->key_cache_already_flushed &&
		    i->btree_id == iter->btree_id &&
		    bpos_le(i->k->k.p, iter->pos) &&
		    bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
			iter->k = i->k->k;
			*k = bkey_i_to_s_c(i->k);
		}
}

static noinline
void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
				   struct bkey_s_c *k)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bpos end = path_l(path)->b->key.k.p;

	trans_for_each_update(trans, i)
		if (!i->key_cache_already_flushed &&
		    i->btree_id == iter->btree_id &&
		    bpos_ge(i->k->k.p, path->pos) &&
		    bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
			iter->k = i->k->k;
			*k = bkey_i_to_s_c(i->k);
		}
}

static noinline
void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
					struct bkey_s_c *k)
{
	trans_for_each_update(trans, i)
		if (!i->key_cache_already_flushed &&
		    i->btree_id == iter->btree_id &&
		    bpos_eq(i->k->k.p, iter->pos)) {
			iter->k = i->k->k;
			*k = bkey_i_to_s_c(i->k);
		}
}

static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
					      struct btree_iter *iter,
					      struct bpos end_pos)
{
	struct btree_path *path = btree_iter_path(trans, iter);

	return bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
					   path->level,
					   path->pos,
					   end_pos,
					   &iter->journal_idx);
}

static noinline
struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
					      struct btree_iter *iter)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);

	if (k) {
		iter->k = k->k;
		return bkey_i_to_s_c(k);
	} else {
		return bkey_s_c_null;
	}
}

static noinline
struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
					 struct btree_iter *iter,
					 struct bkey_s_c k)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bkey_i *next_journal =
		bch2_btree_journal_peek(trans, iter,
				k.k ? k.k->p : path_l(path)->b->key.k.p);

	if (next_journal) {
		iter->k = next_journal->k;
		k = bkey_i_to_s_c(next_journal);
	}

	return k;
}
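/*
 * Editor's note: the peek path below layers several key sources on top of the
 * btree proper - keys still only in the journal (btree_trans_peek_journal()),
 * the key cache (btree_trans_peek_key_cache()), and this transaction's own
 * pending updates (bch2_btree_trans_peek_updates()) - so iteration observes
 * what the tree will look like once everything is flushed and committed.
 */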
/*
 * Checks btree key cache for key at iter->pos and returns it if present, or
 * bkey_s_c_null:
 */
static noinline
struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
{
	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;
	struct bkey u;
	struct bkey_s_c k;
	int ret;

	bch2_trans_verify_not_in_restart(trans);
	bch2_trans_verify_not_unlocked(trans);

	if ((iter->flags & BTREE_ITER_key_cache_fill) &&
	    bpos_eq(iter->pos, pos))
		return bkey_s_c_null;

	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
		return bkey_s_c_null;

	if (!iter->key_cache_path)
		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
						     iter->flags & BTREE_ITER_intent, 0,
						     iter->flags|BTREE_ITER_cached|
						     BTREE_ITER_cached_nofill,
						     _THIS_IP_);

	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

	ret =   bch2_btree_path_traverse(trans, iter->key_cache_path,
					 iter->flags|BTREE_ITER_cached) ?:
		bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);

	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
	if (k.k && !bkey_err(k)) {
		iter->k = u;
		k.k = &iter->k;
	}
	return k;
}

static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
{
	struct btree_trans *trans = iter->trans;
	struct bkey_s_c k, k2;
	int ret;

	EBUG_ON(btree_iter_path(trans, iter)->cached);
	bch2_btree_iter_verify(iter);

	while (1) {
		struct btree_path_level *l;

		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);
			goto out;
		}

		struct btree_path *path = btree_iter_path(trans, iter);
		l = path_l(path);

		if (unlikely(!l->b)) {
			/* No btree nodes at requested level: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);
			k = bkey_s_c_null;
			goto out;
		}

		btree_path_set_should_be_locked(trans, path);

		k = btree_path_level_peek_all(trans->c, l, &iter->k);

		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
		    k.k &&
		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
			k = k2;
			ret = bkey_err(k);
			if (ret) {
				bch2_btree_iter_set_pos(iter, iter->pos);
				goto out;
			}
		}

		if (unlikely(iter->flags & BTREE_ITER_with_journal))
			k = btree_trans_peek_journal(trans, iter, k);

		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
			     trans->nr_updates))
			bch2_btree_trans_peek_updates(trans, iter, &k);

		if (k.k && bkey_deleted(k.k)) {
			/*
			 * If we've got a whiteout, and it's after the search
			 * key, advance the search key to the whiteout instead
			 * of just after the whiteout - it might be a btree
			 * whiteout, with a real key at the same position, since
			 * in the btree deleted keys sort before non deleted.
			 */
			search_key = !bpos_eq(search_key, k.k->p)
				? k.k->p
				: bpos_successor(k.k->p);
			continue;
		}

		if (likely(k.k)) {
			break;
		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
			/* Advance to next leaf node: */
			search_key = bpos_successor(l->b->key.k.p);
		} else {
			/* End of btree: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);
			k = bkey_s_c_null;
			goto out;
		}
	}
out:
	bch2_btree_iter_verify(iter);

	return k;
}
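/*
 * Editor's note: __bch2_btree_iter_peek() is the raw worker - it can return
 * whiteouts and keys from unrelated snapshots, and it leaves iter->pos for the
 * caller to update. bch2_btree_iter_peek_upto() below applies the
 * end-of-range check, snapshot filtering and position update on top of it.
 */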
2289 */ 2259 */ 2290 struct bkey_s_c bch2_btree_iter_peek_upto(str 2260 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end) 2291 { 2261 { 2292 struct btree_trans *trans = iter->tra 2262 struct btree_trans *trans = iter->trans; 2293 struct bpos search_key = btree_iter_s 2263 struct bpos search_key = btree_iter_search_key(iter); 2294 struct bkey_s_c k; 2264 struct bkey_s_c k; 2295 struct bpos iter_pos; 2265 struct bpos iter_pos; 2296 int ret; 2266 int ret; 2297 2267 2298 bch2_trans_verify_not_unlocked(trans) 2268 bch2_trans_verify_not_unlocked(trans); 2299 EBUG_ON((iter->flags & BTREE_ITER_fil 2269 EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX)); 2300 2270 2301 if (iter->update_path) { 2271 if (iter->update_path) { 2302 bch2_path_put_nokeep(trans, i 2272 bch2_path_put_nokeep(trans, iter->update_path, 2303 iter->fl 2273 iter->flags & BTREE_ITER_intent); 2304 iter->update_path = 0; 2274 iter->update_path = 0; 2305 } 2275 } 2306 2276 2307 bch2_btree_iter_verify_entry_exit(ite 2277 bch2_btree_iter_verify_entry_exit(iter); 2308 2278 2309 while (1) { 2279 while (1) { 2310 k = __bch2_btree_iter_peek(it 2280 k = __bch2_btree_iter_peek(iter, search_key); 2311 if (unlikely(!k.k)) 2281 if (unlikely(!k.k)) 2312 goto end; 2282 goto end; 2313 if (unlikely(bkey_err(k))) 2283 if (unlikely(bkey_err(k))) 2314 goto out_no_locked; 2284 goto out_no_locked; 2315 2285 2316 /* 2286 /* 2317 * We need to check against @ 2287 * We need to check against @end before FILTER_SNAPSHOTS because 2318 * if we get to a different i 2288 * if we get to a different inode that requested we might be 2319 * seeing keys for a differen 2289 * seeing keys for a different snapshot tree that will all be 2320 * filtered out. 2290 * filtered out. 2321 * 2291 * 2322 * But we can't do the full c 2292 * But we can't do the full check here, because bkey_start_pos() 2323 * isn't monotonically increa 2293 * isn't monotonically increasing before FILTER_SNAPSHOTS, and 2324 * that's what we check again 2294 * that's what we check against in extents mode: 2325 */ 2295 */ 2326 if (unlikely(!(iter->flags & 2296 if (unlikely(!(iter->flags & BTREE_ITER_is_extents) 2327 ? bkey_gt(k.k->p 2297 ? bkey_gt(k.k->p, end) 2328 : k.k->p.inode > 2298 : k.k->p.inode > end.inode)) 2329 goto end; 2299 goto end; 2330 2300 2331 if (iter->update_path && 2301 if (iter->update_path && 2332 !bkey_eq(trans->paths[ite 2302 !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) { 2333 bch2_path_put_nokeep( 2303 bch2_path_put_nokeep(trans, iter->update_path, 2334 2304 iter->flags & BTREE_ITER_intent); 2335 iter->update_path = 0 2305 iter->update_path = 0; 2336 } 2306 } 2337 2307 2338 if ((iter->flags & BTREE_ITER 2308 if ((iter->flags & BTREE_ITER_filter_snapshots) && 2339 (iter->flags & BTREE_ITER 2309 (iter->flags & BTREE_ITER_intent) && 2340 !(iter->flags & BTREE_ITE 2310 !(iter->flags & BTREE_ITER_is_extents) && 2341 !iter->update_path) { 2311 !iter->update_path) { 2342 struct bpos pos = k.k 2312 struct bpos pos = k.k->p; 2343 2313 2344 if (pos.snapshot < it 2314 if (pos.snapshot < iter->snapshot) { 2345 search_key = 2315 search_key = bpos_successor(k.k->p); 2346 continue; 2316 continue; 2347 } 2317 } 2348 2318 2349 pos.snapshot = iter-> 2319 pos.snapshot = iter->snapshot; 2350 2320 2351 /* 2321 /* 2352 * advance, same as o 2322 * advance, same as on exit for iter->path, but only up 2353 * to snapshot 2323 * to snapshot 2354 */ 2324 */ 2355 __btree_path_get(tran !! 
2325 __btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_intent); 2356 iter->update_path = i 2326 iter->update_path = iter->path; 2357 2327 2358 iter->update_path = b 2328 iter->update_path = bch2_btree_path_set_pos(trans, 2359 2329 iter->update_path, pos, 2360 2330 iter->flags & BTREE_ITER_intent, 2361 2331 _THIS_IP_); 2362 ret = bch2_btree_path 2332 ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags); 2363 if (unlikely(ret)) { 2333 if (unlikely(ret)) { 2364 k = bkey_s_c_ 2334 k = bkey_s_c_err(ret); 2365 goto out_no_l 2335 goto out_no_locked; 2366 } 2336 } 2367 } 2337 } 2368 2338 2369 /* 2339 /* 2370 * We can never have a key in 2340 * We can never have a key in a leaf node at POS_MAX, so 2371 * we don't have to check the 2341 * we don't have to check these successor() calls: 2372 */ 2342 */ 2373 if ((iter->flags & BTREE_ITER 2343 if ((iter->flags & BTREE_ITER_filter_snapshots) && 2374 !bch2_snapshot_is_ancesto 2344 !bch2_snapshot_is_ancestor(trans->c, 2375 2345 iter->snapshot, 2376 2346 k.k->p.snapshot)) { 2377 search_key = bpos_suc 2347 search_key = bpos_successor(k.k->p); 2378 continue; 2348 continue; 2379 } 2349 } 2380 2350 2381 if (bkey_whiteout(k.k) && 2351 if (bkey_whiteout(k.k) && 2382 !(iter->flags & BTREE_ITE 2352 !(iter->flags & BTREE_ITER_all_snapshots)) { 2383 search_key = bkey_suc 2353 search_key = bkey_successor(iter, k.k->p); 2384 continue; 2354 continue; 2385 } 2355 } 2386 2356 2387 /* 2357 /* 2388 * iter->pos should be monono 2358 * iter->pos should be mononotically increasing, and always be 2389 * equal to the key we just r 2359 * equal to the key we just returned - except extents can 2390 * straddle iter->pos: 2360 * straddle iter->pos: 2391 */ 2361 */ 2392 if (!(iter->flags & BTREE_ITE 2362 if (!(iter->flags & BTREE_ITER_is_extents)) 2393 iter_pos = k.k->p; 2363 iter_pos = k.k->p; 2394 else 2364 else 2395 iter_pos = bkey_max(i 2365 iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k)); 2396 2366 2397 if (unlikely(iter->flags & BT !! 2367 if (unlikely(!(iter->flags & BTREE_ITER_is_extents) 2398 iter->flags & BT !! 2368 ? bkey_gt(iter_pos, end) 2399 !! 2369 : bkey_ge(iter_pos, end))) 2400 goto end; 2370 goto end; 2401 2371 2402 break; 2372 break; 2403 } 2373 } 2404 2374 2405 iter->pos = iter_pos; 2375 iter->pos = iter_pos; 2406 2376 2407 iter->path = bch2_btree_path_set_pos( 2377 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p, 2408 iter->flags & 2378 iter->flags & BTREE_ITER_intent, 2409 btree_iter_ip 2379 btree_iter_ip_allocated(iter)); 2410 2380 2411 btree_path_set_should_be_locked(trans !! 2381 btree_path_set_should_be_locked(btree_iter_path(trans, iter)); 2412 out_no_locked: 2382 out_no_locked: 2413 if (iter->update_path) { 2383 if (iter->update_path) { 2414 ret = bch2_btree_path_relock( 2384 ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_); 2415 if (unlikely(ret)) 2385 if (unlikely(ret)) 2416 k = bkey_s_c_err(ret) 2386 k = bkey_s_c_err(ret); 2417 else 2387 else 2418 btree_path_set_should !! 
2388 btree_path_set_should_be_locked(trans->paths + iter->update_path); 2419 } 2389 } 2420 2390 2421 if (!(iter->flags & BTREE_ITER_all_sn 2391 if (!(iter->flags & BTREE_ITER_all_snapshots)) 2422 iter->pos.snapshot = iter->sn 2392 iter->pos.snapshot = iter->snapshot; 2423 2393 2424 ret = bch2_btree_iter_verify_ret(iter 2394 ret = bch2_btree_iter_verify_ret(iter, k); 2425 if (unlikely(ret)) { 2395 if (unlikely(ret)) { 2426 bch2_btree_iter_set_pos(iter, 2396 bch2_btree_iter_set_pos(iter, iter->pos); 2427 k = bkey_s_c_err(ret); 2397 k = bkey_s_c_err(ret); 2428 } 2398 } 2429 2399 2430 bch2_btree_iter_verify_entry_exit(ite 2400 bch2_btree_iter_verify_entry_exit(iter); 2431 2401 2432 return k; 2402 return k; 2433 end: 2403 end: 2434 bch2_btree_iter_set_pos(iter, end); 2404 bch2_btree_iter_set_pos(iter, end); 2435 k = bkey_s_c_null; 2405 k = bkey_s_c_null; 2436 goto out_no_locked; 2406 goto out_no_locked; 2437 } 2407 } 2438 2408 2439 /** 2409 /** 2440 * bch2_btree_iter_next() - returns first key 2410 * bch2_btree_iter_next() - returns first key greater than iterator's current 2441 * position 2411 * position 2442 * @iter: iterator to peek from 2412 * @iter: iterator to peek from 2443 * 2413 * 2444 * Returns: key if found, or an error ext 2414 * Returns: key if found, or an error extractable with bkey_err(). 2445 */ 2415 */ 2446 struct bkey_s_c bch2_btree_iter_next(struct b 2416 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter) 2447 { 2417 { 2448 if (!bch2_btree_iter_advance(iter)) 2418 if (!bch2_btree_iter_advance(iter)) 2449 return bkey_s_c_null; 2419 return bkey_s_c_null; 2450 2420 2451 return bch2_btree_iter_peek(iter); 2421 return bch2_btree_iter_peek(iter); 2452 } 2422 } 2453 2423 2454 /** 2424 /** 2455 * bch2_btree_iter_peek_prev() - returns firs 2425 * bch2_btree_iter_peek_prev() - returns first key less than or equal to 2456 * iterator's current position 2426 * iterator's current position 2457 * @iter: iterator to peek from 2427 * @iter: iterator to peek from 2458 * 2428 * 2459 * Returns: key if found, or an error ext 2429 * Returns: key if found, or an error extractable with bkey_err(). 
2460 */ 2430 */ 2461 struct bkey_s_c bch2_btree_iter_peek_prev(str 2431 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) 2462 { 2432 { 2463 struct btree_trans *trans = iter->tra 2433 struct btree_trans *trans = iter->trans; 2464 struct bpos search_key = iter->pos; 2434 struct bpos search_key = iter->pos; 2465 struct bkey_s_c k; 2435 struct bkey_s_c k; 2466 struct bkey saved_k; 2436 struct bkey saved_k; 2467 const struct bch_val *saved_v; 2437 const struct bch_val *saved_v; 2468 btree_path_idx_t saved_path = 0; 2438 btree_path_idx_t saved_path = 0; 2469 int ret; 2439 int ret; 2470 2440 2471 bch2_trans_verify_not_unlocked(trans) 2441 bch2_trans_verify_not_unlocked(trans); 2472 EBUG_ON(btree_iter_path(trans, iter)- 2442 EBUG_ON(btree_iter_path(trans, iter)->cached || 2473 btree_iter_path(trans, iter)- 2443 btree_iter_path(trans, iter)->level); 2474 2444 2475 if (iter->flags & BTREE_ITER_with_jou 2445 if (iter->flags & BTREE_ITER_with_journal) 2476 return bkey_s_c_err(-BCH_ERR_ 2446 return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported); 2477 2447 2478 bch2_btree_iter_verify(iter); 2448 bch2_btree_iter_verify(iter); 2479 bch2_btree_iter_verify_entry_exit(ite 2449 bch2_btree_iter_verify_entry_exit(iter); 2480 2450 2481 if (iter->flags & BTREE_ITER_filter_s 2451 if (iter->flags & BTREE_ITER_filter_snapshots) 2482 search_key.snapshot = U32_MAX 2452 search_key.snapshot = U32_MAX; 2483 2453 2484 while (1) { 2454 while (1) { 2485 iter->path = bch2_btree_path_ 2455 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, 2486 2456 iter->flags & BTREE_ITER_intent, 2487 2457 btree_iter_ip_allocated(iter)); 2488 2458 2489 ret = bch2_btree_path_travers 2459 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); 2490 if (unlikely(ret)) { 2460 if (unlikely(ret)) { 2491 /* ensure that iter-> 2461 /* ensure that iter->k is consistent with iter->pos: */ 2492 bch2_btree_iter_set_p 2462 bch2_btree_iter_set_pos(iter, iter->pos); 2493 k = bkey_s_c_err(ret) 2463 k = bkey_s_c_err(ret); 2494 goto out_no_locked; 2464 goto out_no_locked; 2495 } 2465 } 2496 2466 2497 struct btree_path *path = btr 2467 struct btree_path *path = btree_iter_path(trans, iter); 2498 2468 2499 k = btree_path_level_peek(tra 2469 k = btree_path_level_peek(trans, path, &path->l[0], &iter->k); 2500 if (!k.k || 2470 if (!k.k || 2501 ((iter->flags & BTREE_ITE 2471 ((iter->flags & BTREE_ITER_is_extents) 2502 ? bpos_ge(bkey_start_pos 2472 ? 
bpos_ge(bkey_start_pos(k.k), search_key) 2503 : bpos_gt(k.k->p, search 2473 : bpos_gt(k.k->p, search_key))) 2504 k = btree_path_level_ 2474 k = btree_path_level_prev(trans, path, &path->l[0], &iter->k); 2505 2475 2506 if (unlikely((iter->flags & B 2476 if (unlikely((iter->flags & BTREE_ITER_with_updates) && 2507 trans->nr_update 2477 trans->nr_updates)) 2508 bch2_btree_trans_peek 2478 bch2_btree_trans_peek_prev_updates(trans, iter, &k); 2509 2479 2510 if (likely(k.k)) { 2480 if (likely(k.k)) { 2511 if (iter->flags & BTR 2481 if (iter->flags & BTREE_ITER_filter_snapshots) { 2512 if (k.k->p.sn 2482 if (k.k->p.snapshot == iter->snapshot) 2513 goto 2483 goto got_key; 2514 2484 2515 /* 2485 /* 2516 * If we have 2486 * If we have a saved candidate, and we're no 2517 * longer at 2487 * longer at the same _key_ (not pos), return 2518 * that candi 2488 * that candidate 2519 */ 2489 */ 2520 if (saved_pat 2490 if (saved_path && !bkey_eq(k.k->p, saved_k.p)) { 2521 bch2_ 2491 bch2_path_put_nokeep(trans, iter->path, 2522 2492 iter->flags & BTREE_ITER_intent); 2523 iter- 2493 iter->path = saved_path; 2524 saved 2494 saved_path = 0; 2525 iter- 2495 iter->k = saved_k; 2526 k.v 2496 k.v = saved_v; 2527 goto 2497 goto got_key; 2528 } 2498 } 2529 2499 2530 if (bch2_snap 2500 if (bch2_snapshot_is_ancestor(trans->c, 2531 2501 iter->snapshot, 2532 2502 k.k->p.snapshot)) { 2533 if (s 2503 if (saved_path) 2534 2504 bch2_path_put_nokeep(trans, saved_path, 2535 2505 iter->flags & BTREE_ITER_intent); 2536 saved 2506 saved_path = btree_path_clone(trans, iter->path, 2537 2507 iter->flags & BTREE_ITER_intent, 2538 2508 _THIS_IP_); 2539 path 2509 path = btree_iter_path(trans, iter); 2540 trace << 2541 saved 2510 saved_k = *k.k; 2542 saved 2511 saved_v = k.v; 2543 } 2512 } 2544 2513 2545 search_key = 2514 search_key = bpos_predecessor(k.k->p); 2546 continue; 2515 continue; 2547 } 2516 } 2548 got_key: 2517 got_key: 2549 if (bkey_whiteout(k.k 2518 if (bkey_whiteout(k.k) && 2550 !(iter->flags & B 2519 !(iter->flags & BTREE_ITER_all_snapshots)) { 2551 search_key = 2520 search_key = bkey_predecessor(iter, k.k->p); 2552 if (iter->fla 2521 if (iter->flags & BTREE_ITER_filter_snapshots) 2553 searc 2522 search_key.snapshot = U32_MAX; 2554 continue; 2523 continue; 2555 } 2524 } 2556 2525 2557 btree_path_set_should !! 
2526 btree_path_set_should_be_locked(path); 2558 break; 2527 break; 2559 } else if (likely(!bpos_eq(pa 2528 } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) { 2560 /* Advance to previou 2529 /* Advance to previous leaf node: */ 2561 search_key = bpos_pre 2530 search_key = bpos_predecessor(path->l[0].b->data->min_key); 2562 } else { 2531 } else { 2563 /* Start of btree: */ 2532 /* Start of btree: */ 2564 bch2_btree_iter_set_p 2533 bch2_btree_iter_set_pos(iter, POS_MIN); 2565 k = bkey_s_c_null; 2534 k = bkey_s_c_null; 2566 goto out_no_locked; 2535 goto out_no_locked; 2567 } 2536 } 2568 } 2537 } 2569 2538 2570 EBUG_ON(bkey_gt(bkey_start_pos(k.k), 2539 EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos)); 2571 2540 2572 /* Extents can straddle iter->pos: */ 2541 /* Extents can straddle iter->pos: */ 2573 if (bkey_lt(k.k->p, iter->pos)) 2542 if (bkey_lt(k.k->p, iter->pos)) 2574 iter->pos = k.k->p; 2543 iter->pos = k.k->p; 2575 2544 2576 if (iter->flags & BTREE_ITER_filter_s 2545 if (iter->flags & BTREE_ITER_filter_snapshots) 2577 iter->pos.snapshot = iter->sn 2546 iter->pos.snapshot = iter->snapshot; 2578 out_no_locked: 2547 out_no_locked: 2579 if (saved_path) 2548 if (saved_path) 2580 bch2_path_put_nokeep(trans, s 2549 bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent); 2581 2550 2582 bch2_btree_iter_verify_entry_exit(ite 2551 bch2_btree_iter_verify_entry_exit(iter); 2583 bch2_btree_iter_verify(iter); 2552 bch2_btree_iter_verify(iter); 2584 2553 2585 return k; 2554 return k; 2586 } 2555 } 2587 2556 2588 /** 2557 /** 2589 * bch2_btree_iter_prev() - returns first key 2558 * bch2_btree_iter_prev() - returns first key less than iterator's current 2590 * position 2559 * position 2591 * @iter: iterator to peek from 2560 * @iter: iterator to peek from 2592 * 2561 * 2593 * Returns: key if found, or an error ext 2562 * Returns: key if found, or an error extractable with bkey_err(). 
2594 */ 2563 */ 2595 struct bkey_s_c bch2_btree_iter_prev(struct b 2564 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter) 2596 { 2565 { 2597 if (!bch2_btree_iter_rewind(iter)) 2566 if (!bch2_btree_iter_rewind(iter)) 2598 return bkey_s_c_null; 2567 return bkey_s_c_null; 2599 2568 2600 return bch2_btree_iter_peek_prev(iter 2569 return bch2_btree_iter_peek_prev(iter); 2601 } 2570 } 2602 2571 2603 struct bkey_s_c bch2_btree_iter_peek_slot(str 2572 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) 2604 { 2573 { 2605 struct btree_trans *trans = iter->tra 2574 struct btree_trans *trans = iter->trans; 2606 struct bpos search_key; 2575 struct bpos search_key; 2607 struct bkey_s_c k; 2576 struct bkey_s_c k; 2608 int ret; 2577 int ret; 2609 2578 2610 bch2_trans_verify_not_unlocked(trans) 2579 bch2_trans_verify_not_unlocked(trans); 2611 bch2_btree_iter_verify(iter); 2580 bch2_btree_iter_verify(iter); 2612 bch2_btree_iter_verify_entry_exit(ite 2581 bch2_btree_iter_verify_entry_exit(iter); 2613 EBUG_ON(btree_iter_path(trans, iter)- 2582 EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache)); 2614 2583 2615 /* extents can't span inode numbers: 2584 /* extents can't span inode numbers: */ 2616 if ((iter->flags & BTREE_ITER_is_exte 2585 if ((iter->flags & BTREE_ITER_is_extents) && 2617 unlikely(iter->pos.offset == KEY_ 2586 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) { 2618 if (iter->pos.inode == KEY_IN 2587 if (iter->pos.inode == KEY_INODE_MAX) 2619 return bkey_s_c_null; 2588 return bkey_s_c_null; 2620 2589 2621 bch2_btree_iter_set_pos(iter, 2590 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos)); 2622 } 2591 } 2623 2592 2624 search_key = btree_iter_search_key(it 2593 search_key = btree_iter_search_key(iter); 2625 iter->path = bch2_btree_path_set_pos( 2594 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, 2626 iter- 2595 iter->flags & BTREE_ITER_intent, 2627 btree 2596 btree_iter_ip_allocated(iter)); 2628 2597 2629 ret = bch2_btree_path_traverse(trans, 2598 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); 2630 if (unlikely(ret)) { 2599 if (unlikely(ret)) { 2631 k = bkey_s_c_err(ret); 2600 k = bkey_s_c_err(ret); 2632 goto out_no_locked; 2601 goto out_no_locked; 2633 } 2602 } 2634 2603 2635 if ((iter->flags & BTREE_ITER_cached) 2604 if ((iter->flags & BTREE_ITER_cached) || 2636 !(iter->flags & (BTREE_ITER_is_ex 2605 !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) { 2637 k = bkey_s_c_null; 2606 k = bkey_s_c_null; 2638 2607 2639 if (unlikely((iter->flags & B 2608 if (unlikely((iter->flags & BTREE_ITER_with_updates) && 2640 trans->nr_update 2609 trans->nr_updates)) { 2641 bch2_btree_trans_peek 2610 bch2_btree_trans_peek_slot_updates(trans, iter, &k); 2642 if (k.k) 2611 if (k.k) 2643 goto out; 2612 goto out; 2644 } 2613 } 2645 2614 2646 if (unlikely(iter->flags & BT 2615 if (unlikely(iter->flags & BTREE_ITER_with_journal) && 2647 (k = btree_trans_peek_slo 2616 (k = btree_trans_peek_slot_journal(trans, iter)).k) 2648 goto out; 2617 goto out; 2649 2618 2650 if (unlikely(iter->flags & BT 2619 if (unlikely(iter->flags & BTREE_ITER_with_key_cache) && 2651 (k = btree_trans_peek_key 2620 (k = btree_trans_peek_key_cache(iter, iter->pos)).k) { 2652 if (!bkey_err(k)) 2621 if (!bkey_err(k)) 2653 iter->k = *k. 
2622 iter->k = *k.k; 2654 /* We're not returnin 2623 /* We're not returning a key from iter->path: */ 2655 goto out_no_locked; 2624 goto out_no_locked; 2656 } 2625 } 2657 2626 2658 k = bch2_btree_path_peek_slot 2627 k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k); 2659 if (unlikely(!k.k)) 2628 if (unlikely(!k.k)) 2660 goto out_no_locked; 2629 goto out_no_locked; 2661 } else { 2630 } else { 2662 struct bpos next; 2631 struct bpos next; 2663 struct bpos end = iter->pos; 2632 struct bpos end = iter->pos; 2664 2633 2665 if (iter->flags & BTREE_ITER_ 2634 if (iter->flags & BTREE_ITER_is_extents) 2666 end.offset = U64_MAX; 2635 end.offset = U64_MAX; 2667 2636 2668 EBUG_ON(btree_iter_path(trans 2637 EBUG_ON(btree_iter_path(trans, iter)->level); 2669 2638 2670 if (iter->flags & BTREE_ITER_ 2639 if (iter->flags & BTREE_ITER_intent) { 2671 struct btree_iter ite 2640 struct btree_iter iter2; 2672 2641 2673 bch2_trans_copy_iter( 2642 bch2_trans_copy_iter(&iter2, iter); 2674 k = bch2_btree_iter_p 2643 k = bch2_btree_iter_peek_upto(&iter2, end); 2675 2644 2676 if (k.k && !bkey_err( 2645 if (k.k && !bkey_err(k)) { 2677 swap(iter->ke 2646 swap(iter->key_cache_path, iter2.key_cache_path); 2678 iter->k = ite 2647 iter->k = iter2.k; 2679 k.k = &iter-> 2648 k.k = &iter->k; 2680 } 2649 } 2681 bch2_trans_iter_exit( 2650 bch2_trans_iter_exit(trans, &iter2); 2682 } else { 2651 } else { 2683 struct bpos pos = ite 2652 struct bpos pos = iter->pos; 2684 2653 2685 k = bch2_btree_iter_p 2654 k = bch2_btree_iter_peek_upto(iter, end); 2686 if (unlikely(bkey_err 2655 if (unlikely(bkey_err(k))) 2687 bch2_btree_it 2656 bch2_btree_iter_set_pos(iter, pos); 2688 else 2657 else 2689 iter->pos = p 2658 iter->pos = pos; 2690 } 2659 } 2691 2660 2692 if (unlikely(bkey_err(k))) 2661 if (unlikely(bkey_err(k))) 2693 goto out_no_locked; 2662 goto out_no_locked; 2694 2663 2695 next = k.k ? bkey_start_pos(k 2664 next = k.k ? bkey_start_pos(k.k) : POS_MAX; 2696 2665 2697 if (bkey_lt(iter->pos, next)) 2666 if (bkey_lt(iter->pos, next)) { 2698 bkey_init(&iter->k); 2667 bkey_init(&iter->k); 2699 iter->k.p = iter->pos 2668 iter->k.p = iter->pos; 2700 2669 2701 if (iter->flags & BTR 2670 if (iter->flags & BTREE_ITER_is_extents) { 2702 bch2_key_resi 2671 bch2_key_resize(&iter->k, 2703 2672 min_t(u64, KEY_SIZE_MAX, 2704 2673 (next.inode == iter->pos.inode 2705 2674 ? next.offset 2706 2675 : KEY_OFFSET_MAX) - 2707 2676 iter->pos.offset)); 2708 EBUG_ON(!iter 2677 EBUG_ON(!iter->k.size); 2709 } 2678 } 2710 2679 2711 k = (struct bkey_s_c) 2680 k = (struct bkey_s_c) { &iter->k, NULL }; 2712 } 2681 } 2713 } 2682 } 2714 out: 2683 out: 2715 btree_path_set_should_be_locked(trans !! 
2684 btree_path_set_should_be_locked(btree_iter_path(trans, iter)); 2716 out_no_locked: 2685 out_no_locked: 2717 bch2_btree_iter_verify_entry_exit(ite 2686 bch2_btree_iter_verify_entry_exit(iter); 2718 bch2_btree_iter_verify(iter); 2687 bch2_btree_iter_verify(iter); 2719 ret = bch2_btree_iter_verify_ret(iter 2688 ret = bch2_btree_iter_verify_ret(iter, k); 2720 if (unlikely(ret)) 2689 if (unlikely(ret)) 2721 return bkey_s_c_err(ret); 2690 return bkey_s_c_err(ret); 2722 2691 2723 return k; 2692 return k; 2724 } 2693 } 2725 2694 2726 struct bkey_s_c bch2_btree_iter_next_slot(str 2695 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter) 2727 { 2696 { 2728 if (!bch2_btree_iter_advance(iter)) 2697 if (!bch2_btree_iter_advance(iter)) 2729 return bkey_s_c_null; 2698 return bkey_s_c_null; 2730 2699 2731 return bch2_btree_iter_peek_slot(iter 2700 return bch2_btree_iter_peek_slot(iter); 2732 } 2701 } 2733 2702 2734 struct bkey_s_c bch2_btree_iter_prev_slot(str 2703 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter) 2735 { 2704 { 2736 if (!bch2_btree_iter_rewind(iter)) 2705 if (!bch2_btree_iter_rewind(iter)) 2737 return bkey_s_c_null; 2706 return bkey_s_c_null; 2738 2707 2739 return bch2_btree_iter_peek_slot(iter 2708 return bch2_btree_iter_peek_slot(iter); 2740 } 2709 } 2741 2710 2742 /* Obsolete, but still used by rust wrapper i << 2743 struct bkey_s_c bch2_btree_iter_peek_and_rest 2711 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter) 2744 { 2712 { 2745 struct bkey_s_c k; 2713 struct bkey_s_c k; 2746 2714 2747 while (btree_trans_too_many_iters(ite 2715 while (btree_trans_too_many_iters(iter->trans) || 2748 (k = bch2_btree_iter_peek_type 2716 (k = bch2_btree_iter_peek_type(iter, iter->flags), 2749 bch2_err_matches(bkey_err(k), 2717 bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart))) 2750 bch2_trans_begin(iter->trans) 2718 bch2_trans_begin(iter->trans); 2751 2719 2752 return k; 2720 return k; 2753 } 2721 } 2754 2722 2755 /* new transactional stuff: */ 2723 /* new transactional stuff: */ 2756 2724 2757 #ifdef CONFIG_BCACHEFS_DEBUG 2725 #ifdef CONFIG_BCACHEFS_DEBUG 2758 static void btree_trans_verify_sorted_refs(st 2726 static void btree_trans_verify_sorted_refs(struct btree_trans *trans) 2759 { 2727 { 2760 struct btree_path *path; 2728 struct btree_path *path; 2761 unsigned i; 2729 unsigned i; 2762 2730 2763 BUG_ON(trans->nr_sorted != bitmap_wei 2731 BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1); 2764 2732 2765 trans_for_each_path(trans, path, i) { 2733 trans_for_each_path(trans, path, i) { 2766 BUG_ON(path->sorted_idx >= tr 2734 BUG_ON(path->sorted_idx >= trans->nr_sorted); 2767 BUG_ON(trans->sorted[path->so 2735 BUG_ON(trans->sorted[path->sorted_idx] != i); 2768 } 2736 } 2769 2737 2770 for (i = 0; i < trans->nr_sorted; i++ 2738 for (i = 0; i < trans->nr_sorted; i++) { 2771 unsigned idx = trans->sorted[ 2739 unsigned idx = trans->sorted[i]; 2772 2740 2773 BUG_ON(!test_bit(idx, trans-> 2741 BUG_ON(!test_bit(idx, trans->paths_allocated)); 2774 BUG_ON(trans->paths[idx].sort 2742 BUG_ON(trans->paths[idx].sorted_idx != i); 2775 } 2743 } 2776 } 2744 } 2777 2745 2778 static void btree_trans_verify_sorted(struct 2746 static void btree_trans_verify_sorted(struct btree_trans *trans) 2779 { 2747 { 2780 struct btree_path *path, *prev = NULL 2748 struct btree_path *path, *prev = NULL; 2781 struct trans_for_each_path_inorder_it 2749 struct trans_for_each_path_inorder_iter iter; 2782 2750 2783 if 
(!bch2_debug_check_iterators) 2751 if (!bch2_debug_check_iterators) 2784 return; 2752 return; 2785 2753 2786 trans_for_each_path_inorder(trans, pa 2754 trans_for_each_path_inorder(trans, path, iter) { 2787 if (prev && btree_path_cmp(pr 2755 if (prev && btree_path_cmp(prev, path) > 0) { 2788 __bch2_dump_trans_pat 2756 __bch2_dump_trans_paths_updates(trans, true); 2789 panic("trans paths ou 2757 panic("trans paths out of order!\n"); 2790 } 2758 } 2791 prev = path; 2759 prev = path; 2792 } 2760 } 2793 } 2761 } 2794 #else 2762 #else 2795 static inline void btree_trans_verify_sorted_ 2763 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {} 2796 static inline void btree_trans_verify_sorted( 2764 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {} 2797 #endif 2765 #endif 2798 2766 2799 void __bch2_btree_trans_sort_paths(struct btr 2767 void __bch2_btree_trans_sort_paths(struct btree_trans *trans) 2800 { 2768 { 2801 int i, l = 0, r = trans->nr_sorted, i 2769 int i, l = 0, r = trans->nr_sorted, inc = 1; 2802 bool swapped; 2770 bool swapped; 2803 2771 2804 btree_trans_verify_sorted_refs(trans) 2772 btree_trans_verify_sorted_refs(trans); 2805 2773 2806 if (trans->paths_sorted) 2774 if (trans->paths_sorted) 2807 goto out; 2775 goto out; 2808 2776 2809 /* 2777 /* 2810 * Cocktail shaker sort: this is effi 2778 * Cocktail shaker sort: this is efficient because iterators will be 2811 * mostly sorted. 2779 * mostly sorted. 2812 */ 2780 */ 2813 do { 2781 do { 2814 swapped = false; 2782 swapped = false; 2815 2783 2816 for (i = inc > 0 ? l : r - 2; 2784 for (i = inc > 0 ? l : r - 2; 2817 i + 1 < r && i >= l; 2785 i + 1 < r && i >= l; 2818 i += inc) { 2786 i += inc) { 2819 if (btree_path_cmp(tr 2787 if (btree_path_cmp(trans->paths + trans->sorted[i], 2820 tr 2788 trans->paths + trans->sorted[i + 1]) > 0) { 2821 swap(trans->s 2789 swap(trans->sorted[i], trans->sorted[i + 1]); 2822 trans->paths[ 2790 trans->paths[trans->sorted[i]].sorted_idx = i; 2823 trans->paths[ 2791 trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1; 2824 swapped = tru 2792 swapped = true; 2825 } 2793 } 2826 } 2794 } 2827 2795 2828 if (inc > 0) 2796 if (inc > 0) 2829 --r; 2797 --r; 2830 else 2798 else 2831 l++; 2799 l++; 2832 inc = -inc; 2800 inc = -inc; 2833 } while (swapped); 2801 } while (swapped); 2834 2802 2835 trans->paths_sorted = true; 2803 trans->paths_sorted = true; 2836 out: 2804 out: 2837 btree_trans_verify_sorted(trans); 2805 btree_trans_verify_sorted(trans); 2838 } 2806 } 2839 2807 2840 static inline void btree_path_list_remove(str 2808 static inline void btree_path_list_remove(struct btree_trans *trans, 2841 str 2809 struct btree_path *path) 2842 { 2810 { 2843 EBUG_ON(path->sorted_idx >= trans->nr 2811 EBUG_ON(path->sorted_idx >= trans->nr_sorted); 2844 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2812 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2845 trans->nr_sorted--; 2813 trans->nr_sorted--; 2846 memmove_u64s_down_small(trans->sorted 2814 memmove_u64s_down_small(trans->sorted + path->sorted_idx, 2847 trans->sorted 2815 trans->sorted + path->sorted_idx + 1, 2848 DIV_ROUND_UP( 2816 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 2849 2817 sizeof(u64) / sizeof(btree_path_idx_t))); 2850 #else 2818 #else 2851 array_remove_item(trans->sorted, tran 2819 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx); 2852 #endif 2820 #endif 2853 for (unsigned i = path->sorted_idx; i 2821 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++) 2854 
trans->paths[trans->sorted[i] 2822 trans->paths[trans->sorted[i]].sorted_idx = i; 2855 } 2823 } 2856 2824 2857 static inline void btree_path_list_add(struct 2825 static inline void btree_path_list_add(struct btree_trans *trans, 2858 btree_ 2826 btree_path_idx_t pos, 2859 btree_ 2827 btree_path_idx_t path_idx) 2860 { 2828 { 2861 struct btree_path *path = trans->path 2829 struct btree_path *path = trans->paths + path_idx; 2862 2830 2863 path->sorted_idx = pos ? trans->paths 2831 path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted; 2864 2832 2865 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2833 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2866 memmove_u64s_up_small(trans->sorted + 2834 memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1, 2867 trans->sorted + 2835 trans->sorted + path->sorted_idx, 2868 DIV_ROUND_UP(tr 2836 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 2869 si 2837 sizeof(u64) / sizeof(btree_path_idx_t))); 2870 trans->nr_sorted++; 2838 trans->nr_sorted++; 2871 trans->sorted[path->sorted_idx] = pat 2839 trans->sorted[path->sorted_idx] = path_idx; 2872 #else 2840 #else 2873 array_insert_item(trans->sorted, tran 2841 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx); 2874 #endif 2842 #endif 2875 2843 2876 for (unsigned i = path->sorted_idx; i 2844 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++) 2877 trans->paths[trans->sorted[i] 2845 trans->paths[trans->sorted[i]].sorted_idx = i; 2878 2846 2879 btree_trans_verify_sorted_refs(trans) 2847 btree_trans_verify_sorted_refs(trans); 2880 } 2848 } 2881 2849 2882 void bch2_trans_iter_exit(struct btree_trans 2850 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter) 2883 { 2851 { 2884 if (iter->update_path) 2852 if (iter->update_path) 2885 bch2_path_put_nokeep(trans, i 2853 bch2_path_put_nokeep(trans, iter->update_path, 2886 iter->flags & B 2854 iter->flags & BTREE_ITER_intent); 2887 if (iter->path) 2855 if (iter->path) 2888 bch2_path_put(trans, iter->pa 2856 bch2_path_put(trans, iter->path, 2889 iter->flags & B 2857 iter->flags & BTREE_ITER_intent); 2890 if (iter->key_cache_path) 2858 if (iter->key_cache_path) 2891 bch2_path_put(trans, iter->ke 2859 bch2_path_put(trans, iter->key_cache_path, 2892 iter->flags & B 2860 iter->flags & BTREE_ITER_intent); 2893 iter->path = 0; 2861 iter->path = 0; 2894 iter->update_path = 0; 2862 iter->update_path = 0; 2895 iter->key_cache_path = 0; 2863 iter->key_cache_path = 0; 2896 iter->trans = NULL; 2864 iter->trans = NULL; 2897 } 2865 } 2898 2866 2899 void bch2_trans_iter_init_outlined(struct btr 2867 void bch2_trans_iter_init_outlined(struct btree_trans *trans, 2900 struct btree_iter * 2868 struct btree_iter *iter, 2901 enum btree_id btree 2869 enum btree_id btree_id, struct bpos pos, 2902 unsigned flags) 2870 unsigned flags) 2903 { 2871 { 2904 bch2_trans_iter_init_common(trans, it 2872 bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0, 2905 bch2_btree_ite 2873 bch2_btree_iter_flags(trans, btree_id, flags), 2906 _RET_IP_); 2874 _RET_IP_); 2907 } 2875 } 2908 2876 2909 void bch2_trans_node_iter_init(struct btree_t 2877 void bch2_trans_node_iter_init(struct btree_trans *trans, 2910 struct btree_i 2878 struct btree_iter *iter, 2911 enum btree_id 2879 enum btree_id btree_id, 2912 struct bpos po 2880 struct bpos pos, 2913 unsigned locks 2881 unsigned locks_want, 2914 unsigned depth 2882 unsigned depth, 2915 unsigned flags 2883 unsigned flags) 2916 { 2884 { 2917 flags |= BTREE_ITER_not_extents; 
2885 flags |= BTREE_ITER_not_extents; 2918 flags |= BTREE_ITER_snapshot_field; 2886 flags |= BTREE_ITER_snapshot_field; 2919 flags |= BTREE_ITER_all_snapshots; 2887 flags |= BTREE_ITER_all_snapshots; 2920 2888 2921 bch2_trans_iter_init_common(trans, it 2889 bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth, 2922 __bch2_btree_i 2890 __bch2_btree_iter_flags(trans, btree_id, flags), 2923 _RET_IP_); 2891 _RET_IP_); 2924 2892 2925 iter->min_depth = depth; 2893 iter->min_depth = depth; 2926 2894 2927 struct btree_path *path = btree_iter_ 2895 struct btree_path *path = btree_iter_path(trans, iter); 2928 BUG_ON(path->locks_want < min(locks_ 2896 BUG_ON(path->locks_want < min(locks_want, BTREE_MAX_DEPTH)); 2929 BUG_ON(path->level != depth); 2897 BUG_ON(path->level != depth); 2930 BUG_ON(iter->min_depth != depth); 2898 BUG_ON(iter->min_depth != depth); 2931 } 2899 } 2932 2900 2933 void bch2_trans_copy_iter(struct btree_iter * 2901 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src) 2934 { 2902 { 2935 struct btree_trans *trans = src->tran 2903 struct btree_trans *trans = src->trans; 2936 2904 2937 *dst = *src; 2905 *dst = *src; 2938 #ifdef TRACK_PATH_ALLOCATED 2906 #ifdef TRACK_PATH_ALLOCATED 2939 dst->ip_allocated = _RET_IP_; 2907 dst->ip_allocated = _RET_IP_; 2940 #endif 2908 #endif 2941 if (src->path) 2909 if (src->path) 2942 __btree_path_get(trans, trans !! 2910 __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_intent); 2943 if (src->update_path) 2911 if (src->update_path) 2944 __btree_path_get(trans, trans !! 2912 __btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_intent); 2945 dst->key_cache_path = 0; 2913 dst->key_cache_path = 0; 2946 } 2914 } 2947 2915 2948 void *__bch2_trans_kmalloc(struct btree_trans 2916 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size) 2949 { 2917 { 2950 struct bch_fs *c = trans->c; 2918 struct bch_fs *c = trans->c; 2951 unsigned new_top = trans->mem_top + s 2919 unsigned new_top = trans->mem_top + size; 2952 unsigned old_bytes = trans->mem_bytes 2920 unsigned old_bytes = trans->mem_bytes; 2953 unsigned new_bytes = roundup_pow_of_t 2921 unsigned new_bytes = roundup_pow_of_two(new_top); 2954 int ret; 2922 int ret; 2955 void *new_mem; 2923 void *new_mem; 2956 void *p; 2924 void *p; 2957 2925 2958 WARN_ON_ONCE(new_bytes > BTREE_TRANS_ 2926 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX); 2959 2927 2960 struct btree_transaction_stats *s = b 2928 struct btree_transaction_stats *s = btree_trans_stats(trans); 2961 s->max_mem = max(s->max_mem, new_byte 2929 s->max_mem = max(s->max_mem, new_bytes); 2962 2930 2963 if (trans->used_mempool) { 2931 if (trans->used_mempool) { 2964 if (trans->mem_bytes >= new_b 2932 if (trans->mem_bytes >= new_bytes) 2965 goto out_change_top; 2933 goto out_change_top; 2966 2934 2967 /* No more space from mempool 2935 /* No more space from mempool item, need malloc new one */ 2968 new_mem = kmalloc(new_bytes, 2936 new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN); 2969 if (unlikely(!new_mem)) { 2937 if (unlikely(!new_mem)) { 2970 bch2_trans_unlock(tra 2938 bch2_trans_unlock(trans); 2971 2939 2972 new_mem = kmalloc(new 2940 new_mem = kmalloc(new_bytes, GFP_KERNEL); 2973 if (!new_mem) 2941 if (!new_mem) 2974 return ERR_PT 2942 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc); 2975 2943 2976 ret = bch2_trans_relo 2944 ret = bch2_trans_relock(trans); 2977 if (ret) { 2945 if (ret) { 2978 kfree(new_mem 2946 kfree(new_mem); 2979 return ERR_PT 2947 return 
ERR_PTR(ret); 2980 } 2948 } 2981 } 2949 } 2982 memcpy(new_mem, trans->mem, t 2950 memcpy(new_mem, trans->mem, trans->mem_top); 2983 trans->used_mempool = false; 2951 trans->used_mempool = false; 2984 mempool_free(trans->mem, &c-> 2952 mempool_free(trans->mem, &c->btree_trans_mem_pool); 2985 goto out_new_mem; 2953 goto out_new_mem; 2986 } 2954 } 2987 2955 2988 new_mem = krealloc(trans->mem, new_by 2956 new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN); 2989 if (unlikely(!new_mem)) { 2957 if (unlikely(!new_mem)) { 2990 bch2_trans_unlock(trans); 2958 bch2_trans_unlock(trans); 2991 2959 2992 new_mem = krealloc(trans->mem 2960 new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL); 2993 if (!new_mem && new_bytes <= 2961 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) { 2994 new_mem = mempool_all 2962 new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL); 2995 new_bytes = BTREE_TRA 2963 new_bytes = BTREE_TRANS_MEM_MAX; 2996 memcpy(new_mem, trans 2964 memcpy(new_mem, trans->mem, trans->mem_top); 2997 trans->used_mempool = 2965 trans->used_mempool = true; 2998 kfree(trans->mem); 2966 kfree(trans->mem); 2999 } 2967 } 3000 2968 3001 if (!new_mem) 2969 if (!new_mem) 3002 return ERR_PTR(-BCH_E 2970 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc); 3003 2971 3004 trans->mem = new_mem; 2972 trans->mem = new_mem; 3005 trans->mem_bytes = new_bytes; 2973 trans->mem_bytes = new_bytes; 3006 2974 3007 ret = bch2_trans_relock(trans 2975 ret = bch2_trans_relock(trans); 3008 if (ret) 2976 if (ret) 3009 return ERR_PTR(ret); 2977 return ERR_PTR(ret); 3010 } 2978 } 3011 out_new_mem: 2979 out_new_mem: 3012 trans->mem = new_mem; 2980 trans->mem = new_mem; 3013 trans->mem_bytes = new_bytes; 2981 trans->mem_bytes = new_bytes; 3014 2982 3015 if (old_bytes) { 2983 if (old_bytes) { 3016 trace_and_count(c, trans_rest 2984 trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes); 3017 return ERR_PTR(btree_trans_re 2985 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced)); 3018 } 2986 } 3019 out_change_top: 2987 out_change_top: 3020 p = trans->mem + trans->mem_top; 2988 p = trans->mem + trans->mem_top; 3021 trans->mem_top += size; 2989 trans->mem_top += size; 3022 memset(p, 0, size); 2990 memset(p, 0, size); 3023 return p; 2991 return p; 3024 } 2992 } 3025 2993 3026 static inline void check_srcu_held_too_long(s 2994 static inline void check_srcu_held_too_long(struct btree_trans *trans) 3027 { 2995 { 3028 WARN(trans->srcu_held && time_after(j 2996 WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10), 3029 "btree trans held srcu lock (del 2997 "btree trans held srcu lock (delaying memory reclaim) for %lu seconds", 3030 (jiffies - trans->srcu_lock_time 2998 (jiffies - trans->srcu_lock_time) / HZ); 3031 } 2999 } 3032 3000 3033 void bch2_trans_srcu_unlock(struct btree_tran 3001 void bch2_trans_srcu_unlock(struct btree_trans *trans) 3034 { 3002 { 3035 if (trans->srcu_held) { 3003 if (trans->srcu_held) { 3036 struct bch_fs *c = trans->c; 3004 struct bch_fs *c = trans->c; 3037 struct btree_path *path; 3005 struct btree_path *path; 3038 unsigned i; 3006 unsigned i; 3039 3007 3040 trans_for_each_path(trans, pa 3008 trans_for_each_path(trans, path, i) 3041 if (path->cached && ! 
3009 if (path->cached && !btree_node_locked(path, 0)) 3042 path->l[0].b 3010 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset); 3043 3011 3044 check_srcu_held_too_long(tran 3012 check_srcu_held_too_long(trans); 3045 srcu_read_unlock(&c->btree_tr 3013 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx); 3046 trans->srcu_held = false; 3014 trans->srcu_held = false; 3047 } 3015 } 3048 } 3016 } 3049 3017 3050 static void bch2_trans_srcu_lock(struct btree 3018 static void bch2_trans_srcu_lock(struct btree_trans *trans) 3051 { 3019 { 3052 if (!trans->srcu_held) { 3020 if (!trans->srcu_held) { 3053 trans->srcu_idx = srcu_read_l 3021 trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier); 3054 trans->srcu_lock_time = jif 3022 trans->srcu_lock_time = jiffies; 3055 trans->srcu_held = true; 3023 trans->srcu_held = true; 3056 } 3024 } 3057 } 3025 } 3058 3026 3059 /** 3027 /** 3060 * bch2_trans_begin() - reset a transaction a 3028 * bch2_trans_begin() - reset a transaction after a interrupted attempt 3061 * @trans: transaction to reset 3029 * @trans: transaction to reset 3062 * 3030 * 3063 * Returns: current restart counter, to b 3031 * Returns: current restart counter, to be used with trans_was_restarted() 3064 * 3032 * 3065 * While iterating over nodes or updating nod 3033 * While iterating over nodes or updating nodes a attempt to lock a btree node 3066 * may return BCH_ERR_transaction_restart whe 3034 * may return BCH_ERR_transaction_restart when the trylock fails. When this 3067 * occurs bch2_trans_begin() should be called 3035 * occurs bch2_trans_begin() should be called and the transaction retried. 3068 */ 3036 */ 3069 u32 bch2_trans_begin(struct btree_trans *tran 3037 u32 bch2_trans_begin(struct btree_trans *trans) 3070 { 3038 { 3071 struct btree_path *path; 3039 struct btree_path *path; 3072 unsigned i; 3040 unsigned i; 3073 u64 now; 3041 u64 now; 3074 3042 3075 bch2_trans_reset_updates(trans); 3043 bch2_trans_reset_updates(trans); 3076 3044 3077 trans->restart_count++; 3045 trans->restart_count++; 3078 trans->mem_top = 0; 3046 trans->mem_top = 0; 3079 trans->journal_entries = NUL 3047 trans->journal_entries = NULL; 3080 3048 3081 trans_for_each_path(trans, path, i) { 3049 trans_for_each_path(trans, path, i) { 3082 path->should_be_locked = fals 3050 path->should_be_locked = false; 3083 3051 3084 /* 3052 /* 3085 * If the transaction wasn't 3053 * If the transaction wasn't restarted, we're presuming to be 3086 * doing something new: dont 3054 * doing something new: dont keep iterators excpt the ones that 3087 * are in use - except for th 3055 * are in use - except for the subvolumes btree: 3088 */ 3056 */ 3089 if (!trans->restarted && path 3057 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes) 3090 path->preserve = fals 3058 path->preserve = false; 3091 3059 3092 /* 3060 /* 3093 * XXX: we probably shouldn't 3061 * XXX: we probably shouldn't be doing this if the transaction 3094 * was restarted, but current 3062 * was restarted, but currently we still overflow transaction 3095 * iterators if we do that 3063 * iterators if we do that 3096 */ 3064 */ 3097 if (!path->ref && !path->pres 3065 if (!path->ref && !path->preserve) 3098 __bch2_path_free(tran 3066 __bch2_path_free(trans, i); 3099 else 3067 else 3100 path->preserve = fals 3068 path->preserve = false; 3101 } 3069 } 3102 3070 3103 now = local_clock(); 3071 now = local_clock(); 3104 3072 3105 if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LA 3073 if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) && 3106 
time_after64(now, trans->last_beg 3074 time_after64(now, trans->last_begin_time + 10)) 3107 __bch2_time_stats_update(&btr 3075 __bch2_time_stats_update(&btree_trans_stats(trans)->duration, 3108 tran 3076 trans->last_begin_time, now); 3109 3077 3110 if (!trans->restarted && 3078 if (!trans->restarted && 3111 (need_resched() || 3079 (need_resched() || 3112 time_after64(now, trans->last_be 3080 time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) { 3113 bch2_trans_unlock(trans); 3081 bch2_trans_unlock(trans); 3114 cond_resched(); 3082 cond_resched(); 3115 now = local_clock(); 3083 now = local_clock(); 3116 } 3084 } 3117 trans->last_begin_time = now; 3085 trans->last_begin_time = now; 3118 3086 3119 if (unlikely(trans->srcu_held && 3087 if (unlikely(trans->srcu_held && 3120 time_after(jiffies, tran 3088 time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10)))) 3121 bch2_trans_srcu_unlock(trans) 3089 bch2_trans_srcu_unlock(trans); 3122 3090 3123 trans->last_begin_ip = _RET_IP_; 3091 trans->last_begin_ip = _RET_IP_; 3124 3092 3125 trans_set_locked(trans); 3093 trans_set_locked(trans); 3126 3094 3127 if (trans->restarted) { 3095 if (trans->restarted) { 3128 bch2_btree_path_traverse_all( 3096 bch2_btree_path_traverse_all(trans); 3129 trans->notrace_relock_fail = 3097 trans->notrace_relock_fail = false; 3130 } 3098 } 3131 3099 3132 bch2_trans_verify_not_unlocked(trans) 3100 bch2_trans_verify_not_unlocked(trans); 3133 return trans->restart_count; 3101 return trans->restart_count; 3134 } 3102 } 3135 3103 3136 const char *bch2_btree_transaction_fns[BCH_TR 3104 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" }; 3137 3105 3138 unsigned bch2_trans_get_fn_idx(const char *fn 3106 unsigned bch2_trans_get_fn_idx(const char *fn) 3139 { 3107 { 3140 for (unsigned i = 0; i < ARRAY_SIZE(b 3108 for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++) 3141 if (!bch2_btree_transaction_f 3109 if (!bch2_btree_transaction_fns[i] || 3142 bch2_btree_transaction_fn 3110 bch2_btree_transaction_fns[i] == fn) { 3143 bch2_btree_transactio 3111 bch2_btree_transaction_fns[i] = fn; 3144 return i; 3112 return i; 3145 } 3113 } 3146 3114 3147 pr_warn_once("BCH_TRANSACTIONS_NR not 3115 pr_warn_once("BCH_TRANSACTIONS_NR not big enough!"); 3148 return 0; 3116 return 0; 3149 } 3117 } 3150 3118 3151 struct btree_trans *__bch2_trans_get(struct b 3119 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx) 3152 __acquires(&c->btree_trans_barrier) 3120 __acquires(&c->btree_trans_barrier) 3153 { 3121 { 3154 struct btree_trans *trans; 3122 struct btree_trans *trans; 3155 3123 3156 if (IS_ENABLED(__KERNEL__)) { 3124 if (IS_ENABLED(__KERNEL__)) { 3157 trans = this_cpu_xchg(c->btre 3125 trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL); 3158 if (trans) { 3126 if (trans) { 3159 memset(trans, 0, offs 3127 memset(trans, 0, offsetof(struct btree_trans, list)); 3160 goto got_trans; 3128 goto got_trans; 3161 } 3129 } 3162 } 3130 } 3163 3131 3164 trans = mempool_alloc(&c->btree_trans 3132 trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS); 3165 memset(trans, 0, sizeof(*trans)); 3133 memset(trans, 0, sizeof(*trans)); 3166 3134 3167 seqmutex_lock(&c->btree_trans_lock); 3135 seqmutex_lock(&c->btree_trans_lock); 3168 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) 3136 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) { 3169 struct btree_trans *pos; 3137 struct btree_trans *pos; 3170 pid_t pid = current->pid; 3138 pid_t pid = current->pid; 3171 3139 3172 
trans->locking_wait.task = cu 3140 trans->locking_wait.task = current; 3173 3141 3174 list_for_each_entry(pos, &c-> 3142 list_for_each_entry(pos, &c->btree_trans_list, list) { 3175 struct task_struct *p 3143 struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task); 3176 /* 3144 /* 3177 * We'd much prefer t 3145 * We'd much prefer to be stricter here and completely 3178 * disallow multiple 3146 * disallow multiple btree_trans in the same thread - 3179 * but the data move 3147 * but the data move path calls bch2_write when we 3180 * already have a btr 3148 * already have a btree_trans initialized. 3181 */ 3149 */ 3182 BUG_ON(pos_task && 3150 BUG_ON(pos_task && 3183 pid == pos_tas 3151 pid == pos_task->pid && 3184 pos->locked); 3152 pos->locked); 3185 } 3153 } 3186 } 3154 } 3187 3155 3188 list_add(&trans->list, &c->btree_tran 3156 list_add(&trans->list, &c->btree_trans_list); 3189 seqmutex_unlock(&c->btree_trans_lock) 3157 seqmutex_unlock(&c->btree_trans_lock); 3190 got_trans: 3158 got_trans: 3191 trans->c = c; 3159 trans->c = c; 3192 trans->last_begin_time = local_clock 3160 trans->last_begin_time = local_clock(); 3193 trans->fn_idx = fn_idx; 3161 trans->fn_idx = fn_idx; 3194 trans->locking_wait.task = current; 3162 trans->locking_wait.task = current; 3195 trans->journal_replay_not_finished = 3163 trans->journal_replay_not_finished = 3196 unlikely(!test_bit(JOURNAL_re 3164 unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) && 3197 atomic_inc_not_zero(&c->journ 3165 atomic_inc_not_zero(&c->journal_keys.ref); 3198 trans->nr_paths = ARRAY_SIZE( 3166 trans->nr_paths = ARRAY_SIZE(trans->_paths); 3199 trans->paths_allocated = trans->_pat 3167 trans->paths_allocated = trans->_paths_allocated; 3200 trans->sorted = trans->_sor 3168 trans->sorted = trans->_sorted; 3201 trans->paths = trans->_pat 3169 trans->paths = trans->_paths; 3202 trans->updates = trans->_upd 3170 trans->updates = trans->_updates; 3203 3171 3204 *trans_paths_nr(trans->paths) = BTREE 3172 *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL; 3205 3173 3206 trans->paths_allocated[0] = 1; 3174 trans->paths_allocated[0] = 1; 3207 3175 3208 static struct lock_class_key lockdep_ << 3209 lockdep_init_map(&trans->dep_map, "bc << 3210 << 3211 if (fn_idx < BCH_TRANSACTIONS_NR) { 3176 if (fn_idx < BCH_TRANSACTIONS_NR) { 3212 trans->fn = bch2_btree_transa 3177 trans->fn = bch2_btree_transaction_fns[fn_idx]; 3213 3178 3214 struct btree_transaction_stat 3179 struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx]; 3215 3180 3216 if (s->max_mem) { 3181 if (s->max_mem) { 3217 unsigned expected_mem 3182 unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem); 3218 3183 3219 trans->mem = kmalloc( 3184 trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL); 3220 if (likely(trans->mem 3185 if (likely(trans->mem)) 3221 trans->mem_by 3186 trans->mem_bytes = expected_mem_bytes; 3222 } 3187 } 3223 3188 3224 trans->nr_paths_max = s->nr_m 3189 trans->nr_paths_max = s->nr_max_paths; 3225 trans->journal_entries_size = 3190 trans->journal_entries_size = s->journal_entries_size; 3226 } 3191 } 3227 3192 3228 trans->srcu_idx = srcu_read_l 3193 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier); 3229 trans->srcu_lock_time = jiffies; 3194 trans->srcu_lock_time = jiffies; 3230 trans->srcu_held = true; 3195 trans->srcu_held = true; 3231 trans_set_locked(trans); 3196 trans_set_locked(trans); 3232 3197 3233 closure_init_stack_release(&trans->re 3198 closure_init_stack_release(&trans->ref); 3234 return trans; 3199 return trans; 
3235 } 3200 } 3236 3201 3237 static void check_btree_paths_leaked(struct b 3202 static void check_btree_paths_leaked(struct btree_trans *trans) 3238 { 3203 { 3239 #ifdef CONFIG_BCACHEFS_DEBUG 3204 #ifdef CONFIG_BCACHEFS_DEBUG 3240 struct bch_fs *c = trans->c; 3205 struct bch_fs *c = trans->c; 3241 struct btree_path *path; 3206 struct btree_path *path; 3242 unsigned i; 3207 unsigned i; 3243 3208 3244 trans_for_each_path(trans, path, i) 3209 trans_for_each_path(trans, path, i) 3245 if (path->ref) 3210 if (path->ref) 3246 goto leaked; 3211 goto leaked; 3247 return; 3212 return; 3248 leaked: 3213 leaked: 3249 bch_err(c, "btree paths leaked from % 3214 bch_err(c, "btree paths leaked from %s!", trans->fn); 3250 trans_for_each_path(trans, path, i) 3215 trans_for_each_path(trans, path, i) 3251 if (path->ref) 3216 if (path->ref) 3252 printk(KERN_ERR " bt 3217 printk(KERN_ERR " btree %s %pS\n", 3253 bch2_btree_id_ 3218 bch2_btree_id_str(path->btree_id), 3254 (void *) path- 3219 (void *) path->ip_allocated); 3255 /* Be noisy about this: */ 3220 /* Be noisy about this: */ 3256 bch2_fatal_error(c); 3221 bch2_fatal_error(c); 3257 #endif 3222 #endif 3258 } 3223 } 3259 3224 3260 void bch2_trans_put(struct btree_trans *trans 3225 void bch2_trans_put(struct btree_trans *trans) 3261 __releases(&c->btree_trans_barrier) 3226 __releases(&c->btree_trans_barrier) 3262 { 3227 { 3263 struct bch_fs *c = trans->c; 3228 struct bch_fs *c = trans->c; 3264 3229 3265 bch2_trans_unlock(trans); 3230 bch2_trans_unlock(trans); 3266 3231 3267 trans_for_each_update(trans, i) 3232 trans_for_each_update(trans, i) 3268 __btree_path_put(trans, trans !! 3233 __btree_path_put(trans->paths + i->path, true); 3269 trans->nr_updates = 0; 3234 trans->nr_updates = 0; 3270 3235 3271 check_btree_paths_leaked(trans); 3236 check_btree_paths_leaked(trans); 3272 3237 3273 if (trans->srcu_held) { 3238 if (trans->srcu_held) { 3274 check_srcu_held_too_long(tran 3239 check_srcu_held_too_long(trans); 3275 srcu_read_unlock(&c->btree_tr 3240 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx); 3276 } 3241 } 3277 3242 >> 3243 if (trans->fs_usage_deltas) { >> 3244 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) == >> 3245 REPLICAS_DELTA_LIST_MAX) >> 3246 mempool_free(trans->fs_usage_deltas, >> 3247 &c->replicas_delta_pool); >> 3248 else >> 3249 kfree(trans->fs_usage_deltas); >> 3250 } >> 3251 3278 if (unlikely(trans->journal_replay_no 3252 if (unlikely(trans->journal_replay_not_finished)) 3279 bch2_journal_keys_put(c); 3253 bch2_journal_keys_put(c); 3280 3254 3281 /* 3255 /* 3282 * trans->ref protects trans->locking 3256 * trans->ref protects trans->locking_wait.task, btree_paths array; used 3283 * by cycle detector 3257 * by cycle detector 3284 */ 3258 */ 3285 closure_return_sync(&trans->ref); 3259 closure_return_sync(&trans->ref); 3286 trans->locking_wait.task = NULL; 3260 trans->locking_wait.task = NULL; 3287 3261 3288 unsigned long *paths_allocated = tran 3262 unsigned long *paths_allocated = trans->paths_allocated; 3289 trans->paths_allocated = NULL; 3263 trans->paths_allocated = NULL; 3290 trans->paths = NULL; 3264 trans->paths = NULL; 3291 3265 3292 if (paths_allocated != trans->_paths_ 3266 if (paths_allocated != trans->_paths_allocated) 3293 kvfree_rcu_mightsleep(paths_a 3267 kvfree_rcu_mightsleep(paths_allocated); 3294 3268 3295 if (trans->used_mempool) 3269 if (trans->used_mempool) 3296 mempool_free(trans->mem, &c-> 3270 mempool_free(trans->mem, &c->btree_trans_mem_pool); 3297 else 3271 else 3298 kfree(trans->mem); 
3272 kfree(trans->mem); 3299 3273 3300 /* Userspace doesn't have a real perc 3274 /* Userspace doesn't have a real percpu implementation: */ 3301 if (IS_ENABLED(__KERNEL__)) 3275 if (IS_ENABLED(__KERNEL__)) 3302 trans = this_cpu_xchg(c->btre 3276 trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans); 3303 3277 3304 if (trans) { 3278 if (trans) { 3305 seqmutex_lock(&c->btree_trans 3279 seqmutex_lock(&c->btree_trans_lock); 3306 list_del(&trans->list); 3280 list_del(&trans->list); 3307 seqmutex_unlock(&c->btree_tra 3281 seqmutex_unlock(&c->btree_trans_lock); 3308 3282 3309 mempool_free(trans, &c->btree 3283 mempool_free(trans, &c->btree_trans_pool); 3310 } 3284 } 3311 } 3285 } 3312 3286 3313 bool bch2_current_has_btree_trans(struct bch_ << 3314 { << 3315 seqmutex_lock(&c->btree_trans_lock); << 3316 struct btree_trans *trans; << 3317 bool ret = false; << 3318 list_for_each_entry(trans, &c->btree_ << 3319 if (trans->locking_wait.task << 3320 trans->locked) { << 3321 ret = true; << 3322 break; << 3323 } << 3324 seqmutex_unlock(&c->btree_trans_lock) << 3325 return ret; << 3326 } << 3327 << 3328 static void __maybe_unused 3287 static void __maybe_unused 3329 bch2_btree_bkey_cached_common_to_text(struct 3288 bch2_btree_bkey_cached_common_to_text(struct printbuf *out, 3330 struct 3289 struct btree_bkey_cached_common *b) 3331 { 3290 { 3332 struct six_lock_count c = six_lock_co 3291 struct six_lock_count c = six_lock_counts(&b->lock); 3333 struct task_struct *owner; 3292 struct task_struct *owner; 3334 pid_t pid; 3293 pid_t pid; 3335 3294 3336 rcu_read_lock(); 3295 rcu_read_lock(); 3337 owner = READ_ONCE(b->lock.owner); 3296 owner = READ_ONCE(b->lock.owner); 3338 pid = owner ? owner->pid : 0; 3297 pid = owner ? owner->pid : 0; 3339 rcu_read_unlock(); 3298 rcu_read_unlock(); 3340 3299 3341 prt_printf(out, "\t%px %c l=%u %s:", 3300 prt_printf(out, "\t%px %c l=%u %s:", b, b->cached ? 'c' : 'b', 3342 b->level, bch2_btree_id_st 3301 b->level, bch2_btree_id_str(b->btree_id)); 3343 bch2_bpos_to_text(out, btree_node_pos 3302 bch2_bpos_to_text(out, btree_node_pos(b)); 3344 3303 3345 prt_printf(out, "\t locks %u:%u:%u he 3304 prt_printf(out, "\t locks %u:%u:%u held by pid %u", 3346 c.n[0], c.n[1], c.n[2], pi 3305 c.n[0], c.n[1], c.n[2], pid); 3347 } 3306 } 3348 3307 3349 void bch2_btree_trans_to_text(struct printbuf 3308 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans) 3350 { 3309 { 3351 struct btree_bkey_cached_common *b; 3310 struct btree_bkey_cached_common *b; 3352 static char lock_types[] = { 'r', 'i' 3311 static char lock_types[] = { 'r', 'i', 'w' }; 3353 struct task_struct *task = READ_ONCE( 3312 struct task_struct *task = READ_ONCE(trans->locking_wait.task); 3354 unsigned l, idx; 3313 unsigned l, idx; 3355 3314 3356 /* before rcu_read_lock(): */ 3315 /* before rcu_read_lock(): */ 3357 bch2_printbuf_make_room(out, 4096); 3316 bch2_printbuf_make_room(out, 4096); 3358 3317 3359 if (!out->nr_tabstops) { 3318 if (!out->nr_tabstops) { 3360 printbuf_tabstop_push(out, 16 3319 printbuf_tabstop_push(out, 16); 3361 printbuf_tabstop_push(out, 32 3320 printbuf_tabstop_push(out, 32); 3362 } 3321 } 3363 3322 3364 prt_printf(out, "%i %s\n", task ? tas 3323 prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn); 3365 3324 3366 /* trans->paths is rcu protected vs. 3325 /* trans->paths is rcu protected vs. 
freeing */ 3367 rcu_read_lock(); 3326 rcu_read_lock(); 3368 out->atomic++; 3327 out->atomic++; 3369 3328 3370 struct btree_path *paths = rcu_derefe 3329 struct btree_path *paths = rcu_dereference(trans->paths); 3371 if (!paths) 3330 if (!paths) 3372 goto out; 3331 goto out; 3373 3332 3374 unsigned long *paths_allocated = tran 3333 unsigned long *paths_allocated = trans_paths_allocated(paths); 3375 3334 3376 trans_for_each_path_idx_from(paths_al 3335 trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) { 3377 struct btree_path *path = pat 3336 struct btree_path *path = paths + idx; 3378 if (!path->nodes_locked) 3337 if (!path->nodes_locked) 3379 continue; 3338 continue; 3380 3339 3381 prt_printf(out, " path %u %c 3340 prt_printf(out, " path %u %c l=%u %s:", 3382 idx, 3341 idx, 3383 path->cached ? 'c' : ' 3342 path->cached ? 'c' : 'b', 3384 path->level, 3343 path->level, 3385 bch2_btree_id_str(path 3344 bch2_btree_id_str(path->btree_id)); 3386 bch2_bpos_to_text(out, path-> 3345 bch2_bpos_to_text(out, path->pos); 3387 prt_newline(out); 3346 prt_newline(out); 3388 3347 3389 for (l = 0; l < BTREE_MAX_DEP 3348 for (l = 0; l < BTREE_MAX_DEPTH; l++) { 3390 if (btree_node_locked 3349 if (btree_node_locked(path, l) && 3391 !IS_ERR_OR_NULL(b 3350 !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) { 3392 prt_printf(ou 3351 prt_printf(out, " %c l=%u ", 3393 lo 3352 lock_types[btree_node_locked_type(path, l)], l); 3394 bch2_btree_bk 3353 bch2_btree_bkey_cached_common_to_text(out, b); 3395 prt_newline(o 3354 prt_newline(out); 3396 } 3355 } 3397 } 3356 } 3398 } 3357 } 3399 3358 3400 b = READ_ONCE(trans->locking); 3359 b = READ_ONCE(trans->locking); 3401 if (b) { 3360 if (b) { 3402 prt_printf(out, " blocked fo 3361 prt_printf(out, " blocked for %lluus on\n", 3403 div_u64(local_cloc 3362 div_u64(local_clock() - trans->locking_wait.start_time, 1000)); 3404 prt_printf(out, " %c", loc 3363 prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]); 3405 bch2_btree_bkey_cached_common 3364 bch2_btree_bkey_cached_common_to_text(out, b); 3406 prt_newline(out); 3365 prt_newline(out); 3407 } 3366 } 3408 out: 3367 out: 3409 --out->atomic; 3368 --out->atomic; 3410 rcu_read_unlock(); 3369 rcu_read_unlock(); 3411 } 3370 } 3412 3371 3413 void bch2_fs_btree_iter_exit(struct bch_fs *c 3372 void bch2_fs_btree_iter_exit(struct bch_fs *c) 3414 { 3373 { 3415 struct btree_transaction_stats *s; 3374 struct btree_transaction_stats *s; 3416 struct btree_trans *trans; 3375 struct btree_trans *trans; 3417 int cpu; 3376 int cpu; 3418 3377 3419 if (c->btree_trans_bufs) 3378 if (c->btree_trans_bufs) 3420 for_each_possible_cpu(cpu) { 3379 for_each_possible_cpu(cpu) { 3421 struct btree_trans *t 3380 struct btree_trans *trans = 3422 per_cpu_ptr(c 3381 per_cpu_ptr(c->btree_trans_bufs, cpu)->trans; 3423 3382 3424 if (trans) { 3383 if (trans) { 3425 seqmutex_lock 3384 seqmutex_lock(&c->btree_trans_lock); 3426 list_del(&tra 3385 list_del(&trans->list); 3427 seqmutex_unlo 3386 seqmutex_unlock(&c->btree_trans_lock); 3428 } 3387 } 3429 kfree(trans); 3388 kfree(trans); 3430 } 3389 } 3431 free_percpu(c->btree_trans_bufs); 3390 free_percpu(c->btree_trans_bufs); 3432 3391 3433 trans = list_first_entry_or_null(&c-> 3392 trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list); 3434 if (trans) 3393 if (trans) 3435 panic("%s leaked btree_trans\ 3394 panic("%s leaked btree_trans\n", trans->fn); 3436 3395 3437 for (s = c->btree_transaction_stats; 3396 for (s = c->btree_transaction_stats; 3438 s 
< c->btree_transaction_stats + 3397 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats); 3439 s++) { 3398 s++) { 3440 kfree(s->max_paths_text); 3399 kfree(s->max_paths_text); 3441 bch2_time_stats_exit(&s->lock 3400 bch2_time_stats_exit(&s->lock_hold_times); 3442 } 3401 } 3443 3402 3444 if (c->btree_trans_barrier_initialize 3403 if (c->btree_trans_barrier_initialized) { 3445 synchronize_srcu_expedited(&c 3404 synchronize_srcu_expedited(&c->btree_trans_barrier); 3446 cleanup_srcu_struct(&c->btree 3405 cleanup_srcu_struct(&c->btree_trans_barrier); 3447 } 3406 } 3448 mempool_exit(&c->btree_trans_mem_pool 3407 mempool_exit(&c->btree_trans_mem_pool); 3449 mempool_exit(&c->btree_trans_pool); 3408 mempool_exit(&c->btree_trans_pool); 3450 } 3409 } 3451 3410 3452 void bch2_fs_btree_iter_init_early(struct bch 3411 void bch2_fs_btree_iter_init_early(struct bch_fs *c) 3453 { 3412 { 3454 struct btree_transaction_stats *s; 3413 struct btree_transaction_stats *s; 3455 3414 3456 for (s = c->btree_transaction_stats; 3415 for (s = c->btree_transaction_stats; 3457 s < c->btree_transaction_stats + 3416 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats); 3458 s++) { 3417 s++) { 3459 bch2_time_stats_init(&s->dura 3418 bch2_time_stats_init(&s->duration); 3460 bch2_time_stats_init(&s->lock 3419 bch2_time_stats_init(&s->lock_hold_times); 3461 mutex_init(&s->lock); 3420 mutex_init(&s->lock); 3462 } 3421 } 3463 3422 3464 INIT_LIST_HEAD(&c->btree_trans_list); 3423 INIT_LIST_HEAD(&c->btree_trans_list); 3465 seqmutex_init(&c->btree_trans_lock); 3424 seqmutex_init(&c->btree_trans_lock); 3466 } 3425 } 3467 3426 3468 int bch2_fs_btree_iter_init(struct bch_fs *c) 3427 int bch2_fs_btree_iter_init(struct bch_fs *c) 3469 { 3428 { 3470 int ret; 3429 int ret; 3471 3430 3472 c->btree_trans_bufs = alloc_percpu(st 3431 c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf); 3473 if (!c->btree_trans_bufs) 3432 if (!c->btree_trans_bufs) 3474 return -ENOMEM; 3433 return -ENOMEM; 3475 3434 3476 ret = mempool_init_kmalloc_pool(&c- 3435 ret = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1, 3477 siz 3436 sizeof(struct btree_trans)) ?: 3478 mempool_init_kmalloc_pool(&c- 3437 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1, 3479 BTR 3438 BTREE_TRANS_MEM_MAX) ?: 3480 init_srcu_struct(&c->btree_tr 3439 init_srcu_struct(&c->btree_trans_barrier); 3481 if (ret) !! 3440 if (!ret) 3482 return ret; !! 3441 c->btree_trans_barrier_initialized = true; 3483 !! 3442 return ret; 3484 /* << 3485 * static annotation (hackily done) f << 3486 * btree node locks: << 3487 */ << 3488 #ifdef CONFIG_LOCKDEP << 3489 fs_reclaim_acquire(GFP_KERNEL); << 3490 struct btree_trans *trans = bch2_tran << 3491 trans_set_locked(trans); << 3492 bch2_trans_put(trans); << 3493 fs_reclaim_release(GFP_KERNEL); << 3494 #endif << 3495 << 3496 c->btree_trans_barrier_initialized = << 3497 return 0; << 3498 << 3499 } 3443 } 3500 3444
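The listing above only defines the iteration primitives; the short sketch below is not part of the original source and shows, under stated assumptions, how callers typically combine them. It assumes the bch2_trans_get()/bch2_trans_iter_init() wrappers declared in btree_iter.h; the btree id, position, and function name are illustrative only. The restart handling mirrors the loop used by bch2_btree_iter_peek_and_restart_outlined() above.

/*
 * Hypothetical caller-side sketch, assuming the usual bcachefs headers
 * ("bcachefs.h", "btree_iter.h") and the bch2_trans_get()/
 * bch2_trans_iter_init() wrappers; example btree/position chosen arbitrarily.
 */
static int example_walk_extents(struct bch_fs *c, u64 inum)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, POS(inum, 0), 0);

	while (1) {
		/* Reset the transaction before each attempt, as the peek-and-restart helper does: */
		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek_upto(&iter, POS(inum, U64_MAX));
		ret = bkey_err(k);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;	/* lock restart: retry from the top */
		if (ret || !k.k)
			break;		/* error, or no more keys before the end position */

		/* ... inspect k here; iterator position is preserved across restarts ... */

		if (!bch2_btree_iter_advance(&iter))
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}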