// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "replicas.h"
#include "snapshot.h"
#include "trace.h"

#include <linux/random.h>
#include <linux/prefetch.h>

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *,
				       btree_path_idx_t, btree_path_idx_t);

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
#ifdef TRACK_PATH_ALLOCATED
	return iter->ip_allocated;
#else
	return 0;
#endif
}

static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
static void bch2_trans_srcu_lock(struct btree_trans *);

static inline int __btree_path_cmp(const struct btree_path *l,
				   enum btree_id	r_btree_id,
				   bool			r_cached,
				   struct bpos		r_pos,
				   unsigned		r_level)
{
	/*
	 * Must match lock ordering as defined by __bch2_btree_node_lock:
	 */
	return   cmp_int(l->btree_id,	r_btree_id) ?:
		 cmp_int((int) l->cached,	(int) r_cached) ?:
		 bpos_cmp(l->pos,	r_pos) ?:
		-cmp_int(l->level,	r_level);
}

static inline int btree_path_cmp(const struct btree_path *l,
				 const struct btree_path *r)
{
	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
}

static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_all_snapshots) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_all_snapshots) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_is_extents) &&
	    !bkey_eq(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
	return pos;
}
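
/*
 * Note on btree_iter_search_key() above: extents are keyed by their end
 * position, so the extent covering iter->pos (if any) has a key strictly
 * greater than pos itself; starting the search at the successor of pos is
 * what lets peek() on an extents iterator return that extent.
 */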

static inline bool btree_path_pos_before_node(struct btree_path *path,
					      struct btree *b)
{
	return bpos_lt(path->pos, b->data->min_key);
}

static inline bool btree_path_pos_after_node(struct btree_path *path,
					     struct btree *b)
{
	return bpos_gt(path->pos, b->key.k.p);
}

static inline bool btree_path_pos_in_node(struct btree_path *path,
					  struct btree *b)
{
	return path->btree_id == b->c.btree_id &&
		!btree_path_pos_before_node(path, b) &&
		!btree_path_pos_after_node(path, b);
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_cached(struct btree_trans *trans,
					  struct btree_path *path)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(path, 0);

	if (!bch2_btree_node_relock(trans, path, 0))
		return;

	ck = (void *) path->l[0].b;
	BUG_ON(ck->key.btree_id != path->btree_id ||
	       !bkey_eq(ck->key.pos, path->pos));

	if (!locked)
		btree_node_unlock(trans, path, 0);
}

static void bch2_btree_path_verify_level(struct btree_trans *trans,
					 struct btree_path *path, unsigned level)
{
	struct btree_path_level *l;
	struct btree_node_iter tmp;
	bool locked;
	struct bkey_packed *p, *k;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	struct printbuf buf3 = PRINTBUF;
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	l	= &path->l[level];
	tmp	= l->iter;
	locked	= btree_node_locked(path, level);

	if (path->cached) {
		if (!level)
			bch2_btree_path_verify_cached(trans, path);
		return;
	}

	if (!btree_path_node(path, level))
		return;

	if (!bch2_btree_node_relock_notrace(trans, path, level))
		return;

	BUG_ON(!btree_path_pos_in_node(path, l->b));

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past deleted keys:
	 */
	p = level
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		msg = "after";
		goto err;
	}

	if (!locked)
		btree_node_unlock(trans, path, level);
	return;
err:
	bch2_bpos_to_text(&buf1, path->pos);

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);

		bch2_bkey_to_text(&buf2, &uk);
	} else {
		prt_printf(&buf2, "(none)");
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);

		bch2_bkey_to_text(&buf3, &uk);
	} else {
		prt_printf(&buf3, "(none)");
	}

	panic("path should be %s key at level %u:\n"
	      "path pos %s\n"
	      "prev key %s\n"
	      "cur  key %s\n",
	      msg, level, buf1.buf, buf2.buf, buf3.buf);
}

static void bch2_btree_path_verify(struct btree_trans *trans,
				   struct btree_path *path)
{
	struct bch_fs *c = trans->c;

	for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
		if (!path->l[i].b) {
			BUG_ON(!path->cached &&
			       bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
			break;
		}

		bch2_btree_path_verify_level(trans, path, i);
	}

	bch2_btree_path_verify_locks(path);
}

void bch2_trans_verify_paths(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned iter;

	trans_for_each_path(trans, path, iter)
		bch2_btree_path_verify(trans, path);
}

static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);

	BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
	       (iter->flags & BTREE_ITER_all_snapshots));

	BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
	       (iter->flags & BTREE_ITER_all_snapshots) &&
	       !btree_type_has_snapshot_field(iter->btree_id));

	if (iter->update_path)
		bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
	bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
}

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
	BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
	       !iter->pos.snapshot);

	BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
	       bkey_gt(iter->pos, iter->k.p));
}

static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter copy;
	struct bkey_s_c prev;
	int ret = 0;

	if (!bch2_debug_check_iterators)
		return 0;

	if (!(iter->flags & BTREE_ITER_filter_snapshots))
		return 0;

	if (bkey_err(k) || !k.k)
		return 0;

	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
					  iter->snapshot,
					  k.k->p.snapshot));

	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
			     BTREE_ITER_nopreserve|
			     BTREE_ITER_all_snapshots);
	prev = bch2_btree_iter_prev(&copy);
	if (!prev.k)
		goto out;

	ret = bkey_err(prev);
	if (ret)
		goto out;

	if (bkey_eq(prev.k->p, k.k->p) &&
	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
				      prev.k->p.snapshot)) {
		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

		bch2_bkey_to_text(&buf1, k.k);
		bch2_bkey_to_text(&buf2, prev.k);
315 316 panic("iter snap %u\n" 317 "k %s\n" 318 "prev %s\n", 319 iter->snapshot, 320 buf1.buf, buf2.buf); 321 } 322 out: 323 bch2_trans_iter_exit(trans, ©); 324 return ret; 325 } 326 327 void bch2_assert_pos_locked(struct btree_trans 328 struct bpos pos) 329 { 330 bch2_trans_verify_not_unlocked(trans); 331 332 struct btree_path *path; 333 struct trans_for_each_path_inorder_ite 334 struct printbuf buf = PRINTBUF; 335 336 btree_trans_sort_paths(trans); 337 338 trans_for_each_path_inorder(trans, pat 339 if (path->btree_id != id || 340 !btree_node_locked(path, 0 341 !path->should_be_locked) 342 continue; 343 344 if (!path->cached) { 345 if (bkey_ge(pos, path- 346 bkey_le(pos, path- 347 return; 348 } else { 349 if (bkey_eq(pos, path- 350 return; 351 } 352 } 353 354 bch2_dump_trans_paths_updates(trans); 355 bch2_bpos_to_text(&buf, pos); 356 357 panic("not locked: %s %s\n", bch2_btre 358 } 359 360 #else 361 362 static inline void bch2_btree_path_verify_leve 363 364 static inline void bch2_btree_path_verify(stru 365 stru 366 static inline void bch2_btree_iter_verify(stru 367 static inline void bch2_btree_iter_verify_entr 368 static inline int bch2_btree_iter_verify_ret(s 369 370 #endif 371 372 /* Btree path: fixups after btree updates */ 373 374 static void btree_node_iter_set_set_pos(struct 375 struct 376 struct 377 struct 378 { 379 struct btree_node_iter_set *set; 380 381 btree_node_iter_for_each(iter, set) 382 if (set->end == t->end_offset) 383 set->k = __btree_node_ 384 bch2_btree_node_iter_s 385 return; 386 } 387 388 bch2_btree_node_iter_push(iter, b, k, 389 } 390 391 static void __bch2_btree_path_fix_key_modified 392 393 394 { 395 struct btree_path_level *l = &path->l[ 396 397 if (where != bch2_btree_node_iter_peek 398 return; 399 400 if (bkey_iter_pos_cmp(l->b, where, &pa 401 bch2_btree_node_iter_advance(& 402 } 403 404 void bch2_btree_path_fix_key_modified(struct b 405 struct b 406 struct b 407 { 408 struct btree_path *path; 409 unsigned i; 410 411 trans_for_each_path_with_node(trans, b 412 __bch2_btree_path_fix_key_modi 413 bch2_btree_path_verify_level(t 414 } 415 } 416 417 static void __bch2_btree_node_iter_fix(struct 418 struct 419 struct 420 struct 421 struct 422 unsigne 423 unsigne 424 { 425 const struct bkey_packed *end = btree_ 426 struct btree_node_iter_set *set; 427 unsigned offset = __btree_node_key_to_ 428 int shift = new_u64s - clobber_u64s; 429 unsigned old_end = t->end_offset - shi 430 unsigned orig_iter_pos = node_iter->da 431 bool iter_current_key_modified = 432 orig_iter_pos >= offset && 433 orig_iter_pos <= offset + clob 434 435 btree_node_iter_for_each(node_iter, se 436 if (set->end == old_end) 437 goto found; 438 439 /* didn't find the bset in the iterato 440 if (new_u64s && 441 bkey_iter_pos_cmp(b, where, &path- 442 bch2_btree_node_iter_push(node 443 goto fixup_done; 444 } else { 445 /* Iterator is after key that 446 return; 447 } 448 found: 449 set->end = t->end_offset; 450 451 /* Iterator hasn't gotten to the key t 452 if (set->k < offset) 453 return; 454 455 if (new_u64s && 456 bkey_iter_pos_cmp(b, where, &path- 457 set->k = offset; 458 } else if (set->k < offset + clobber_u 459 set->k = offset + new_u64s; 460 if (set->k == set->end) 461 bch2_btree_node_iter_s 462 } else { 463 /* Iterator is after key that 464 set->k = (int) set->k + shift; 465 return; 466 } 467 468 bch2_btree_node_iter_sort(node_iter, b 469 fixup_done: 470 if (node_iter->data[0].k != orig_iter_ 471 iter_current_key_modified = tr 472 473 /* 474 * When a new key is added, and the no 475 * key, the 
iterator might have skippe 476 * come after the key the iterator now 477 * before those deleted keys - otherwi 478 * bch2_btree_node_iter_prev_all() bre 479 */ 480 if (!bch2_btree_node_iter_end(node_ite 481 iter_current_key_modified && 482 b->c.level) { 483 struct bkey_packed *k, *k2, *p 484 485 k = bch2_btree_node_iter_peek_ 486 487 for_each_bset(b, t) { 488 bool set_pos = false; 489 490 if (node_iter->data[0] 491 continue; 492 493 k2 = bch2_btree_node_i 494 495 while ((p = bch2_bkey_ 496 bkey_iter_cmp(b 497 k2 = p; 498 set_pos = true 499 } 500 501 if (set_pos) 502 btree_node_ite 503 504 } 505 } 506 } 507 508 void bch2_btree_node_iter_fix(struct btree_tra 509 struct btree_pat 510 struct btree *b, 511 struct btree_nod 512 struct bkey_pack 513 unsigned clobber 514 unsigned new_u64 515 { 516 struct bset_tree *t = bch2_bkey_to_bse 517 struct btree_path *linked; 518 unsigned i; 519 520 if (node_iter != &path->l[b->c.level]. 521 __bch2_btree_node_iter_fix(pat 522 whe 523 524 if (bch2_debug_check_iterators 525 bch2_btree_node_iter_v 526 } 527 528 trans_for_each_path_with_node(trans, b 529 __bch2_btree_node_iter_fix(lin 530 &li 531 whe 532 bch2_btree_path_verify_level(t 533 } 534 } 535 536 /* Btree path level: pointer to a particular b 537 538 static inline struct bkey_s_c __btree_iter_unp 539 540 541 542 { 543 if (unlikely(!k)) { 544 /* 545 * signal to bch2_btree_iter_p 546 * a hole 547 */ 548 u->type = KEY_TYPE_deleted; 549 return bkey_s_c_null; 550 } 551 552 return bkey_disassemble(l->b, k, u); 553 } 554 555 static inline struct bkey_s_c btree_path_level 556 557 558 { 559 return __btree_iter_unpack(c, l, u, 560 bch2_btree_node_iter_p 561 } 562 563 static inline struct bkey_s_c btree_path_level 564 565 566 567 { 568 struct bkey_s_c k = __btree_iter_unpac 569 bch2_btree_node_iter_p 570 571 path->pos = k.k ? k.k->p : l->b->key.k 572 trans->paths_sorted = false; 573 bch2_btree_path_verify_level(trans, pa 574 return k; 575 } 576 577 static inline struct bkey_s_c btree_path_level 578 579 580 581 { 582 struct bkey_s_c k = __btree_iter_unpac 583 bch2_btree_node_iter_p 584 585 path->pos = k.k ? 
k.k->p : l->b->data- 586 trans->paths_sorted = false; 587 bch2_btree_path_verify_level(trans, pa 588 return k; 589 } 590 591 static inline bool btree_path_advance_to_pos(s 592 s 593 i 594 { 595 struct bkey_packed *k; 596 int nr_advanced = 0; 597 598 while ((k = bch2_btree_node_iter_peek_ 599 bkey_iter_pos_cmp(l->b, k, &pat 600 if (max_advance > 0 && nr_adva 601 return false; 602 603 bch2_btree_node_iter_advance(& 604 nr_advanced++; 605 } 606 607 return true; 608 } 609 610 static inline void __btree_path_level_init(str 611 uns 612 { 613 struct btree_path_level *l = &path->l[ 614 615 bch2_btree_node_iter_init(&l->iter, l- 616 617 /* 618 * Iterators to interior nodes should 619 * whiteout: 620 */ 621 if (level) 622 bch2_btree_node_iter_peek(&l-> 623 } 624 625 void bch2_btree_path_level_init(struct btree_t 626 struct btree_p 627 struct btree * 628 { 629 BUG_ON(path->cached); 630 631 EBUG_ON(!btree_path_pos_in_node(path, 632 633 path->l[b->c.level].lock_seq = six_loc 634 path->l[b->c.level].b = b; 635 __btree_path_level_init(path, b->c.lev 636 } 637 638 /* Btree path: fixups after btree node updates 639 640 static void bch2_trans_revalidate_updates_in_n 641 { 642 struct bch_fs *c = trans->c; 643 644 trans_for_each_update(trans, i) 645 if (!i->cached && 646 i->level == b->c.level 647 i->btree_id == b->c.btree_ 648 bpos_cmp(i->k->k.p, b->dat 649 bpos_cmp(i->k->k.p, b->dat 650 i->old_v = bch2_btree_ 651 652 if (unlikely(trans->jo 653 struct bkey_i 654 bch2_j 655 656 657 if (j_k) { 658 i->old 659 i->old 660 } 661 } 662 } 663 } 664 665 /* 666 * A btree node is being replaced - update the 667 * node: 668 */ 669 void bch2_trans_node_add(struct btree_trans *t 670 struct btree_path *pa 671 struct btree *b) 672 { 673 struct btree_path *prev; 674 675 BUG_ON(!btree_path_pos_in_node(path, b 676 677 while ((prev = prev_btree_path(trans, 678 btree_path_pos_in_node(prev, b) 679 path = prev; 680 681 for (; 682 path && btree_path_pos_in_node(pa 683 path = next_btree_path(trans, pat 684 if (path->uptodate == BTREE_IT 685 enum btree_node_locked 686 btree_lock_wan 687 688 if (t != BTREE_NODE_UN 689 btree_node_unl 690 six_lock_incre 691 mark_btree_nod 692 } 693 694 bch2_btree_path_level_ 695 } 696 697 bch2_trans_revalidate_updates_in_node( 698 } 699 700 /* 701 * A btree node has been modified in such a wa 702 * them: 703 */ 704 void bch2_trans_node_reinit_iter(struct btree_ 705 { 706 struct btree_path *path; 707 unsigned i; 708 709 trans_for_each_path_with_node(trans, b 710 __btree_path_level_init(path, 711 712 bch2_trans_revalidate_updates_in_node( 713 } 714 715 /* Btree path: traverse, set_pos: */ 716 717 static inline int btree_path_lock_root(struct 718 struct 719 unsigne 720 unsigne 721 { 722 struct bch_fs *c = trans->c; 723 struct btree *b, **rootp = &bch2_btree 724 enum six_lock_type lock_type; 725 unsigned i; 726 int ret; 727 728 EBUG_ON(path->nodes_locked); 729 730 while (1) { 731 b = READ_ONCE(*rootp); 732 path->level = READ_ONCE(b->c.l 733 734 if (unlikely(path->level < dep 735 /* 736 * the root is at a lo 737 * got to the end of t 738 * greater than some d 739 * that depth 740 */ 741 path->level = depth_wa 742 for (i = path->level; 743 path->l[i].b = 744 return 1; 745 } 746 747 lock_type = __btree_lock_want( 748 ret = btree_node_lock(trans, p 749 path->le 750 if (unlikely(ret)) { 751 if (bch2_err_matches(r 752 continue; 753 if (bch2_err_matches(r 754 return ret; 755 BUG(); 756 } 757 758 if (likely(b == READ_ONCE(*roo 759 b->c.level == path- 760 !race_fault())) { 761 for (i = 0; i < path-> 762 path->l[i].b 
= 763 path->l[path->level].b 764 for (i = path->level + 765 path->l[i].b = 766 767 mark_btree_node_locked 768 769 bch2_btree_path_level_ 770 return 0; 771 } 772 773 six_unlock_type(&b->c.lock, lo 774 } 775 } 776 777 noinline 778 static int btree_path_prefetch(struct btree_tr 779 { 780 struct bch_fs *c = trans->c; 781 struct btree_path_level *l = path_l(pa 782 struct btree_node_iter node_iter = l-> 783 struct bkey_packed *k; 784 struct bkey_buf tmp; 785 unsigned nr = test_bit(BCH_FS_started, 786 ? (path->level > 1 ? 0 : 2) 787 : (path->level > 1 ? 1 : 16); 788 bool was_locked = btree_node_locked(pa 789 int ret = 0; 790 791 bch2_bkey_buf_init(&tmp); 792 793 while (nr-- && !ret) { 794 if (!bch2_btree_node_relock(tr 795 break; 796 797 bch2_btree_node_iter_advance(& 798 k = bch2_btree_node_iter_peek( 799 if (!k) 800 break; 801 802 bch2_bkey_buf_unpack(&tmp, c, 803 ret = bch2_btree_node_prefetch 804 805 } 806 807 if (!was_locked) 808 btree_node_unlock(trans, path, 809 810 bch2_bkey_buf_exit(&tmp, c); 811 return ret; 812 } 813 814 static int btree_path_prefetch_j(struct btree_ 815 struct btree_ 816 { 817 struct bch_fs *c = trans->c; 818 struct bkey_s_c k; 819 struct bkey_buf tmp; 820 unsigned nr = test_bit(BCH_FS_started, 821 ? (path->level > 1 ? 0 : 2) 822 : (path->level > 1 ? 1 : 16); 823 bool was_locked = btree_node_locked(pa 824 int ret = 0; 825 826 bch2_bkey_buf_init(&tmp); 827 828 while (nr-- && !ret) { 829 if (!bch2_btree_node_relock(tr 830 break; 831 832 bch2_btree_and_journal_iter_ad 833 k = bch2_btree_and_journal_ite 834 if (!k.k) 835 break; 836 837 bch2_bkey_buf_reassemble(&tmp, 838 ret = bch2_btree_node_prefetch 839 840 } 841 842 if (!was_locked) 843 btree_node_unlock(trans, path, 844 845 bch2_bkey_buf_exit(&tmp, c); 846 return ret; 847 } 848 849 static noinline void btree_node_mem_ptr_set(st 850 st 851 un 852 { 853 struct btree_path_level *l = &path->l[ 854 bool locked = btree_node_locked(path, 855 struct bkey_packed *k; 856 struct bch_btree_ptr_v2 *bp; 857 858 if (!bch2_btree_node_relock(trans, pat 859 return; 860 861 k = bch2_btree_node_iter_peek_all(&l-> 862 BUG_ON(k->type != KEY_TYPE_btree_ptr_v 863 864 bp = (void *) bkeyp_val(&l->b->format, 865 bp->mem_ptr = (unsigned long)b; 866 867 if (!locked) 868 btree_node_unlock(trans, path, 869 } 870 871 static noinline int btree_node_iter_and_journa 872 873 874 875 { 876 struct bch_fs *c = trans->c; 877 struct btree_path_level *l = path_l(pa 878 struct btree_and_journal_iter jiter; 879 struct bkey_s_c k; 880 int ret = 0; 881 882 __bch2_btree_and_journal_iter_init_nod 883 884 k = bch2_btree_and_journal_iter_peek(& 885 if (!k.k) { 886 struct printbuf buf = PRINTBUF 887 888 prt_str(&buf, "node not found 889 bch2_bpos_to_text(&buf, path-> 890 prt_str(&buf, " at btree "); 891 bch2_btree_pos_to_text(&buf, c 892 893 ret = bch2_fs_topology_error(c 894 printbuf_exit(&buf); 895 goto err; 896 } 897 898 bch2_bkey_buf_reassemble(out, c, k); 899 900 if ((flags & BTREE_ITER_prefetch) && 901 c->opts.btree_node_prefetch) 902 ret = btree_path_prefetch_j(tr 903 904 err: 905 bch2_btree_and_journal_iter_exit(&jite 906 return ret; 907 } 908 909 static __always_inline int btree_path_down(str 910 str 911 uns 912 uns 913 { 914 struct bch_fs *c = trans->c; 915 struct btree_path_level *l = path_l(pa 916 struct btree *b; 917 unsigned level = path->level - 1; 918 enum six_lock_type lock_type = __btree 919 struct bkey_buf tmp; 920 int ret; 921 922 EBUG_ON(!btree_node_locked(path, path- 923 924 bch2_bkey_buf_init(&tmp); 925 926 if (unlikely(trans->journal_replay_not 
927 ret = btree_node_iter_and_jour 928 if (ret) 929 goto err; 930 } else { 931 struct bkey_packed *k = bch2_b 932 if (!k) { 933 struct printbuf buf = 934 935 prt_str(&buf, "node no 936 bch2_bpos_to_text(&buf 937 prt_str(&buf, " within 938 bch2_bkey_val_to_text( 939 940 bch2_fs_fatal_error(c, 941 printbuf_exit(&buf); 942 ret = -BCH_ERR_btree_n 943 goto err; 944 } 945 946 bch2_bkey_buf_unpack(&tmp, c, 947 948 if ((flags & BTREE_ITER_prefet 949 c->opts.btree_node_prefetc 950 ret = btree_path_prefe 951 if (ret) 952 goto err; 953 } 954 } 955 956 b = bch2_btree_node_get(trans, path, t 957 ret = PTR_ERR_OR_ZERO(b); 958 if (unlikely(ret)) 959 goto err; 960 961 if (likely(!trans->journal_replay_not_ 962 tmp.k->k.type == KEY_TYPE_b 963 unlikely(b != btree_node_mem_ptr(t 964 btree_node_mem_ptr_set(trans, 965 966 if (btree_node_read_locked(path, level 967 btree_node_unlock(trans, path, 968 969 mark_btree_node_locked(trans, path, le 970 (enum btree_nod 971 path->level = level; 972 bch2_btree_path_level_init(trans, path 973 974 bch2_btree_path_verify_locks(path); 975 err: 976 bch2_bkey_buf_exit(&tmp, c); 977 return ret; 978 } 979 980 static int bch2_btree_path_traverse_all(struct 981 { 982 struct bch_fs *c = trans->c; 983 struct btree_path *path; 984 unsigned long trace_ip = _RET_IP_; 985 unsigned i; 986 int ret = 0; 987 988 if (trans->in_traverse_all) 989 return -BCH_ERR_transaction_re 990 991 trans->in_traverse_all = true; 992 retry_all: 993 trans->restarted = 0; 994 trans->last_restarted_ip = 0; 995 996 trans_for_each_path(trans, path, i) 997 path->should_be_locked = false 998 999 btree_trans_sort_paths(trans); 1000 1001 bch2_trans_unlock(trans); 1002 cond_resched(); 1003 trans_set_locked(trans); 1004 1005 if (unlikely(trans->memory_allocation 1006 struct closure cl; 1007 1008 closure_init_stack(&cl); 1009 1010 do { 1011 ret = bch2_btree_cach 1012 closure_sync(&cl); 1013 } while (ret); 1014 } 1015 1016 /* Now, redo traversals in correct or 1017 i = 0; 1018 while (i < trans->nr_sorted) { 1019 btree_path_idx_t idx = trans- 1020 1021 /* 1022 * Traversing a path can caus 1023 * the same position: 1024 */ 1025 if (trans->paths[idx].uptodat 1026 __btree_path_get(tran 1027 ret = bch2_btree_path 1028 __btree_path_put(tran 1029 1030 if (bch2_err_matches( 1031 bch2_err_matches( 1032 goto retry_al 1033 if (ret) 1034 goto err; 1035 } else { 1036 i++; 1037 } 1038 } 1039 1040 /* 1041 * We used to assert that all paths h 1042 * (path->uptodate < BTREE_ITER_NEED_ 1043 * path->should_be_locked is not set 1044 * then failed to relock a path - tha 1045 */ 1046 err: 1047 bch2_btree_cache_cannibalize_unlock(t 1048 1049 trans->in_traverse_all = false; 1050 1051 trace_and_count(c, trans_traverse_all 1052 return ret; 1053 } 1054 1055 static inline bool btree_path_check_pos_in_no 1056 1057 { 1058 if (check_pos < 0 && btree_path_pos_b 1059 return false; 1060 if (check_pos > 0 && btree_path_pos_a 1061 return false; 1062 return true; 1063 } 1064 1065 static inline bool btree_path_good_node(struc 1066 struc 1067 unsig 1068 { 1069 return is_btree_node(path, l) && 1070 bch2_btree_node_relock(trans, 1071 btree_path_check_pos_in_node( 1072 } 1073 1074 static void btree_path_set_level_down(struct 1075 struct 1076 unsigne 1077 { 1078 unsigned l; 1079 1080 path->level = new_level; 1081 1082 for (l = path->level + 1; l < BTREE_M 1083 if (btree_lock_want(path, l) 1084 btree_node_unlock(tra 1085 1086 btree_path_set_dirty(path, BTREE_ITER 1087 bch2_btree_path_verify(trans, path); 1088 } 1089 1090 static noinline unsigned 
__btree_path_up_unti 1091 1092 1093 { 1094 unsigned i, l = path->level; 1095 again: 1096 while (btree_path_node(path, l) && 1097 !btree_path_good_node(trans, p 1098 __btree_path_set_level_up(tra 1099 1100 /* If we need intent locks, take them 1101 for (i = l + 1; 1102 i < path->locks_want && btree_pa 1103 i++) 1104 if (!bch2_btree_node_relock(t 1105 while (l <= i) 1106 __btree_path_ 1107 goto again; 1108 } 1109 1110 return l; 1111 } 1112 1113 static inline unsigned btree_path_up_until_go 1114 1115 1116 { 1117 return likely(btree_node_locked(path, 1118 btree_path_check_pos_in 1119 ? path->level 1120 : __btree_path_up_until_good_ 1121 } 1122 1123 /* 1124 * This is the main state machine for walking 1125 * specified depth 1126 * 1127 * Returns 0 on success, -EIO on error (error 1128 * 1129 * On error, caller (peek_node()/peek_key()) 1130 * stashed in the iterator and returned from 1131 */ 1132 int bch2_btree_path_traverse_one(struct btree 1133 btree_path_i 1134 unsigned fla 1135 unsigned lon 1136 { 1137 struct btree_path *path = &trans->pat 1138 unsigned depth_want = path->level; 1139 int ret = -((int) trans->restarted); 1140 1141 if (unlikely(ret)) 1142 goto out; 1143 1144 if (unlikely(!trans->srcu_held)) 1145 bch2_trans_srcu_lock(trans); 1146 1147 trace_btree_path_traverse_start(trans 1148 1149 /* 1150 * Ensure we obey path->should_be_loc 1151 * and re-traverse the path without a 1152 */ 1153 if (path->should_be_locked) { 1154 ret = bch2_btree_path_relock( 1155 goto out; 1156 } 1157 1158 if (path->cached) { 1159 ret = bch2_btree_path_travers 1160 goto out; 1161 } 1162 1163 path = &trans->paths[path_idx]; 1164 1165 if (unlikely(path->level >= BTREE_MAX 1166 goto out_uptodate; 1167 1168 path->level = btree_path_up_until_goo 1169 unsigned max_level = path->level; 1170 1171 EBUG_ON(btree_path_node(path, path->l 1172 !btree_node_locked(path, path 1173 1174 /* 1175 * Note: path->nodes[path->level] may 1176 * would indicate to other code that 1177 * here it indicates that relocking t 1178 * btree_path_lock_root() comes next 1179 */ 1180 while (path->level > depth_want) { 1181 ret = btree_path_node(path, p 1182 ? btree_path_down(tra 1183 : btree_path_lock_roo 1184 if (unlikely(ret)) { 1185 if (ret == 1) { 1186 /* 1187 * No nodes a 1188 * the btree: 1189 */ 1190 ret = 0; 1191 goto out; 1192 } 1193 1194 __bch2_btree_path_unl 1195 path->level = depth_w 1196 path->l[path->level]. 
1197 goto out; 1198 } 1199 } 1200 1201 if (unlikely(max_level > path->level) 1202 struct btree_path *linked; 1203 unsigned iter; 1204 1205 trans_for_each_path_with_node 1206 for (unsigned j = pat 1207 linked->l[j] 1208 } 1209 1210 out_uptodate: 1211 path->uptodate = BTREE_ITER_UPTODATE; 1212 trace_btree_path_traverse_end(trans, 1213 out: 1214 if (bch2_err_matches(ret, BCH_ERR_tra 1215 panic("ret %s (%i) trans->res 1216 bch2_err_str(ret), ret, 1217 bch2_err_str(trans->res 1218 bch2_btree_path_verify(trans, path); 1219 return ret; 1220 } 1221 1222 static inline void btree_path_copy(struct btr 1223 struct btree_path 1224 { 1225 unsigned i, offset = offsetof(struct 1226 1227 memcpy((void *) dst + offset, 1228 (void *) src + offset, 1229 sizeof(struct btree_path) - of 1230 1231 for (i = 0; i < BTREE_MAX_DEPTH; i++) 1232 unsigned t = btree_node_locke 1233 1234 if (t != BTREE_NODE_UNLOCKED) 1235 six_lock_increment(&d 1236 } 1237 } 1238 1239 static btree_path_idx_t btree_path_clone(stru 1240 bool 1241 { 1242 btree_path_idx_t new = btree_path_all 1243 btree_path_copy(trans, trans->paths + 1244 __btree_path_get(trans, trans->paths 1245 #ifdef TRACK_PATH_ALLOCATED 1246 trans->paths[new].ip_allocated = ip; 1247 #endif 1248 return new; 1249 } 1250 1251 __flatten 1252 btree_path_idx_t __bch2_btree_path_make_mut(s 1253 btree_path_idx_t path 1254 { 1255 struct btree_path *old = trans->paths 1256 __btree_path_put(trans, trans->paths 1257 path = btree_path_clone(trans, path, 1258 trace_btree_path_clone(trans, old, tr 1259 trans->paths[path].preserve = false; 1260 return path; 1261 } 1262 1263 btree_path_idx_t __must_check 1264 __bch2_btree_path_set_pos(struct btree_trans 1265 btree_path_idx_t pa 1266 bool intent, unsign 1267 { 1268 int cmp = bpos_cmp(new_pos, trans->pa 1269 1270 bch2_trans_verify_not_in_restart(tran 1271 EBUG_ON(!trans->paths[path_idx].ref); 1272 1273 trace_btree_path_set_pos(trans, trans 1274 1275 path_idx = bch2_btree_path_make_mut(t 1276 1277 struct btree_path *path = trans->path 1278 path->pos = new_pos; 1279 trans->paths_sorted = false; 1280 1281 if (unlikely(path->cached)) { 1282 btree_node_unlock(trans, path 1283 path->l[0].b = ERR_PTR(-BCH_E 1284 btree_path_set_dirty(path, BT 1285 goto out; 1286 } 1287 1288 unsigned level = btree_path_up_until_ 1289 1290 if (btree_path_node(path, level)) { 1291 struct btree_path_level *l = 1292 1293 BUG_ON(!btree_node_locked(pat 1294 /* 1295 * We might have to skip over 1296 * advancing the node iterato 1297 * many keys just reinit it ( 1298 * is expensive). 
1299 */ 1300 if (cmp < 0 || 1301 !btree_path_advance_to_po 1302 bch2_btree_node_iter_ 1303 1304 /* 1305 * Iterators to interior node 1306 * whiteout: 1307 */ 1308 if (unlikely(level)) 1309 bch2_btree_node_iter_ 1310 } 1311 1312 if (unlikely(level != path->level)) { 1313 btree_path_set_dirty(path, BT 1314 __bch2_btree_path_unlock(tran 1315 } 1316 out: 1317 bch2_btree_path_verify(trans, path); 1318 return path_idx; 1319 } 1320 1321 /* Btree path: main interface: */ 1322 1323 static struct btree_path *have_path_at_pos(st 1324 { 1325 struct btree_path *sib; 1326 1327 sib = prev_btree_path(trans, path); 1328 if (sib && !btree_path_cmp(sib, path) 1329 return sib; 1330 1331 sib = next_btree_path(trans, path); 1332 if (sib && !btree_path_cmp(sib, path) 1333 return sib; 1334 1335 return NULL; 1336 } 1337 1338 static struct btree_path *have_node_at_pos(st 1339 { 1340 struct btree_path *sib; 1341 1342 sib = prev_btree_path(trans, path); 1343 if (sib && sib->level == path->level 1344 return sib; 1345 1346 sib = next_btree_path(trans, path); 1347 if (sib && sib->level == path->level 1348 return sib; 1349 1350 return NULL; 1351 } 1352 1353 static inline void __bch2_path_free(struct bt 1354 { 1355 __bch2_btree_path_unlock(trans, trans 1356 btree_path_list_remove(trans, trans-> 1357 __clear_bit(path, trans->paths_alloca 1358 } 1359 1360 static bool bch2_btree_path_can_relock(struct 1361 { 1362 unsigned l = path->level; 1363 1364 do { 1365 if (!btree_path_node(path, l) 1366 break; 1367 1368 if (!is_btree_node(path, l)) 1369 return false; 1370 1371 if (path->l[l].lock_seq != pa 1372 return false; 1373 1374 l++; 1375 } while (l < path->locks_want); 1376 1377 return true; 1378 } 1379 1380 void bch2_path_put(struct btree_trans *trans, 1381 { 1382 struct btree_path *path = trans->path 1383 1384 if (!__btree_path_put(trans, path, in 1385 return; 1386 1387 dup = path->preserve 1388 ? have_path_at_pos(trans, pat 1389 : have_node_at_pos(trans, pat 1390 1391 trace_btree_path_free(trans, path_idx 1392 1393 if (!dup && !(!path->preserve && !is_ 1394 return; 1395 1396 if (path->should_be_locked && !trans- 1397 if (!dup) 1398 return; 1399 1400 if (!(trans->locked 1401 ? 
bch2_btree_path_reloc 1402 : bch2_btree_path_can_r 1403 return; 1404 } 1405 1406 if (dup) { 1407 dup->preserve |= pa 1408 dup->should_be_locked |= pa 1409 } 1410 1411 __bch2_path_free(trans, path_idx); 1412 } 1413 1414 static void bch2_path_put_nokeep(struct btree 1415 bool intent) 1416 { 1417 if (!__btree_path_put(trans, trans->p 1418 return; 1419 1420 __bch2_path_free(trans, path); 1421 } 1422 1423 void __noreturn bch2_trans_restart_error(stru 1424 { 1425 panic("trans->restart_count %u, shoul 1426 trans->restart_count, restart_c 1427 (void *) trans->last_begin_ip); 1428 } 1429 1430 void __noreturn bch2_trans_in_restart_error(s 1431 { 1432 panic("in transaction restart: %s, la 1433 bch2_err_str(trans->restarted), 1434 (void *) trans->last_restarted_ 1435 } 1436 1437 void __noreturn bch2_trans_unlocked_error(str 1438 { 1439 panic("trans should be locked, unlock 1440 (void *) trans->last_unlock_ip) 1441 } 1442 1443 noinline __cold 1444 void bch2_trans_updates_to_text(struct printb 1445 { 1446 prt_printf(buf, "%u transaction updat 1447 trans->nr_updates, trans-> 1448 printbuf_indent_add(buf, 2); 1449 1450 trans_for_each_update(trans, i) { 1451 struct bkey_s_c old = { &i->o 1452 1453 prt_printf(buf, "update: btre 1454 bch2_btree_id_str(i->b 1455 i->cached, 1456 (void *) i->ip_allocat 1457 1458 prt_printf(buf, " old "); 1459 bch2_bkey_val_to_text(buf, tr 1460 prt_newline(buf); 1461 1462 prt_printf(buf, " new "); 1463 bch2_bkey_val_to_text(buf, tr 1464 prt_newline(buf); 1465 } 1466 1467 for (struct jset_entry *e = trans->jo 1468 e != btree_trans_journal_entries 1469 e = vstruct_next(e)) 1470 bch2_journal_entry_to_text(bu 1471 1472 printbuf_indent_sub(buf, 2); 1473 } 1474 1475 noinline __cold 1476 void bch2_dump_trans_updates(struct btree_tra 1477 { 1478 struct printbuf buf = PRINTBUF; 1479 1480 bch2_trans_updates_to_text(&buf, tran 1481 bch2_print_str(trans->c, buf.buf); 1482 printbuf_exit(&buf); 1483 } 1484 1485 static void bch2_btree_path_to_text_short(str 1486 { 1487 struct btree_path *path = trans->path 1488 1489 prt_printf(out, "path: idx %3u ref %u 1490 path_idx, path->ref, path- 1491 path->preserve ? 'P' : ' ' 1492 path->should_be_locked ? ' 1493 path->cached ? 
'C' : 'B', 1494 bch2_btree_id_str(path->bt 1495 path->level); 1496 bch2_bpos_to_text(out, path->pos); 1497 1498 if (!path->cached && btree_node_locke 1499 prt_char(out, ' '); 1500 struct btree *b = path_l(path 1501 bch2_bpos_to_text(out, b->dat 1502 prt_char(out, '-'); 1503 bch2_bpos_to_text(out, b->key 1504 } 1505 1506 #ifdef TRACK_PATH_ALLOCATED 1507 prt_printf(out, " %pS", (void *) path 1508 #endif 1509 } 1510 1511 static const char *btree_node_locked_str(enum 1512 { 1513 switch (t) { 1514 case BTREE_NODE_UNLOCKED: 1515 return "unlocked"; 1516 case BTREE_NODE_READ_LOCKED: 1517 return "read"; 1518 case BTREE_NODE_INTENT_LOCKED: 1519 return "intent"; 1520 case BTREE_NODE_WRITE_LOCKED: 1521 return "write"; 1522 default: 1523 return NULL; 1524 } 1525 } 1526 1527 void bch2_btree_path_to_text(struct printbuf 1528 { 1529 bch2_btree_path_to_text_short(out, tr 1530 1531 struct btree_path *path = trans->path 1532 1533 prt_printf(out, " uptodate %u locks_w 1534 prt_newline(out); 1535 1536 printbuf_indent_add(out, 2); 1537 for (unsigned l = 0; l < BTREE_MAX_DE 1538 prt_printf(out, "l=%u locks % 1539 btree_node_locked_ 1540 path->l[l].lock_se 1541 1542 int ret = PTR_ERR_OR_ZERO(pat 1543 if (ret) 1544 prt_str(out, bch2_err 1545 else 1546 prt_printf(out, "%px" 1547 prt_newline(out); 1548 } 1549 printbuf_indent_sub(out, 2); 1550 } 1551 1552 static noinline __cold 1553 void __bch2_trans_paths_to_text(struct printb 1554 bool nosort) 1555 { 1556 struct trans_for_each_path_inorder_it 1557 1558 if (!nosort) 1559 btree_trans_sort_paths(trans) 1560 1561 trans_for_each_path_idx_inorder(trans 1562 bch2_btree_path_to_text_short 1563 prt_newline(out); 1564 } 1565 } 1566 1567 noinline __cold 1568 void bch2_trans_paths_to_text(struct printbuf 1569 { 1570 __bch2_trans_paths_to_text(out, trans 1571 } 1572 1573 static noinline __cold 1574 void __bch2_dump_trans_paths_updates(struct b 1575 { 1576 struct printbuf buf = PRINTBUF; 1577 1578 __bch2_trans_paths_to_text(&buf, tran 1579 bch2_trans_updates_to_text(&buf, tran 1580 1581 bch2_print_str(trans->c, buf.buf); 1582 printbuf_exit(&buf); 1583 } 1584 1585 noinline __cold 1586 void bch2_dump_trans_paths_updates(struct btr 1587 { 1588 __bch2_dump_trans_paths_updates(trans 1589 } 1590 1591 noinline __cold 1592 static void bch2_trans_update_max_paths(struc 1593 { 1594 struct btree_transaction_stats *s = b 1595 struct printbuf buf = PRINTBUF; 1596 size_t nr = bitmap_weight(trans->path 1597 1598 bch2_trans_paths_to_text(&buf, trans) 1599 1600 if (!buf.allocation_failure) { 1601 mutex_lock(&s->lock); 1602 if (nr > s->nr_max_paths) { 1603 s->nr_max_paths = nr; 1604 swap(s->max_paths_tex 1605 } 1606 mutex_unlock(&s->lock); 1607 } 1608 1609 printbuf_exit(&buf); 1610 1611 trans->nr_paths_max = nr; 1612 } 1613 1614 noinline __cold 1615 int __bch2_btree_trans_too_many_iters(struct 1616 { 1617 if (trace_trans_restart_too_many_iter 1618 struct printbuf buf = PRINTBU 1619 1620 bch2_trans_paths_to_text(&buf 1621 trace_trans_restart_too_many_ 1622 printbuf_exit(&buf); 1623 } 1624 1625 count_event(trans->c, trans_restart_t 1626 1627 return btree_trans_restart(trans, BCH 1628 } 1629 1630 static noinline void btree_path_overflow(stru 1631 { 1632 bch2_dump_trans_paths_updates(trans); 1633 bch_err(trans->c, "trans path overflo 1634 } 1635 1636 static noinline void btree_paths_realloc(stru 1637 { 1638 unsigned nr = trans->nr_paths * 2; 1639 1640 void *p = kvzalloc(BITS_TO_LONGS(nr) 1641 sizeof(struct btree 1642 nr * sizeof(struct 1643 nr * sizeof(btree_p 1644 nr * sizeof(struct 1645 1646 
unsigned long *paths_allocated = p; 1647 memcpy(paths_allocated, trans->paths_ 1648 p += BITS_TO_LONGS(nr) * sizeof(unsig 1649 1650 p += sizeof(struct btree_trans_paths) 1651 struct btree_path *paths = p; 1652 *trans_paths_nr(paths) = nr; 1653 memcpy(paths, trans->paths, trans->nr 1654 p += nr * sizeof(struct btree_path); 1655 1656 btree_path_idx_t *sorted = p; 1657 memcpy(sorted, trans->sorted, trans-> 1658 p += nr * sizeof(btree_path_idx_t) + 1659 1660 struct btree_insert_entry *updates = 1661 memcpy(updates, trans->updates, trans 1662 1663 unsigned long *old = trans->paths_all 1664 1665 rcu_assign_pointer(trans->paths_alloc 1666 rcu_assign_pointer(trans->paths, 1667 rcu_assign_pointer(trans->sorted, 1668 rcu_assign_pointer(trans->updates, 1669 1670 trans->nr_paths = nr; 1671 1672 if (old != trans->_paths_allocated) 1673 kfree_rcu_mightsleep(old); 1674 } 1675 1676 static inline btree_path_idx_t btree_path_all 1677 1678 { 1679 btree_path_idx_t idx = find_first_zer 1680 1681 if (unlikely(idx == trans->nr_paths)) 1682 if (trans->nr_paths == BTREE_ 1683 btree_path_overflow(t 1684 return 0; 1685 } 1686 1687 btree_paths_realloc(trans); 1688 } 1689 1690 /* 1691 * Do this before marking the new pat 1692 * initialized yet: 1693 */ 1694 if (unlikely(idx > trans->nr_paths_ma 1695 bch2_trans_update_max_paths(t 1696 1697 __set_bit(idx, trans->paths_allocated 1698 1699 struct btree_path *path = &trans->pat 1700 path->ref = 0; 1701 path->intent_ref = 0; 1702 path->nodes_locked = 0; 1703 1704 btree_path_list_add(trans, pos, idx); 1705 trans->paths_sorted = false; 1706 return idx; 1707 } 1708 1709 btree_path_idx_t bch2_path_get(struct btree_t 1710 enum btree_id bt 1711 unsigned locks_w 1712 unsigned flags, 1713 { 1714 struct btree_path *path; 1715 bool cached = flags & BTREE_ITER_cach 1716 bool intent = flags & BTREE_ITER_inte 1717 struct trans_for_each_path_inorder_it 1718 btree_path_idx_t path_pos = 0, path_i 1719 1720 bch2_trans_verify_not_unlocked(trans) 1721 bch2_trans_verify_not_in_restart(tran 1722 bch2_trans_verify_locks(trans); 1723 1724 btree_trans_sort_paths(trans); 1725 1726 trans_for_each_path_inorder(trans, pa 1727 if (__btree_path_cmp(path, 1728 btree_id 1729 cached, 1730 pos, 1731 level) > 1732 break; 1733 1734 path_pos = iter.path_idx; 1735 } 1736 1737 if (path_pos && 1738 trans->paths[path_pos].cached 1739 trans->paths[path_pos].btree_id 1740 trans->paths[path_pos].level 1741 trace_btree_path_get(trans, t 1742 1743 __btree_path_get(trans, trans 1744 path_idx = bch2_btree_path_se 1745 path = trans->paths + path_id 1746 } else { 1747 path_idx = btree_path_alloc(t 1748 path = trans->paths + path_id 1749 1750 __btree_path_get(trans, path, 1751 path->pos 1752 path->btree_id 1753 path->cached 1754 path->uptodate 1755 path->should_be_locked 1756 path->level 1757 path->locks_want 1758 path->nodes_locked 1759 for (unsigned i = 0; i < ARRA 1760 path->l[i].b 1761 #ifdef TRACK_PATH_ALLOCATED 1762 path->ip_allocated 1763 #endif 1764 trans->paths_sorted 1765 1766 trace_btree_path_alloc(trans, 1767 } 1768 1769 if (!(flags & BTREE_ITER_nopreserve)) 1770 path->preserve = true; 1771 1772 if (path->intent_ref) 1773 locks_want = max(locks_want, 1774 1775 /* 1776 * If the path has locks_want greater 1777 * it here - on transaction restart b 1778 * upgrade locks, we might be putting 1779 * Downgrading iterators only happens 1780 * a successful transaction commit. 
1781 */ 1782 1783 locks_want = min(locks_want, BTREE_MA 1784 if (locks_want > path->locks_want) 1785 bch2_btree_path_upgrade_noupg 1786 1787 return path_idx; 1788 } 1789 1790 btree_path_idx_t bch2_path_get_unlocked_mut(s 1791 e 1792 u 1793 s 1794 { 1795 btree_path_idx_t path_idx = bch2_path 1796 BTREE_ITER_nopre 1797 BTREE_ITER_inten 1798 path_idx = bch2_btree_path_make_mut(t 1799 1800 struct btree_path *path = trans->path 1801 bch2_btree_path_downgrade(trans, path 1802 __bch2_btree_path_unlock(trans, path) 1803 return path_idx; 1804 } 1805 1806 struct bkey_s_c bch2_btree_path_peek_slot(str 1807 { 1808 1809 struct btree_path_level *l = path_l(p 1810 struct bkey_packed *_k; 1811 struct bkey_s_c k; 1812 1813 if (unlikely(!l->b)) 1814 return bkey_s_c_null; 1815 1816 EBUG_ON(path->uptodate != BTREE_ITER_ 1817 EBUG_ON(!btree_node_locked(path, path 1818 1819 if (!path->cached) { 1820 _k = bch2_btree_node_iter_pee 1821 k = _k ? bkey_disassemble(l-> 1822 1823 EBUG_ON(k.k && bkey_deleted(k 1824 1825 if (!k.k || !bpos_eq(path->po 1826 goto hole; 1827 } else { 1828 struct bkey_cached *ck = (voi 1829 if (!ck) 1830 return bkey_s_c_null; 1831 1832 EBUG_ON(path->btree_id != ck- 1833 !bkey_eq(path->pos, c 1834 1835 *u = ck->k->k; 1836 k = bkey_i_to_s_c(ck->k); 1837 } 1838 1839 return k; 1840 hole: 1841 bkey_init(u); 1842 u->p = path->pos; 1843 return (struct bkey_s_c) { u, NULL }; 1844 } 1845 1846 1847 void bch2_set_btree_iter_dontneed(struct btre 1848 { 1849 struct btree_trans *trans = iter->tra 1850 1851 if (!iter->path || trans->restarted) 1852 return; 1853 1854 struct btree_path *path = btree_iter_ 1855 path->preserve = false; 1856 if (path->ref == 1) 1857 path->should_be_locked = fal 1858 } 1859 /* Btree iterators: */ 1860 1861 int __must_check 1862 __bch2_btree_iter_traverse(struct btree_iter 1863 { 1864 return bch2_btree_path_traverse(iter- 1865 } 1866 1867 int __must_check 1868 bch2_btree_iter_traverse(struct btree_iter *i 1869 { 1870 struct btree_trans *trans = iter->tra 1871 int ret; 1872 1873 bch2_trans_verify_not_unlocked(trans) 1874 1875 iter->path = bch2_btree_path_set_pos( 1876 btree 1877 iter- 1878 btree 1879 1880 ret = bch2_btree_path_traverse(iter-> 1881 if (ret) 1882 return ret; 1883 1884 struct btree_path *path = btree_iter_ 1885 if (btree_path_node(path, path->level 1886 btree_path_set_should_be_lock 1887 return 0; 1888 } 1889 1890 /* Iterate across nodes (leaf and interior no 1891 1892 struct btree *bch2_btree_iter_peek_node(struc 1893 { 1894 struct btree_trans *trans = iter->tra 1895 struct btree *b = NULL; 1896 int ret; 1897 1898 EBUG_ON(trans->paths[iter->path].cach 1899 bch2_btree_iter_verify(iter); 1900 1901 ret = bch2_btree_path_traverse(trans, 1902 if (ret) 1903 goto err; 1904 1905 struct btree_path *path = btree_iter_ 1906 b = btree_path_node(path, path->level 1907 if (!b) 1908 goto out; 1909 1910 BUG_ON(bpos_lt(b->key.k.p, iter->pos) 1911 1912 bkey_init(&iter->k); 1913 iter->k.p = iter->pos = b->key.k.p; 1914 1915 iter->path = bch2_btree_path_set_pos( 1916 iter- 1917 btree 1918 btree_path_set_should_be_locked(trans 1919 out: 1920 bch2_btree_iter_verify_entry_exit(ite 1921 bch2_btree_iter_verify(iter); 1922 1923 return b; 1924 err: 1925 b = ERR_PTR(ret); 1926 goto out; 1927 } 1928 1929 /* Only kept for -tools */ 1930 struct btree *bch2_btree_iter_peek_node_and_r 1931 { 1932 struct btree *b; 1933 1934 while (b = bch2_btree_iter_peek_node( 1935 bch2_err_matches(PTR_ERR_OR_ZE 1936 bch2_trans_begin(iter->trans) 1937 1938 return b; 1939 } 1940 1941 struct btree 
*bch2_btree_iter_next_node(struc 1942 { 1943 struct btree_trans *trans = iter->tra 1944 struct btree *b = NULL; 1945 int ret; 1946 1947 EBUG_ON(trans->paths[iter->path].cach 1948 bch2_trans_verify_not_in_restart(tran 1949 bch2_btree_iter_verify(iter); 1950 1951 ret = bch2_btree_path_traverse(trans, 1952 if (ret) 1953 goto err; 1954 1955 1956 struct btree_path *path = btree_iter_ 1957 1958 /* already at end? */ 1959 if (!btree_path_node(path, path->leve 1960 return NULL; 1961 1962 /* got to end? */ 1963 if (!btree_path_node(path, path->leve 1964 btree_path_set_level_up(trans 1965 return NULL; 1966 } 1967 1968 if (!bch2_btree_node_relock(trans, pa 1969 __bch2_btree_path_unlock(tran 1970 path->l[path->level].b 1971 path->l[path->level + 1].b 1972 btree_path_set_dirty(path, BT 1973 trace_and_count(trans->c, tra 1974 ret = btree_trans_restart(tra 1975 goto err; 1976 } 1977 1978 b = btree_path_node(path, path->level 1979 1980 if (bpos_eq(iter->pos, b->key.k.p)) { 1981 __btree_path_set_level_up(tra 1982 } else { 1983 if (btree_lock_want(path, pat 1984 btree_node_unlock(tra 1985 1986 /* 1987 * Haven't gotten to the end 1988 * the next child node 1989 */ 1990 iter->path = bch2_btree_path_ 1991 bpos_ 1992 iter- 1993 btree 1994 1995 path = btree_iter_path(trans, 1996 btree_path_set_level_down(tra 1997 1998 ret = bch2_btree_path_travers 1999 if (ret) 2000 goto err; 2001 2002 path = btree_iter_path(trans, 2003 b = path->l[path->level].b; 2004 } 2005 2006 bkey_init(&iter->k); 2007 iter->k.p = iter->pos = b->key.k.p; 2008 2009 iter->path = bch2_btree_path_set_pos( 2010 iter- 2011 btree 2012 btree_path_set_should_be_locked(trans 2013 EBUG_ON(btree_iter_path(trans, iter)- 2014 out: 2015 bch2_btree_iter_verify_entry_exit(ite 2016 bch2_btree_iter_verify(iter); 2017 2018 return b; 2019 err: 2020 b = ERR_PTR(ret); 2021 goto out; 2022 } 2023 2024 /* Iterate across keys (in leaf nodes only) * 2025 2026 inline bool bch2_btree_iter_advance(struct bt 2027 { 2028 struct bpos pos = iter->k.p; 2029 bool ret = !(iter->flags & BTREE_ITER 2030 ? bpos_eq(pos, SPOS_MAX) 2031 : bkey_eq(pos, SPOS_MAX) 2032 2033 if (ret && !(iter->flags & BTREE_ITER 2034 pos = bkey_successor(iter, po 2035 bch2_btree_iter_set_pos(iter, pos); 2036 return ret; 2037 } 2038 2039 inline bool bch2_btree_iter_rewind(struct btr 2040 { 2041 struct bpos pos = bkey_start_pos(&ite 2042 bool ret = !(iter->flags & BTREE_ITER 2043 ? bpos_eq(pos, POS_MIN) 2044 : bkey_eq(pos, POS_MIN)) 2045 2046 if (ret && !(iter->flags & BTREE_ITER 2047 pos = bkey_predecessor(iter, 2048 bch2_btree_iter_set_pos(iter, pos); 2049 return ret; 2050 } 2051 2052 static noinline 2053 void bch2_btree_trans_peek_prev_updates(struc 2054 struc 2055 { 2056 struct bpos end = path_l(btree_iter_p 2057 2058 trans_for_each_update(trans, i) 2059 if (!i->key_cache_already_flu 2060 i->btree_id == iter->btre 2061 bpos_le(i->k->k.p, iter-> 2062 bpos_ge(i->k->k.p, k->k ? 2063 iter->k = i->k->k; 2064 *k = bkey_i_to_s_c(i- 2065 } 2066 } 2067 2068 static noinline 2069 void bch2_btree_trans_peek_updates(struct btr 2070 struct bke 2071 { 2072 struct btree_path *path = btree_iter_ 2073 struct bpos end = path_l(path)->b->ke 2074 2075 trans_for_each_update(trans, i) 2076 if (!i->key_cache_already_flu 2077 i->btree_id == iter->btre 2078 bpos_ge(i->k->k.p, path-> 2079 bpos_le(i->k->k.p, k->k ? 
2080 iter->k = i->k->k; 2081 *k = bkey_i_to_s_c(i- 2082 } 2083 } 2084 2085 static noinline 2086 void bch2_btree_trans_peek_slot_updates(struc 2087 struc 2088 { 2089 trans_for_each_update(trans, i) 2090 if (!i->key_cache_already_flu 2091 i->btree_id == iter->btre 2092 bpos_eq(i->k->k.p, iter-> 2093 iter->k = i->k->k; 2094 *k = bkey_i_to_s_c(i- 2095 } 2096 } 2097 2098 static struct bkey_i *bch2_btree_journal_peek 2099 2100 2101 { 2102 struct btree_path *path = btree_iter_ 2103 2104 return bch2_journal_keys_peek_upto(tr 2105 pa 2106 pa 2107 en 2108 &i 2109 } 2110 2111 static noinline 2112 struct bkey_s_c btree_trans_peek_slot_journal 2113 2114 { 2115 struct btree_path *path = btree_iter_ 2116 struct bkey_i *k = bch2_btree_journal 2117 2118 if (k) { 2119 iter->k = k->k; 2120 return bkey_i_to_s_c(k); 2121 } else { 2122 return bkey_s_c_null; 2123 } 2124 } 2125 2126 static noinline 2127 struct bkey_s_c btree_trans_peek_journal(stru 2128 stru 2129 stru 2130 { 2131 struct btree_path *path = btree_iter_ 2132 struct bkey_i *next_journal = 2133 bch2_btree_journal_peek(trans 2134 k.k ? k.k->p 2135 2136 if (next_journal) { 2137 iter->k = next_journal->k; 2138 k = bkey_i_to_s_c(next_journa 2139 } 2140 2141 return k; 2142 } 2143 2144 /* 2145 * Checks btree key cache for key at iter->po 2146 * bkey_s_c_null: 2147 */ 2148 static noinline 2149 struct bkey_s_c btree_trans_peek_key_cache(st 2150 { 2151 struct btree_trans *trans = iter->tra 2152 struct bch_fs *c = trans->c; 2153 struct bkey u; 2154 struct bkey_s_c k; 2155 int ret; 2156 2157 bch2_trans_verify_not_in_restart(tran 2158 bch2_trans_verify_not_unlocked(trans) 2159 2160 if ((iter->flags & BTREE_ITER_key_cac 2161 bpos_eq(iter->pos, pos)) 2162 return bkey_s_c_null; 2163 2164 if (!bch2_btree_key_cache_find(c, ite 2165 return bkey_s_c_null; 2166 2167 if (!iter->key_cache_path) 2168 iter->key_cache_path = bch2_p 2169 2170 2171 2172 2173 2174 iter->key_cache_path = bch2_btree_pat 2175 iter- 2176 btree 2177 2178 ret = bch2_btree_path_traverse(tran 2179 iter 2180 bch2_btree_path_relock(trans, 2181 if (unlikely(ret)) 2182 return bkey_s_c_err(ret); 2183 2184 btree_path_set_should_be_locked(trans 2185 2186 k = bch2_btree_path_peek_slot(trans-> 2187 if (k.k && !bkey_err(k)) { 2188 iter->k = u; 2189 k.k = &iter->k; 2190 } 2191 return k; 2192 } 2193 2194 static struct bkey_s_c __bch2_btree_iter_peek 2195 { 2196 struct btree_trans *trans = iter->tra 2197 struct bkey_s_c k, k2; 2198 int ret; 2199 2200 EBUG_ON(btree_iter_path(trans, iter)- 2201 bch2_btree_iter_verify(iter); 2202 2203 while (1) { 2204 struct btree_path_level *l; 2205 2206 iter->path = bch2_btree_path_ 2207 iter- 2208 btree 2209 2210 ret = bch2_btree_path_travers 2211 if (unlikely(ret)) { 2212 /* ensure that iter-> 2213 bch2_btree_iter_set_p 2214 k = bkey_s_c_err(ret) 2215 goto out; 2216 } 2217 2218 struct btree_path *path = btr 2219 l = path_l(path); 2220 2221 if (unlikely(!l->b)) { 2222 /* No btree nodes at 2223 bch2_btree_iter_set_p 2224 k = bkey_s_c_null; 2225 goto out; 2226 } 2227 2228 btree_path_set_should_be_lock 2229 2230 k = btree_path_level_peek_all 2231 2232 if (unlikely(iter->flags & BT 2233 k.k && 2234 (k2 = btree_trans_peek_ke 2235 k = k2; 2236 ret = bkey_err(k); 2237 if (ret) { 2238 bch2_btree_it 2239 goto out; 2240 } 2241 } 2242 2243 if (unlikely(iter->flags & BT 2244 k = btree_trans_peek_ 2245 2246 if (unlikely((iter->flags & B 2247 trans->nr_update 2248 bch2_btree_trans_peek 2249 2250 if (k.k && bkey_deleted(k.k)) 2251 /* 2252 * If we've got a whi 2253 * key, advance the s 2254 * of 
just after the 2255 * whiteout, with a r 2256 * in the btree delet 2257 */ 2258 search_key = !bpos_eq 2259 ? k.k->p 2260 : bpos_succes 2261 continue; 2262 } 2263 2264 if (likely(k.k)) { 2265 break; 2266 } else if (likely(!bpos_eq(l- 2267 /* Advance to next le 2268 search_key = bpos_suc 2269 } else { 2270 /* End of btree: */ 2271 bch2_btree_iter_set_p 2272 k = bkey_s_c_null; 2273 goto out; 2274 } 2275 } 2276 out: 2277 bch2_btree_iter_verify(iter); 2278 2279 return k; 2280 } 2281 2282 /** 2283 * bch2_btree_iter_peek_upto() - returns firs 2284 * iterator's current position 2285 * @iter: iterator to peek from 2286 * @end: search limit: returns keys le 2287 * 2288 * Returns: key if found, or an error ext 2289 */ 2290 struct bkey_s_c bch2_btree_iter_peek_upto(str 2291 { 2292 struct btree_trans *trans = iter->tra 2293 struct bpos search_key = btree_iter_s 2294 struct bkey_s_c k; 2295 struct bpos iter_pos; 2296 int ret; 2297 2298 bch2_trans_verify_not_unlocked(trans) 2299 EBUG_ON((iter->flags & BTREE_ITER_fil 2300 2301 if (iter->update_path) { 2302 bch2_path_put_nokeep(trans, i 2303 iter->fl 2304 iter->update_path = 0; 2305 } 2306 2307 bch2_btree_iter_verify_entry_exit(ite 2308 2309 while (1) { 2310 k = __bch2_btree_iter_peek(it 2311 if (unlikely(!k.k)) 2312 goto end; 2313 if (unlikely(bkey_err(k))) 2314 goto out_no_locked; 2315 2316 /* 2317 * We need to check against @ 2318 * if we get to a different i 2319 * seeing keys for a differen 2320 * filtered out. 2321 * 2322 * But we can't do the full c 2323 * isn't monotonically increa 2324 * that's what we check again 2325 */ 2326 if (unlikely(!(iter->flags & 2327 ? bkey_gt(k.k->p 2328 : k.k->p.inode > 2329 goto end; 2330 2331 if (iter->update_path && 2332 !bkey_eq(trans->paths[ite 2333 bch2_path_put_nokeep( 2334 2335 iter->update_path = 0 2336 } 2337 2338 if ((iter->flags & BTREE_ITER 2339 (iter->flags & BTREE_ITER 2340 !(iter->flags & BTREE_ITE 2341 !iter->update_path) { 2342 struct bpos pos = k.k 2343 2344 if (pos.snapshot < it 2345 search_key = 2346 continue; 2347 } 2348 2349 pos.snapshot = iter-> 2350 2351 /* 2352 * advance, same as o 2353 * to snapshot 2354 */ 2355 __btree_path_get(tran 2356 iter->update_path = i 2357 2358 iter->update_path = b 2359 2360 2361 2362 ret = bch2_btree_path 2363 if (unlikely(ret)) { 2364 k = bkey_s_c_ 2365 goto out_no_l 2366 } 2367 } 2368 2369 /* 2370 * We can never have a key in 2371 * we don't have to check the 2372 */ 2373 if ((iter->flags & BTREE_ITER 2374 !bch2_snapshot_is_ancesto 2375 2376 2377 search_key = bpos_suc 2378 continue; 2379 } 2380 2381 if (bkey_whiteout(k.k) && 2382 !(iter->flags & BTREE_ITE 2383 search_key = bkey_suc 2384 continue; 2385 } 2386 2387 /* 2388 * iter->pos should be monono 2389 * equal to the key we just r 2390 * straddle iter->pos: 2391 */ 2392 if (!(iter->flags & BTREE_ITE 2393 iter_pos = k.k->p; 2394 else 2395 iter_pos = bkey_max(i 2396 2397 if (unlikely(iter->flags & BT 2398 iter->flags & BT 2399 2400 goto end; 2401 2402 break; 2403 } 2404 2405 iter->pos = iter_pos; 2406 2407 iter->path = bch2_btree_path_set_pos( 2408 iter->flags & 2409 btree_iter_ip 2410 2411 btree_path_set_should_be_locked(trans 2412 out_no_locked: 2413 if (iter->update_path) { 2414 ret = bch2_btree_path_relock( 2415 if (unlikely(ret)) 2416 k = bkey_s_c_err(ret) 2417 else 2418 btree_path_set_should 2419 } 2420 2421 if (!(iter->flags & BTREE_ITER_all_sn 2422 iter->pos.snapshot = iter->sn 2423 2424 ret = bch2_btree_iter_verify_ret(iter 2425 if (unlikely(ret)) { 2426 bch2_btree_iter_set_pos(iter, 2427 k = 
bkey_s_c_err(ret); 2428 } 2429 2430 bch2_btree_iter_verify_entry_exit(ite 2431 2432 return k; 2433 end: 2434 bch2_btree_iter_set_pos(iter, end); 2435 k = bkey_s_c_null; 2436 goto out_no_locked; 2437 } 2438 2439 /** 2440 * bch2_btree_iter_next() - returns first key 2441 * position 2442 * @iter: iterator to peek from 2443 * 2444 * Returns: key if found, or an error ext 2445 */ 2446 struct bkey_s_c bch2_btree_iter_next(struct b 2447 { 2448 if (!bch2_btree_iter_advance(iter)) 2449 return bkey_s_c_null; 2450 2451 return bch2_btree_iter_peek(iter); 2452 } 2453 2454 /** 2455 * bch2_btree_iter_peek_prev() - returns firs 2456 * iterator's current position 2457 * @iter: iterator to peek from 2458 * 2459 * Returns: key if found, or an error ext 2460 */ 2461 struct bkey_s_c bch2_btree_iter_peek_prev(str 2462 { 2463 struct btree_trans *trans = iter->tra 2464 struct bpos search_key = iter->pos; 2465 struct bkey_s_c k; 2466 struct bkey saved_k; 2467 const struct bch_val *saved_v; 2468 btree_path_idx_t saved_path = 0; 2469 int ret; 2470 2471 bch2_trans_verify_not_unlocked(trans) 2472 EBUG_ON(btree_iter_path(trans, iter)- 2473 btree_iter_path(trans, iter)- 2474 2475 if (iter->flags & BTREE_ITER_with_jou 2476 return bkey_s_c_err(-BCH_ERR_ 2477 2478 bch2_btree_iter_verify(iter); 2479 bch2_btree_iter_verify_entry_exit(ite 2480 2481 if (iter->flags & BTREE_ITER_filter_s 2482 search_key.snapshot = U32_MAX 2483 2484 while (1) { 2485 iter->path = bch2_btree_path_ 2486 2487 2488 2489 ret = bch2_btree_path_travers 2490 if (unlikely(ret)) { 2491 /* ensure that iter-> 2492 bch2_btree_iter_set_p 2493 k = bkey_s_c_err(ret) 2494 goto out_no_locked; 2495 } 2496 2497 struct btree_path *path = btr 2498 2499 k = btree_path_level_peek(tra 2500 if (!k.k || 2501 ((iter->flags & BTREE_ITE 2502 ? 
bpos_ge(bkey_start_pos 2503 : bpos_gt(k.k->p, search 2504 k = btree_path_level_ 2505 2506 if (unlikely((iter->flags & B 2507 trans->nr_update 2508 bch2_btree_trans_peek 2509 2510 if (likely(k.k)) { 2511 if (iter->flags & BTR 2512 if (k.k->p.sn 2513 goto 2514 2515 /* 2516 * If we have 2517 * longer at 2518 * that candi 2519 */ 2520 if (saved_pat 2521 bch2_ 2522 2523 iter- 2524 saved 2525 iter- 2526 k.v 2527 goto 2528 } 2529 2530 if (bch2_snap 2531 2532 2533 if (s 2534 2535 2536 saved 2537 2538 2539 path 2540 trace 2541 saved 2542 saved 2543 } 2544 2545 search_key = 2546 continue; 2547 } 2548 got_key: 2549 if (bkey_whiteout(k.k 2550 !(iter->flags & B 2551 search_key = 2552 if (iter->fla 2553 searc 2554 continue; 2555 } 2556 2557 btree_path_set_should 2558 break; 2559 } else if (likely(!bpos_eq(pa 2560 /* Advance to previou 2561 search_key = bpos_pre 2562 } else { 2563 /* Start of btree: */ 2564 bch2_btree_iter_set_p 2565 k = bkey_s_c_null; 2566 goto out_no_locked; 2567 } 2568 } 2569 2570 EBUG_ON(bkey_gt(bkey_start_pos(k.k), 2571 2572 /* Extents can straddle iter->pos: */ 2573 if (bkey_lt(k.k->p, iter->pos)) 2574 iter->pos = k.k->p; 2575 2576 if (iter->flags & BTREE_ITER_filter_s 2577 iter->pos.snapshot = iter->sn 2578 out_no_locked: 2579 if (saved_path) 2580 bch2_path_put_nokeep(trans, s 2581 2582 bch2_btree_iter_verify_entry_exit(ite 2583 bch2_btree_iter_verify(iter); 2584 2585 return k; 2586 } 2587 2588 /** 2589 * bch2_btree_iter_prev() - returns first key 2590 * position 2591 * @iter: iterator to peek from 2592 * 2593 * Returns: key if found, or an error ext 2594 */ 2595 struct bkey_s_c bch2_btree_iter_prev(struct b 2596 { 2597 if (!bch2_btree_iter_rewind(iter)) 2598 return bkey_s_c_null; 2599 2600 return bch2_btree_iter_peek_prev(iter 2601 } 2602 2603 struct bkey_s_c bch2_btree_iter_peek_slot(str 2604 { 2605 struct btree_trans *trans = iter->tra 2606 struct bpos search_key; 2607 struct bkey_s_c k; 2608 int ret; 2609 2610 bch2_trans_verify_not_unlocked(trans) 2611 bch2_btree_iter_verify(iter); 2612 bch2_btree_iter_verify_entry_exit(ite 2613 EBUG_ON(btree_iter_path(trans, iter)- 2614 2615 /* extents can't span inode numbers: 2616 if ((iter->flags & BTREE_ITER_is_exte 2617 unlikely(iter->pos.offset == KEY_ 2618 if (iter->pos.inode == KEY_IN 2619 return bkey_s_c_null; 2620 2621 bch2_btree_iter_set_pos(iter, 2622 } 2623 2624 search_key = btree_iter_search_key(it 2625 iter->path = bch2_btree_path_set_pos( 2626 iter- 2627 btree 2628 2629 ret = bch2_btree_path_traverse(trans, 2630 if (unlikely(ret)) { 2631 k = bkey_s_c_err(ret); 2632 goto out_no_locked; 2633 } 2634 2635 if ((iter->flags & BTREE_ITER_cached) 2636 !(iter->flags & (BTREE_ITER_is_ex 2637 k = bkey_s_c_null; 2638 2639 if (unlikely((iter->flags & B 2640 trans->nr_update 2641 bch2_btree_trans_peek 2642 if (k.k) 2643 goto out; 2644 } 2645 2646 if (unlikely(iter->flags & BT 2647 (k = btree_trans_peek_slo 2648 goto out; 2649 2650 if (unlikely(iter->flags & BT 2651 (k = btree_trans_peek_key 2652 if (!bkey_err(k)) 2653 iter->k = *k. 
2654 /* We're not returnin 2655 goto out_no_locked; 2656 } 2657 2658 k = bch2_btree_path_peek_slot 2659 if (unlikely(!k.k)) 2660 goto out_no_locked; 2661 } else { 2662 struct bpos next; 2663 struct bpos end = iter->pos; 2664 2665 if (iter->flags & BTREE_ITER_ 2666 end.offset = U64_MAX; 2667 2668 EBUG_ON(btree_iter_path(trans 2669 2670 if (iter->flags & BTREE_ITER_ 2671 struct btree_iter ite 2672 2673 bch2_trans_copy_iter( 2674 k = bch2_btree_iter_p 2675 2676 if (k.k && !bkey_err( 2677 swap(iter->ke 2678 iter->k = ite 2679 k.k = &iter-> 2680 } 2681 bch2_trans_iter_exit( 2682 } else { 2683 struct bpos pos = ite 2684 2685 k = bch2_btree_iter_p 2686 if (unlikely(bkey_err 2687 bch2_btree_it 2688 else 2689 iter->pos = p 2690 } 2691 2692 if (unlikely(bkey_err(k))) 2693 goto out_no_locked; 2694 2695 next = k.k ? bkey_start_pos(k 2696 2697 if (bkey_lt(iter->pos, next)) 2698 bkey_init(&iter->k); 2699 iter->k.p = iter->pos 2700 2701 if (iter->flags & BTR 2702 bch2_key_resi 2703 2704 2705 2706 2707 2708 EBUG_ON(!iter 2709 } 2710 2711 k = (struct bkey_s_c) 2712 } 2713 } 2714 out: 2715 btree_path_set_should_be_locked(trans 2716 out_no_locked: 2717 bch2_btree_iter_verify_entry_exit(ite 2718 bch2_btree_iter_verify(iter); 2719 ret = bch2_btree_iter_verify_ret(iter 2720 if (unlikely(ret)) 2721 return bkey_s_c_err(ret); 2722 2723 return k; 2724 } 2725 2726 struct bkey_s_c bch2_btree_iter_next_slot(str 2727 { 2728 if (!bch2_btree_iter_advance(iter)) 2729 return bkey_s_c_null; 2730 2731 return bch2_btree_iter_peek_slot(iter 2732 } 2733 2734 struct bkey_s_c bch2_btree_iter_prev_slot(str 2735 { 2736 if (!bch2_btree_iter_rewind(iter)) 2737 return bkey_s_c_null; 2738 2739 return bch2_btree_iter_peek_slot(iter 2740 } 2741 2742 /* Obsolete, but still used by rust wrapper i 2743 struct bkey_s_c bch2_btree_iter_peek_and_rest 2744 { 2745 struct bkey_s_c k; 2746 2747 while (btree_trans_too_many_iters(ite 2748 (k = bch2_btree_iter_peek_type 2749 bch2_err_matches(bkey_err(k), 2750 bch2_trans_begin(iter->trans) 2751 2752 return k; 2753 } 2754 2755 /* new transactional stuff: */ 2756 2757 #ifdef CONFIG_BCACHEFS_DEBUG 2758 static void btree_trans_verify_sorted_refs(st 2759 { 2760 struct btree_path *path; 2761 unsigned i; 2762 2763 BUG_ON(trans->nr_sorted != bitmap_wei 2764 2765 trans_for_each_path(trans, path, i) { 2766 BUG_ON(path->sorted_idx >= tr 2767 BUG_ON(trans->sorted[path->so 2768 } 2769 2770 for (i = 0; i < trans->nr_sorted; i++ 2771 unsigned idx = trans->sorted[ 2772 2773 BUG_ON(!test_bit(idx, trans-> 2774 BUG_ON(trans->paths[idx].sort 2775 } 2776 } 2777 2778 static void btree_trans_verify_sorted(struct 2779 { 2780 struct btree_path *path, *prev = NULL 2781 struct trans_for_each_path_inorder_it 2782 2783 if (!bch2_debug_check_iterators) 2784 return; 2785 2786 trans_for_each_path_inorder(trans, pa 2787 if (prev && btree_path_cmp(pr 2788 __bch2_dump_trans_pat 2789 panic("trans paths ou 2790 } 2791 prev = path; 2792 } 2793 } 2794 #else 2795 static inline void btree_trans_verify_sorted_ 2796 static inline void btree_trans_verify_sorted( 2797 #endif 2798 2799 void __bch2_btree_trans_sort_paths(struct btr 2800 { 2801 int i, l = 0, r = trans->nr_sorted, i 2802 bool swapped; 2803 2804 btree_trans_verify_sorted_refs(trans) 2805 2806 if (trans->paths_sorted) 2807 goto out; 2808 2809 /* 2810 * Cocktail shaker sort: this is effi 2811 * mostly sorted. 2812 */ 2813 do { 2814 swapped = false; 2815 2816 for (i = inc > 0 ? 
l : r - 2; 2817 i + 1 < r && i >= l; 2818 i += inc) { 2819 if (btree_path_cmp(tr 2820 tr 2821 swap(trans->s 2822 trans->paths[ 2823 trans->paths[ 2824 swapped = tru 2825 } 2826 } 2827 2828 if (inc > 0) 2829 --r; 2830 else 2831 l++; 2832 inc = -inc; 2833 } while (swapped); 2834 2835 trans->paths_sorted = true; 2836 out: 2837 btree_trans_verify_sorted(trans); 2838 } 2839 2840 static inline void btree_path_list_remove(str 2841 str 2842 { 2843 EBUG_ON(path->sorted_idx >= trans->nr 2844 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2845 trans->nr_sorted--; 2846 memmove_u64s_down_small(trans->sorted 2847 trans->sorted 2848 DIV_ROUND_UP( 2849 2850 #else 2851 array_remove_item(trans->sorted, tran 2852 #endif 2853 for (unsigned i = path->sorted_idx; i 2854 trans->paths[trans->sorted[i] 2855 } 2856 2857 static inline void btree_path_list_add(struct 2858 btree_ 2859 btree_ 2860 { 2861 struct btree_path *path = trans->path 2862 2863 path->sorted_idx = pos ? trans->paths 2864 2865 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2866 memmove_u64s_up_small(trans->sorted + 2867 trans->sorted + 2868 DIV_ROUND_UP(tr 2869 si 2870 trans->nr_sorted++; 2871 trans->sorted[path->sorted_idx] = pat 2872 #else 2873 array_insert_item(trans->sorted, tran 2874 #endif 2875 2876 for (unsigned i = path->sorted_idx; i 2877 trans->paths[trans->sorted[i] 2878 2879 btree_trans_verify_sorted_refs(trans) 2880 } 2881 2882 void bch2_trans_iter_exit(struct btree_trans 2883 { 2884 if (iter->update_path) 2885 bch2_path_put_nokeep(trans, i 2886 iter->flags & B 2887 if (iter->path) 2888 bch2_path_put(trans, iter->pa 2889 iter->flags & B 2890 if (iter->key_cache_path) 2891 bch2_path_put(trans, iter->ke 2892 iter->flags & B 2893 iter->path = 0; 2894 iter->update_path = 0; 2895 iter->key_cache_path = 0; 2896 iter->trans = NULL; 2897 } 2898 2899 void bch2_trans_iter_init_outlined(struct btr 2900 struct btree_iter * 2901 enum btree_id btree 2902 unsigned flags) 2903 { 2904 bch2_trans_iter_init_common(trans, it 2905 bch2_btree_ite 2906 _RET_IP_); 2907 } 2908 2909 void bch2_trans_node_iter_init(struct btree_t 2910 struct btree_i 2911 enum btree_id 2912 struct bpos po 2913 unsigned locks 2914 unsigned depth 2915 unsigned flags 2916 { 2917 flags |= BTREE_ITER_not_extents; 2918 flags |= BTREE_ITER_snapshot_field; 2919 flags |= BTREE_ITER_all_snapshots; 2920 2921 bch2_trans_iter_init_common(trans, it 2922 __bch2_btree_i 2923 _RET_IP_); 2924 2925 iter->min_depth = depth; 2926 2927 struct btree_path *path = btree_iter_ 2928 BUG_ON(path->locks_want < min(locks_ 2929 BUG_ON(path->level != depth); 2930 BUG_ON(iter->min_depth != depth); 2931 } 2932 2933 void bch2_trans_copy_iter(struct btree_iter * 2934 { 2935 struct btree_trans *trans = src->tran 2936 2937 *dst = *src; 2938 #ifdef TRACK_PATH_ALLOCATED 2939 dst->ip_allocated = _RET_IP_; 2940 #endif 2941 if (src->path) 2942 __btree_path_get(trans, trans 2943 if (src->update_path) 2944 __btree_path_get(trans, trans 2945 dst->key_cache_path = 0; 2946 } 2947 2948 void *__bch2_trans_kmalloc(struct btree_trans 2949 { 2950 struct bch_fs *c = trans->c; 2951 unsigned new_top = trans->mem_top + s 2952 unsigned old_bytes = trans->mem_bytes 2953 unsigned new_bytes = roundup_pow_of_t 2954 int ret; 2955 void *new_mem; 2956 void *p; 2957 2958 WARN_ON_ONCE(new_bytes > BTREE_TRANS_ 2959 2960 struct btree_transaction_stats *s = b 2961 s->max_mem = max(s->max_mem, new_byte 2962 2963 if (trans->used_mempool) { 2964 if (trans->mem_bytes >= new_b 2965 goto out_change_top; 2966 2967 /* No more space from mempool 2968 new_mem 
= kmalloc(new_bytes, 2969 if (unlikely(!new_mem)) { 2970 bch2_trans_unlock(tra 2971 2972 new_mem = kmalloc(new 2973 if (!new_mem) 2974 return ERR_PT 2975 2976 ret = bch2_trans_relo 2977 if (ret) { 2978 kfree(new_mem 2979 return ERR_PT 2980 } 2981 } 2982 memcpy(new_mem, trans->mem, t 2983 trans->used_mempool = false; 2984 mempool_free(trans->mem, &c-> 2985 goto out_new_mem; 2986 } 2987 2988 new_mem = krealloc(trans->mem, new_by 2989 if (unlikely(!new_mem)) { 2990 bch2_trans_unlock(trans); 2991 2992 new_mem = krealloc(trans->mem 2993 if (!new_mem && new_bytes <= 2994 new_mem = mempool_all 2995 new_bytes = BTREE_TRA 2996 memcpy(new_mem, trans 2997 trans->used_mempool = 2998 kfree(trans->mem); 2999 } 3000 3001 if (!new_mem) 3002 return ERR_PTR(-BCH_E 3003 3004 trans->mem = new_mem; 3005 trans->mem_bytes = new_bytes; 3006 3007 ret = bch2_trans_relock(trans 3008 if (ret) 3009 return ERR_PTR(ret); 3010 } 3011 out_new_mem: 3012 trans->mem = new_mem; 3013 trans->mem_bytes = new_bytes; 3014 3015 if (old_bytes) { 3016 trace_and_count(c, trans_rest 3017 return ERR_PTR(btree_trans_re 3018 } 3019 out_change_top: 3020 p = trans->mem + trans->mem_top; 3021 trans->mem_top += size; 3022 memset(p, 0, size); 3023 return p; 3024 } 3025 3026 static inline void check_srcu_held_too_long(s 3027 { 3028 WARN(trans->srcu_held && time_after(j 3029 "btree trans held srcu lock (del 3030 (jiffies - trans->srcu_lock_time 3031 } 3032 3033 void bch2_trans_srcu_unlock(struct btree_tran 3034 { 3035 if (trans->srcu_held) { 3036 struct bch_fs *c = trans->c; 3037 struct btree_path *path; 3038 unsigned i; 3039 3040 trans_for_each_path(trans, pa 3041 if (path->cached && ! 3042 path->l[0].b 3043 3044 check_srcu_held_too_long(tran 3045 srcu_read_unlock(&c->btree_tr 3046 trans->srcu_held = false; 3047 } 3048 } 3049 3050 static void bch2_trans_srcu_lock(struct btree 3051 { 3052 if (!trans->srcu_held) { 3053 trans->srcu_idx = srcu_read_l 3054 trans->srcu_lock_time = jif 3055 trans->srcu_held = true; 3056 } 3057 } 3058 3059 /** 3060 * bch2_trans_begin() - reset a transaction a 3061 * @trans: transaction to reset 3062 * 3063 * Returns: current restart counter, to b 3064 * 3065 * While iterating over nodes or updating nod 3066 * may return BCH_ERR_transaction_restart whe 3067 * occurs bch2_trans_begin() should be called 3068 */ 3069 u32 bch2_trans_begin(struct btree_trans *tran 3070 { 3071 struct btree_path *path; 3072 unsigned i; 3073 u64 now; 3074 3075 bch2_trans_reset_updates(trans); 3076 3077 trans->restart_count++; 3078 trans->mem_top = 0; 3079 trans->journal_entries = NUL 3080 3081 trans_for_each_path(trans, path, i) { 3082 path->should_be_locked = fals 3083 3084 /* 3085 * If the transaction wasn't 3086 * doing something new: dont 3087 * are in use - except for th 3088 */ 3089 if (!trans->restarted && path 3090 path->preserve = fals 3091 3092 /* 3093 * XXX: we probably shouldn't 3094 * was restarted, but current 3095 * iterators if we do that 3096 */ 3097 if (!path->ref && !path->pres 3098 __bch2_path_free(tran 3099 else 3100 path->preserve = fals 3101 } 3102 3103 now = local_clock(); 3104 3105 if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LA 3106 time_after64(now, trans->last_beg 3107 __bch2_time_stats_update(&btr 3108 tran 3109 3110 if (!trans->restarted && 3111 (need_resched() || 3112 time_after64(now, trans->last_be 3113 bch2_trans_unlock(trans); 3114 cond_resched(); 3115 now = local_clock(); 3116 } 3117 trans->last_begin_time = now; 3118 3119 if (unlikely(trans->srcu_held && 3120 time_after(jiffies, tran 3121 bch2_trans_srcu_unlock(trans) 
3122 3123 trans->last_begin_ip = _RET_IP_; 3124 3125 trans_set_locked(trans); 3126 3127 if (trans->restarted) { 3128 bch2_btree_path_traverse_all( 3129 trans->notrace_relock_fail = 3130 } 3131 3132 bch2_trans_verify_not_unlocked(trans) 3133 return trans->restart_count; 3134 } 3135 3136 const char *bch2_btree_transaction_fns[BCH_TR 3137 3138 unsigned bch2_trans_get_fn_idx(const char *fn 3139 { 3140 for (unsigned i = 0; i < ARRAY_SIZE(b 3141 if (!bch2_btree_transaction_f 3142 bch2_btree_transaction_fn 3143 bch2_btree_transactio 3144 return i; 3145 } 3146 3147 pr_warn_once("BCH_TRANSACTIONS_NR not 3148 return 0; 3149 } 3150 3151 struct btree_trans *__bch2_trans_get(struct b 3152 __acquires(&c->btree_trans_barrier) 3153 { 3154 struct btree_trans *trans; 3155 3156 if (IS_ENABLED(__KERNEL__)) { 3157 trans = this_cpu_xchg(c->btre 3158 if (trans) { 3159 memset(trans, 0, offs 3160 goto got_trans; 3161 } 3162 } 3163 3164 trans = mempool_alloc(&c->btree_trans 3165 memset(trans, 0, sizeof(*trans)); 3166 3167 seqmutex_lock(&c->btree_trans_lock); 3168 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) 3169 struct btree_trans *pos; 3170 pid_t pid = current->pid; 3171 3172 trans->locking_wait.task = cu 3173 3174 list_for_each_entry(pos, &c-> 3175 struct task_struct *p 3176 /* 3177 * We'd much prefer t 3178 * disallow multiple 3179 * but the data move 3180 * already have a btr 3181 */ 3182 BUG_ON(pos_task && 3183 pid == pos_tas 3184 pos->locked); 3185 } 3186 } 3187 3188 list_add(&trans->list, &c->btree_tran 3189 seqmutex_unlock(&c->btree_trans_lock) 3190 got_trans: 3191 trans->c = c; 3192 trans->last_begin_time = local_clock 3193 trans->fn_idx = fn_idx; 3194 trans->locking_wait.task = current; 3195 trans->journal_replay_not_finished = 3196 unlikely(!test_bit(JOURNAL_re 3197 atomic_inc_not_zero(&c->journ 3198 trans->nr_paths = ARRAY_SIZE( 3199 trans->paths_allocated = trans->_pat 3200 trans->sorted = trans->_sor 3201 trans->paths = trans->_pat 3202 trans->updates = trans->_upd 3203 3204 *trans_paths_nr(trans->paths) = BTREE 3205 3206 trans->paths_allocated[0] = 1; 3207 3208 static struct lock_class_key lockdep_ 3209 lockdep_init_map(&trans->dep_map, "bc 3210 3211 if (fn_idx < BCH_TRANSACTIONS_NR) { 3212 trans->fn = bch2_btree_transa 3213 3214 struct btree_transaction_stat 3215 3216 if (s->max_mem) { 3217 unsigned expected_mem 3218 3219 trans->mem = kmalloc( 3220 if (likely(trans->mem 3221 trans->mem_by 3222 } 3223 3224 trans->nr_paths_max = s->nr_m 3225 trans->journal_entries_size = 3226 } 3227 3228 trans->srcu_idx = srcu_read_l 3229 trans->srcu_lock_time = jiffies; 3230 trans->srcu_held = true; 3231 trans_set_locked(trans); 3232 3233 closure_init_stack_release(&trans->re 3234 return trans; 3235 } 3236 3237 static void check_btree_paths_leaked(struct b 3238 { 3239 #ifdef CONFIG_BCACHEFS_DEBUG 3240 struct bch_fs *c = trans->c; 3241 struct btree_path *path; 3242 unsigned i; 3243 3244 trans_for_each_path(trans, path, i) 3245 if (path->ref) 3246 goto leaked; 3247 return; 3248 leaked: 3249 bch_err(c, "btree paths leaked from % 3250 trans_for_each_path(trans, path, i) 3251 if (path->ref) 3252 printk(KERN_ERR " bt 3253 bch2_btree_id_ 3254 (void *) path- 3255 /* Be noisy about this: */ 3256 bch2_fatal_error(c); 3257 #endif 3258 } 3259 3260 void bch2_trans_put(struct btree_trans *trans 3261 __releases(&c->btree_trans_barrier) 3262 { 3263 struct bch_fs *c = trans->c; 3264 3265 bch2_trans_unlock(trans); 3266 3267 trans_for_each_update(trans, i) 3268 __btree_path_put(trans, trans 3269 trans->nr_updates = 0; 3270 3271 
check_btree_paths_leaked(trans); 3272 3273 if (trans->srcu_held) { 3274 check_srcu_held_too_long(tran 3275 srcu_read_unlock(&c->btree_tr 3276 } 3277 3278 if (unlikely(trans->journal_replay_no 3279 bch2_journal_keys_put(c); 3280 3281 /* 3282 * trans->ref protects trans->locking 3283 * by cycle detector 3284 */ 3285 closure_return_sync(&trans->ref); 3286 trans->locking_wait.task = NULL; 3287 3288 unsigned long *paths_allocated = tran 3289 trans->paths_allocated = NULL; 3290 trans->paths = NULL; 3291 3292 if (paths_allocated != trans->_paths_ 3293 kvfree_rcu_mightsleep(paths_a 3294 3295 if (trans->used_mempool) 3296 mempool_free(trans->mem, &c-> 3297 else 3298 kfree(trans->mem); 3299 3300 /* Userspace doesn't have a real perc 3301 if (IS_ENABLED(__KERNEL__)) 3302 trans = this_cpu_xchg(c->btre 3303 3304 if (trans) { 3305 seqmutex_lock(&c->btree_trans 3306 list_del(&trans->list); 3307 seqmutex_unlock(&c->btree_tra 3308 3309 mempool_free(trans, &c->btree 3310 } 3311 } 3312 3313 bool bch2_current_has_btree_trans(struct bch_ 3314 { 3315 seqmutex_lock(&c->btree_trans_lock); 3316 struct btree_trans *trans; 3317 bool ret = false; 3318 list_for_each_entry(trans, &c->btree_ 3319 if (trans->locking_wait.task 3320 trans->locked) { 3321 ret = true; 3322 break; 3323 } 3324 seqmutex_unlock(&c->btree_trans_lock) 3325 return ret; 3326 } 3327 3328 static void __maybe_unused 3329 bch2_btree_bkey_cached_common_to_text(struct 3330 struct 3331 { 3332 struct six_lock_count c = six_lock_co 3333 struct task_struct *owner; 3334 pid_t pid; 3335 3336 rcu_read_lock(); 3337 owner = READ_ONCE(b->lock.owner); 3338 pid = owner ? owner->pid : 0; 3339 rcu_read_unlock(); 3340 3341 prt_printf(out, "\t%px %c l=%u %s:", 3342 b->level, bch2_btree_id_st 3343 bch2_bpos_to_text(out, btree_node_pos 3344 3345 prt_printf(out, "\t locks %u:%u:%u he 3346 c.n[0], c.n[1], c.n[2], pi 3347 } 3348 3349 void bch2_btree_trans_to_text(struct printbuf 3350 { 3351 struct btree_bkey_cached_common *b; 3352 static char lock_types[] = { 'r', 'i' 3353 struct task_struct *task = READ_ONCE( 3354 unsigned l, idx; 3355 3356 /* before rcu_read_lock(): */ 3357 bch2_printbuf_make_room(out, 4096); 3358 3359 if (!out->nr_tabstops) { 3360 printbuf_tabstop_push(out, 16 3361 printbuf_tabstop_push(out, 32 3362 } 3363 3364 prt_printf(out, "%i %s\n", task ? tas 3365 3366 /* trans->paths is rcu protected vs. 3367 rcu_read_lock(); 3368 out->atomic++; 3369 3370 struct btree_path *paths = rcu_derefe 3371 if (!paths) 3372 goto out; 3373 3374 unsigned long *paths_allocated = tran 3375 3376 trans_for_each_path_idx_from(paths_al 3377 struct btree_path *path = pat 3378 if (!path->nodes_locked) 3379 continue; 3380 3381 prt_printf(out, " path %u %c 3382 idx, 3383 path->cached ? 
'c' : ' 3384 path->level, 3385 bch2_btree_id_str(path 3386 bch2_bpos_to_text(out, path-> 3387 prt_newline(out); 3388 3389 for (l = 0; l < BTREE_MAX_DEP 3390 if (btree_node_locked 3391 !IS_ERR_OR_NULL(b 3392 prt_printf(ou 3393 lo 3394 bch2_btree_bk 3395 prt_newline(o 3396 } 3397 } 3398 } 3399 3400 b = READ_ONCE(trans->locking); 3401 if (b) { 3402 prt_printf(out, " blocked fo 3403 div_u64(local_cloc 3404 prt_printf(out, " %c", loc 3405 bch2_btree_bkey_cached_common 3406 prt_newline(out); 3407 } 3408 out: 3409 --out->atomic; 3410 rcu_read_unlock(); 3411 } 3412 3413 void bch2_fs_btree_iter_exit(struct bch_fs *c 3414 { 3415 struct btree_transaction_stats *s; 3416 struct btree_trans *trans; 3417 int cpu; 3418 3419 if (c->btree_trans_bufs) 3420 for_each_possible_cpu(cpu) { 3421 struct btree_trans *t 3422 per_cpu_ptr(c 3423 3424 if (trans) { 3425 seqmutex_lock 3426 list_del(&tra 3427 seqmutex_unlo 3428 } 3429 kfree(trans); 3430 } 3431 free_percpu(c->btree_trans_bufs); 3432 3433 trans = list_first_entry_or_null(&c-> 3434 if (trans) 3435 panic("%s leaked btree_trans\ 3436 3437 for (s = c->btree_transaction_stats; 3438 s < c->btree_transaction_stats + 3439 s++) { 3440 kfree(s->max_paths_text); 3441 bch2_time_stats_exit(&s->lock 3442 } 3443 3444 if (c->btree_trans_barrier_initialize 3445 synchronize_srcu_expedited(&c 3446 cleanup_srcu_struct(&c->btree 3447 } 3448 mempool_exit(&c->btree_trans_mem_pool 3449 mempool_exit(&c->btree_trans_pool); 3450 } 3451 3452 void bch2_fs_btree_iter_init_early(struct bch 3453 { 3454 struct btree_transaction_stats *s; 3455 3456 for (s = c->btree_transaction_stats; 3457 s < c->btree_transaction_stats + 3458 s++) { 3459 bch2_time_stats_init(&s->dura 3460 bch2_time_stats_init(&s->lock 3461 mutex_init(&s->lock); 3462 } 3463 3464 INIT_LIST_HEAD(&c->btree_trans_list); 3465 seqmutex_init(&c->btree_trans_lock); 3466 } 3467 3468 int bch2_fs_btree_iter_init(struct bch_fs *c) 3469 { 3470 int ret; 3471 3472 c->btree_trans_bufs = alloc_percpu(st 3473 if (!c->btree_trans_bufs) 3474 return -ENOMEM; 3475 3476 ret = mempool_init_kmalloc_pool(&c- 3477 siz 3478 mempool_init_kmalloc_pool(&c- 3479 BTR 3480 init_srcu_struct(&c->btree_tr 3481 if (ret) 3482 return ret; 3483 3484 /* 3485 * static annotation (hackily done) f 3486 * btree node locks: 3487 */ 3488 #ifdef CONFIG_LOCKDEP 3489 fs_reclaim_acquire(GFP_KERNEL); 3490 struct btree_trans *trans = bch2_tran 3491 trans_set_locked(trans); 3492 bch2_trans_put(trans); 3493 fs_reclaim_release(GFP_KERNEL); 3494 #endif 3495 3496 c->btree_trans_barrier_initialized = 3497 return 0; 3498 3499 } 3500
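/*
 * Usage sketch: the iterator peek + transaction-restart pattern that the
 * kernel-doc comments for bch2_btree_iter_next()/bch2_btree_iter_peek_prev()
 * and bch2_trans_begin() above describe.  This is only an illustrative
 * sketch, kept in a comment so nothing extra is compiled into this file; it
 * assumes the usual helpers declared alongside this file (bch2_trans_get(),
 * bch2_trans_iter_init(), bch2_trans_put()), a filesystem pointer "c", and
 * uses BTREE_ID_extents purely as an example btree:
 *
 *	struct btree_trans *trans = bch2_trans_get(c);
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	do {
 *		// Reset the transaction's bump allocator and unused paths and
 *		// bump the restart counter before (re)trying the work:
 *		bch2_trans_begin(trans);
 *
 *		bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
 *				     POS_MIN, 0);
 *
 *		k = bch2_btree_iter_peek(&iter);
 *		ret = bkey_err(k);
 *		// ... inspect k here when ret == 0 ...
 *
 *		bch2_trans_iter_exit(trans, &iter);
 *
 *		// BCH_ERR_transaction_restart means locks were dropped and any
 *		// work done so far must be redone from bch2_trans_begin():
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 *
 *	bch2_trans_put(trans);
 */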