// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "btree_cache.h"
#include "bset.h"
#include "eytzinger.h"
#include "trace.h"
#include "util.h"

#include <asm/unaligned.h>
#include <linux/console.h>
#include <linux/random.h>
#include <linux/prefetch.h>

static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
						  struct btree *);

static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
{
	unsigned n = ARRAY_SIZE(iter->data);

	while (n && __btree_node_iter_set_end(iter, n - 1))
		--n;

	return n;
}

struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
{
	return bch2_bkey_to_bset_inlined(b, k);
}

/*
 * There are never duplicate live keys in the btree - but including keys that
 * have been flagged as deleted (and will be cleaned up later) we _will_ see
 * duplicates.
 *
 * Thus the sort order is: usual key comparison first, but for keys that compare
 * equal the deleted key(s) come first, and the (at most one) live version comes
 * last.
 *
 * The main reason for this is insertion: to handle overwrites, we first iterate
 * over keys that compare equal to our insert key, and then insert immediately
 * prior to the first key greater than the key we're inserting - our insert
 * position will be after all keys that compare equal to our insert key, which
 * by the time we actually do the insert will all be deleted.
 */
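
/*
 * Illustrative example (not from any particular filesystem): within a single
 * bset the keys for one position might look like
 *
 *	... POS(1, 8) [deleted]  POS(1, 8) [deleted]  POS(1, 8) [live]  POS(1, 9) ...
 *
 * i.e. any whiteouts for a position sort before the single live key at that
 * position, so an overwrite can mark the old version deleted and place the new
 * key after all of the equal-comparing (now deleted) keys.
 */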

void bch2_dump_bset(struct bch_fs *c, struct btree *b,
		    struct bset *i, unsigned set)
{
	struct bkey_packed *_k, *_n;
	struct bkey uk, n;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;

	if (!i->u64s)
		return;

	for (_k = i->start;
	     _k < vstruct_last(i);
	     _k = _n) {
		_n = bkey_p_next(_k);

		if (!_k->u64s) {
			printk(KERN_ERR "block %u key %5zu - u64s 0? aieee!\n", set,
			       _k->_data - i->_data);
			break;
		}

		k = bkey_disassemble(b, _k, &uk);

		printbuf_reset(&buf);
		if (c)
			bch2_bkey_val_to_text(&buf, c, k);
		else
			bch2_bkey_to_text(&buf, k.k);
		printk(KERN_ERR "block %u key %5zu: %s\n", set,
		       _k->_data - i->_data, buf.buf);

		if (_n == vstruct_last(i))
			continue;

		n = bkey_unpack_key(b, _n);

		if (bpos_lt(n.p, k.k->p)) {
			printk(KERN_ERR "Key skipped backwards\n");
			continue;
		}

		if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
			printk(KERN_ERR "Duplicate keys\n");
	}

	printbuf_exit(&buf);
}

void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
{
	console_lock();
	for_each_bset(b, t)
		bch2_dump_bset(c, b, bset(b, t), t - b->set);
	console_unlock();
}

void bch2_dump_btree_node_iter(struct btree *b,
			       struct btree_node_iter *iter)
{
	struct btree_node_iter_set *set;
	struct printbuf buf = PRINTBUF;

	printk(KERN_ERR "btree node iter with %u/%u sets:\n",
	       __btree_node_iter_used(iter), b->nsets);

	btree_node_iter_for_each(iter, set) {
		struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
		struct bset_tree *t = bch2_bkey_to_bset(b, k);
		struct bkey uk = bkey_unpack_key(b, k);

		printbuf_reset(&buf);
		bch2_bkey_to_text(&buf, &uk);
		printk(KERN_ERR "set %zu key %u: %s\n",
		       t - b->set, set->k, buf.buf);
	}

	printbuf_exit(&buf);
}

struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b)
{
	struct bkey_packed *k;
	struct btree_nr_keys nr = {};

	for_each_bset(b, t)
		bset_tree_for_each_key(b, t, k)
			if (!bkey_deleted(k))
				btree_keys_account_key_add(&nr, t - b->set, k);
	return nr;
}

#ifdef CONFIG_BCACHEFS_DEBUG

void __bch2_verify_btree_nr_keys(struct btree *b)
{
	struct btree_nr_keys nr = bch2_btree_node_count_keys(b);

	BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
}

static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
					    struct btree *b)
{
	struct btree_node_iter iter = *_iter;
	const struct bkey_packed *k, *n;

	k = bch2_btree_node_iter_peek_all(&iter, b);
	__bch2_btree_node_iter_advance(&iter, b);
	n = bch2_btree_node_iter_peek_all(&iter, b);

	bkey_unpack_key(b, k);

	if (n &&
	    bkey_iter_cmp(b, k, n) > 0) {
		struct btree_node_iter_set *set;
		struct bkey ku = bkey_unpack_key(b, k);
		struct bkey nu = bkey_unpack_key(b, n);
		struct printbuf buf1 = PRINTBUF;
		struct printbuf buf2 = PRINTBUF;

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &ku);
		bch2_bkey_to_text(&buf2, &nu);
		printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
		       buf1.buf, buf2.buf);
		printk(KERN_ERR "iter was:");

		btree_node_iter_for_each(_iter, set) {
			struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k);
			struct bset_tree *t = bch2_bkey_to_bset(b, k2);
			printk(" [%zi %zi]", t - b->set,
			       k2->_data - bset(b, t)->_data);
		}
		panic("\n");
	}
}

void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
				 struct btree *b)
{
	struct btree_node_iter_set *set, *s2;
	struct bkey_packed *k, *p;

	if (bch2_btree_node_iter_end(iter))
		return;

	/* Verify no duplicates: */
	btree_node_iter_for_each(iter, set) {
		BUG_ON(set->k > set->end);
		btree_node_iter_for_each(iter, s2)
			BUG_ON(set != s2 && set->end == s2->end);
	}

	/* Verify that set->end is correct: */
	btree_node_iter_for_each(iter, set) {
		for_each_bset(b, t)
			if (set->end == t->end_offset) {
				BUG_ON(set->k < btree_bkey_first_offset(t) ||
				       set->k >= t->end_offset);
				goto found;
			}
		BUG();
found:
		do {} while (0);
	}

	/* Verify iterator is sorted: */
	btree_node_iter_for_each(iter, set)
		BUG_ON(set != iter->data &&
		       btree_node_iter_cmp(b, set[-1], set[0]) > 0);

	k = bch2_btree_node_iter_peek_all(iter, b);

	for_each_bset(b, t) {
		if (iter->data[0].end == t->end_offset)
			continue;

		p = bch2_bkey_prev_all(b, t,
			bch2_btree_node_iter_bset_pos(iter, b, t));

		BUG_ON(p && bkey_iter_cmp(b, k, p) < 0);
	}
}

void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
			    struct bkey_packed *insert, unsigned clobber_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
	struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
#if 0
	BUG_ON(prev &&
	       bkey_iter_cmp(b, prev, insert) > 0);
#else
	if (prev &&
	    bkey_iter_cmp(b, prev, insert) > 0) {
		struct bkey k1 = bkey_unpack_key(b, prev);
		struct bkey k2 = bkey_unpack_key(b, insert);

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &k1);
		bch2_bkey_to_text(&buf2, &k2);

		panic("prev > insert:\n"
		      "prev key %s\n"
		      "insert key %s\n",
		      buf1.buf, buf2.buf);
	}
#endif
#if 0
	BUG_ON(next != btree_bkey_last(b, t) &&
	       bkey_iter_cmp(b, insert, next) > 0);
#else
	if (next != btree_bkey_last(b, t) &&
	    bkey_iter_cmp(b, insert, next) > 0) {
		struct bkey k1 = bkey_unpack_key(b, insert);
		struct bkey k2 = bkey_unpack_key(b, next);

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &k1);
		bch2_bkey_to_text(&buf2, &k2);

		panic("insert > next:\n"
		      "insert key %s\n"
		      "next key %s\n",
		      buf1.buf, buf2.buf);
	}
#endif
}

#else

static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
						   struct btree *b) {}

#endif

/* Auxiliary search trees */

#define BFLOAT_FAILED_UNPACKED	U8_MAX
#define BFLOAT_FAILED		U8_MAX

struct bkey_float {
	u8		exponent;
	u8		key_offset;
	u16		mantissa;
};
#define BKEY_MANTISSA_BITS	16

static unsigned bkey_float_byte_offset(unsigned idx)
{
	return idx * sizeof(struct bkey_float);
}

struct ro_aux_tree {
	u8			nothing[0];
	struct bkey_float	f[];
};

struct rw_aux_tree {
	u16		offset;
	struct bpos	k;
};
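
/*
 * Rough memory layout, as implied by bset_aux_tree_buf_end() below: a
 * read-only aux tree is t->size struct bkey_floats followed by t->size u8
 * prev-key sizes; a read-write aux tree is just t->size struct rw_aux_trees.
 * Both live in b->aux_data starting at t->aux_data_offset (counted in u64s),
 * with each bset's buffer rounded up to a whole number of u64s.
 */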

static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
{
	BUG_ON(t->aux_data_offset == U16_MAX);

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		return t->aux_data_offset;
	case BSET_RO_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(t->size * sizeof(struct bkey_float) +
				     t->size * sizeof(u8), 8);
	case BSET_RW_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
	default:
		BUG();
	}
}

static unsigned bset_aux_tree_buf_start(const struct btree *b,
					const struct bset_tree *t)
{
	return t == b->set
		? DIV_ROUND_UP(b->unpack_fn_len, 8)
		: bset_aux_tree_buf_end(t - 1);
}

static void *__aux_tree_base(const struct btree *b,
			     const struct bset_tree *t)
{
	return b->aux_data + t->aux_data_offset * 8;
}

static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
					    const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);

	return __aux_tree_base(b, t);
}

static u8 *ro_aux_tree_prev(const struct btree *b,
			    const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);

	return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
}

static struct bkey_float *bkey_float(const struct btree *b,
				     const struct bset_tree *t,
				     unsigned idx)
{
	return ro_aux_tree_base(b, t)->f + idx;
}

static void bset_aux_tree_verify(struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	for_each_bset(b, t) {
		if (t->aux_data_offset == U16_MAX)
			continue;

		BUG_ON(t != b->set &&
		       t[-1].aux_data_offset == U16_MAX);

		BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
		BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
		BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
	}
#endif
}

void bch2_btree_keys_init(struct btree *b)
{
	unsigned i;

	b->nsets = 0;
	memset(&b->nr, 0, sizeof(b->nr));

	for (i = 0; i < MAX_BSETS; i++)
		b->set[i].data_offset = U16_MAX;

	bch2_bset_set_no_aux_tree(b, b->set);
}

/* Binary tree stuff for auxiliary search trees */

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; eytzinger1_to_inorder() gives us the cacheline,
 * and then bkey_float->key_offset gives us the offset within that cacheline,
 * in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */
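
/*
 * For illustration (see cacheline_to_bkey() below for the real arithmetic):
 * a bkey_float with key_offset == 3 whose eytzinger index maps to cacheline 5
 * refers to the key starting 3 u64s (24 bytes) past the start of the bset's
 * sixth BSET_CACHELINE-sized chunk; the chunk base itself is just the bset's
 * first key rounded down to an L1 cacheline boundary, plus 5 * BSET_CACHELINE
 * bytes.
 */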

static inline void *bset_cacheline(const struct btree *b,
				   const struct bset_tree *t,
				   unsigned cacheline)
{
	return (void *) round_down((unsigned long) btree_bkey_first(b, t),
				   L1_CACHE_BYTES) +
		cacheline * BSET_CACHELINE;
}

static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
					     const struct bset_tree *t,
					     unsigned cacheline,
					     unsigned offset)
{
	return bset_cacheline(b, t, cacheline) + offset * 8;
}

static unsigned bkey_to_cacheline(const struct btree *b,
				  const struct bset_tree *t,
				  const struct bkey_packed *k)
{
	return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
}

static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
					  const struct bset_tree *t,
					  unsigned cacheline,
					  const struct bkey_packed *k)
{
	return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
}

static unsigned bkey_to_cacheline_offset(const struct btree *b,
					 const struct bset_tree *t,
					 unsigned cacheline,
					 const struct bkey_packed *k)
{
	size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);

	EBUG_ON(m > U8_MAX);
	return m;
}

static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
					       const struct bset_tree *t,
					       unsigned j)
{
	return cacheline_to_bkey(b, t,
			__eytzinger1_to_inorder(j, t->size - 1, t->extra),
			bkey_float(b, t, j)->key_offset);
}

static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
					     const struct bset_tree *t,
					     unsigned j)
{
	unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];

	return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
}

static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
				       const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);

	return __aux_tree_base(b, t);
}

/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
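
/*
 * Illustrative sketch: each rw_aux_tree entry records a node offset plus the
 * unpacked bpos of one key, and __build_rw_aux_tree() below adds an entry
 * roughly once per L1 cacheline of keys, so a lookup is a plain binary search
 * over a small table followed by a short linear scan - see
 * bset_search_write_set() and bch2_bset_search_linear().
 */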
static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
					  struct bset_tree *t,
					  unsigned j)
{
	return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
}

static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
			    unsigned j, struct bkey_packed *k)
{
	EBUG_ON(k >= btree_bkey_last(b, t));

	rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
		.offset	= __btree_node_key_to_offset(b, k),
		.k	= bkey_unpack_pos(b, k),
	};
}

static void bch2_bset_verify_rw_aux_tree(struct btree *b,
					 struct bset_tree *t)
{
	struct bkey_packed *k = btree_bkey_first(b, t);
	unsigned j = 0;

	if (!bch2_expensive_debug_checks)
		return;

	BUG_ON(bset_has_ro_aux_tree(t));

	if (!bset_has_rw_aux_tree(t))
		return;

	BUG_ON(t->size < 1);
	BUG_ON(rw_aux_to_bkey(b, t, j) != k);

	goto start;
	while (1) {
		if (rw_aux_to_bkey(b, t, j) == k) {
			BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
					bkey_unpack_pos(b, k)));
start:
			if (++j == t->size)
				break;

			BUG_ON(rw_aux_tree(b, t)[j].offset <=
			       rw_aux_tree(b, t)[j - 1].offset);
		}

		k = bkey_p_next(k);
		BUG_ON(k >= btree_bkey_last(b, t));
	}
}

/* returns idx of first entry >= offset: */
static unsigned rw_aux_tree_bsearch(struct btree *b,
				    struct bset_tree *t,
				    unsigned offset)
{
	unsigned bset_offs = offset - btree_bkey_first_offset(t);
	unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
	unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;

	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
	EBUG_ON(!t->size);
	EBUG_ON(idx > t->size);

	while (idx < t->size &&
	       rw_aux_tree(b, t)[idx].offset < offset)
		idx++;

	while (idx &&
	       rw_aux_tree(b, t)[idx - 1].offset >= offset)
		idx--;

	EBUG_ON(idx < t->size &&
		rw_aux_tree(b, t)[idx].offset < offset);
	EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
	EBUG_ON(idx + 1 < t->size &&
		rw_aux_tree(b, t)[idx].offset ==
		rw_aux_tree(b, t)[idx + 1].offset);

	return idx;
}

static inline unsigned bkey_mantissa(const struct bkey_packed *k,
				     const struct bkey_float *f,
				     unsigned idx)
{
	u64 v;

	EBUG_ON(!bkey_packed(k));

	v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));

	/*
	 * In little endian, we're shifting off low bits (and then the bits we
	 * want are at the low end), in big endian we're shifting off high bits
	 * (and then the bits we want are at the high end, so we shift them
	 * back down):
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	v >>= f->exponent & 7;
#else
	v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS;
#endif
	return (u16) v;
}

static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
					unsigned j,
					struct bkey_packed *min_key,
					struct bkey_packed *max_key)
{
	struct bkey_float *f = bkey_float(b, t, j);
	struct bkey_packed *m = tree_to_bkey(b, t, j);
	struct bkey_packed *l = is_power_of_2(j)
		? min_key
		: tree_to_prev_bkey(b, t, j >> ffs(j));
	struct bkey_packed *r = is_power_of_2(j + 1)
		? max_key
		: tree_to_bkey(b, t, j >> (ffz(j) + 1));
	unsigned mantissa;
	int shift, exponent, high_bit;

	/*
	 * for failed bfloats, the lookup code falls back to comparing against
	 * the original key.
	 */

	if (!bkey_packed(l) || !bkey_packed(r) || !bkey_packed(m) ||
	    !b->nr_key_bits) {
		f->exponent = BFLOAT_FAILED_UNPACKED;
		return;
	}

	/*
	 * The greatest differing bit of l and r is the first bit we must
	 * include in the bfloat mantissa we're creating in order to do
	 * comparisons - that bit always becomes the high bit of
	 * bfloat->mantissa, and thus the exponent we're calculating here is
	 * the position of what will become the low bit in bfloat->mantissa:
	 *
	 * Note that this may be negative - we may be running off the low end
	 * of the key: we handle this later:
	 */
	high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
		       min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1);
	exponent = high_bit - (BKEY_MANTISSA_BITS - 1);

	/*
	 * Then we calculate the actual shift value, from the start of the key
	 * (k->_data), to get the key bits starting at exponent:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;

	EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
#else
	shift = high_bit_offset +
		b->nr_key_bits -
		exponent -
		BKEY_MANTISSA_BITS;

	EBUG_ON(shift < KEY_PACKED_BITS_START);
#endif
	EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);

	f->exponent = shift;
	mantissa = bkey_mantissa(m, f, j);

	/*
	 * If we've got garbage bits, set them to all 1s - it's legal for the
	 * bfloat to compare larger than the original key, but not smaller:
	 */
	if (exponent < 0)
		mantissa |= ~(~0U << -exponent);

	f->mantissa = mantissa;
}
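
/*
 * Worked example (illustrative only): if the keys bracketing tree node j first
 * differ at bit 40 (in the numbering used by bch2_bkey_greatest_differing_bit()),
 * make_bfloat() above picks exponent 25 and stores a shift such that
 * bkey_mantissa() extracts the 16 key bits 25..40. Comparing two such 16-bit
 * mantissas usually answers "go left or right?" for a search key; keys that
 * only differ in lower, dropped bits fall back to a full comparison in
 * bset_search_tree().
 */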

/* bytes remaining - only valid for last bset: */
static unsigned __bset_tree_capacity(struct btree *b, const struct bset_tree *t)
{
	bset_aux_tree_verify(b);

	return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
}

static unsigned bset_ro_tree_capacity(struct btree *b, const struct bset_tree *t)
{
	return __bset_tree_capacity(b, t) /
		(sizeof(struct bkey_float) + sizeof(u8));
}

static unsigned bset_rw_tree_capacity(struct btree *b, const struct bset_tree *t)
{
	return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
}

static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
{
	struct bkey_packed *k;

	t->size = 1;
	t->extra = BSET_RW_AUX_TREE_VAL;
	rw_aux_tree(b, t)[0].offset =
		__btree_node_key_to_offset(b, btree_bkey_first(b, t));

	bset_tree_for_each_key(b, t, k) {
		if (t->size == bset_rw_tree_capacity(b, t))
			break;

		if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
		    L1_CACHE_BYTES)
			rw_aux_tree_set(b, t, t->size++, k);
	}
}

static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
{
	struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
	struct bkey_i min_key, max_key;
	unsigned cacheline = 1;

	t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
		      bset_ro_tree_capacity(b, t));
retry:
	if (t->size < 2) {
		t->size = 0;
		t->extra = BSET_NO_AUX_TREE_VAL;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	eytzinger1_for_each(j, t->size - 1) {
		while (bkey_to_cacheline(b, t, k) < cacheline)
			prev = k, k = bkey_p_next(k);

		if (k >= btree_bkey_last(b, t)) {
			/* XXX: this path sucks */
			t->size--;
			goto retry;
		}

		ro_aux_tree_prev(b, t)[j] = prev->u64s;
		bkey_float(b, t, j)->key_offset =
			bkey_to_cacheline_offset(b, t, cacheline++, k);

		EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
		EBUG_ON(tree_to_bkey(b, t, j) != k);
	}

	while (k != btree_bkey_last(b, t))
		prev = k, k = bkey_p_next(k);

	if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
		bkey_init(&min_key.k);
		min_key.k.p = b->data->min_key;
	}

	if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) {
		bkey_init(&max_key.k);
		max_key.k.p = b->data->max_key;
	}

	/* Then we build the tree */
	eytzinger1_for_each(j, t->size - 1)
		make_bfloat(b, t, j,
			    bkey_to_packed(&min_key),
			    bkey_to_packed(&max_key));
}

static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
{
	struct bset_tree *i;

	for (i = b->set; i != t; i++)
		BUG_ON(bset_has_rw_aux_tree(i));

	bch2_bset_set_no_aux_tree(b, t);

	/* round up to next cacheline: */
	t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
				      SMP_CACHE_BYTES / sizeof(u64));

	bset_aux_tree_verify(b);
}

void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
			      bool writeable)
{
	if (writeable
	    ? bset_has_rw_aux_tree(t)
	    : bset_has_ro_aux_tree(t))
		return;

	bset_alloc_tree(b, t);

	if (!__bset_tree_capacity(b, t))
		return;

	if (writeable)
		__build_rw_aux_tree(b, t);
	else
		__build_ro_aux_tree(b, t);

	bset_aux_tree_verify(b);
}

void bch2_bset_init_first(struct btree *b, struct bset *i)
{
	struct bset_tree *t;

	BUG_ON(b->nsets);

	memset(i, 0, sizeof(*i));
	get_random_bytes(&i->seq, sizeof(i->seq));
	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	t = &b->set[b->nsets++];
	set_btree_bset(b, t, i);
}

void bch2_bset_init_next(struct btree *b, struct btree_node_entry *bne)
{
	struct bset *i = &bne->keys;
	struct bset_tree *t;

	BUG_ON(bset_byte_offset(b, bne) >= btree_buf_bytes(b));
	BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
	BUG_ON(b->nsets >= MAX_BSETS);

	memset(i, 0, sizeof(*i));
	i->seq = btree_bset_first(b)->seq;
	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	t = &b->set[b->nsets++];
	set_btree_bset(b, t, i);
}

/*
 * find _some_ key in the same bset as @k that precedes @k - not necessarily the
 * immediate predecessor:
 */
static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
				       struct bkey_packed *k)
{
	struct bkey_packed *p;
	unsigned offset;
	int j;

	EBUG_ON(k < btree_bkey_first(b, t) ||
		k > btree_bkey_last(b, t));

	if (k == btree_bkey_first(b, t))
		return NULL;

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		p = btree_bkey_first(b, t);
		break;
	case BSET_RO_AUX_TREE:
		j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));

		do {
			p = j ? tree_to_bkey(b, t,
					__inorder_to_eytzinger1(j--,
							t->size - 1, t->extra))
			      : btree_bkey_first(b, t);
		} while (p >= k);
		break;
	case BSET_RW_AUX_TREE:
		offset = __btree_node_key_to_offset(b, k);
		j = rw_aux_tree_bsearch(b, t, offset);
		p = j ? rw_aux_to_bkey(b, t, j - 1)
		      : btree_bkey_first(b, t);
		break;
	}

	return p;
}

struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
					  struct bset_tree *t,
					  struct bkey_packed *k,
					  unsigned min_key_type)
{
	struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;

	while ((p = __bkey_prev(b, t, k)) && !ret) {
		for (i = p; i != k; i = bkey_p_next(i))
			if (i->type >= min_key_type)
				ret = i;

		k = p;
	}

	if (bch2_expensive_debug_checks) {
		BUG_ON(ret >= orig_k);

		for (i = ret
			? bkey_p_next(ret)
			: btree_bkey_first(b, t);
		     i != orig_k;
		     i = bkey_p_next(i))
			BUG_ON(i->type >= min_key_type);
	}

	return ret;
}

/* Insert */

static void bch2_bset_fix_lookup_table(struct btree *b,
				       struct bset_tree *t,
				       struct bkey_packed *_where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	int shift = new_u64s - clobber_u64s;
	unsigned l, j, where = __btree_node_key_to_offset(b, _where);

	EBUG_ON(bset_has_ro_aux_tree(t));

	if (!bset_has_rw_aux_tree(t))
		return;

	/* returns first entry >= where */
	l = rw_aux_tree_bsearch(b, t, where);

	if (!l) /* never delete first entry */
		l++;
	else if (l < t->size &&
		 where < t->end_offset &&
		 rw_aux_tree(b, t)[l].offset == where)
		rw_aux_tree_set(b, t, l++, _where);

	/* l now > where */

	for (j = l;
	     j < t->size &&
	     rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
	     j++)
		;

	if (j < t->size &&
	    rw_aux_tree(b, t)[j].offset + shift ==
	    rw_aux_tree(b, t)[l - 1].offset)
		j++;

	memmove(&rw_aux_tree(b, t)[l],
		&rw_aux_tree(b, t)[j],
		(void *) &rw_aux_tree(b, t)[t->size] -
		(void *) &rw_aux_tree(b, t)[j]);
	t->size -= j - l;

	for (j = l; j < t->size; j++)
		rw_aux_tree(b, t)[j].offset += shift;

	EBUG_ON(l < t->size &&
		rw_aux_tree(b, t)[l].offset ==
		rw_aux_tree(b, t)[l - 1].offset);

	if (t->size < bset_rw_tree_capacity(b, t) &&
	    (l < t->size
	     ? rw_aux_tree(b, t)[l].offset
	     : t->end_offset) -
	    rw_aux_tree(b, t)[l - 1].offset >
	    L1_CACHE_BYTES / sizeof(u64)) {
		struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
		struct bkey_packed *end = l < t->size
			? rw_aux_to_bkey(b, t, l)
			: btree_bkey_last(b, t);
		struct bkey_packed *k = start;

		while (1) {
			k = bkey_p_next(k);
			if (k == end)
				break;

			if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
				memmove(&rw_aux_tree(b, t)[l + 1],
					&rw_aux_tree(b, t)[l],
					(void *) &rw_aux_tree(b, t)[t->size] -
					(void *) &rw_aux_tree(b, t)[l]);
				t->size++;
				rw_aux_tree_set(b, t, l, k);
				break;
			}
		}
	}

	bch2_bset_verify_rw_aux_tree(b, t);
	bset_aux_tree_verify(b);
}
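
/*
 * Rough example of the fixup above: inserting a 5 u64 key over 2 clobbered
 * u64s gives shift = +3, so every rw_aux_tree entry past the insert point has
 * its offset bumped by 3; entries that pointed into the clobbered range are
 * dropped, and if the gap between neighbouring entries has grown past an L1
 * cacheline a new entry is added to keep lookups cheap.
 */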

void bch2_bset_insert(struct btree *b,
		      struct btree_node_iter *iter,
		      struct bkey_packed *where,
		      struct bkey_i *insert,
		      unsigned clobber_u64s)
{
	struct bkey_format *f = &b->format;
	struct bset_tree *t = bset_tree_last(b);
	struct bkey_packed packed, *src = bkey_to_packed(insert);

	bch2_bset_verify_rw_aux_tree(b, t);
	bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);

	if (bch2_bkey_pack_key(&packed, &insert->k, f))
		src = &packed;

	if (!bkey_deleted(&insert->k))
		btree_keys_account_key_add(&b->nr, t - b->set, src);

	if (src->u64s != clobber_u64s) {
		u64 *src_p = (u64 *) where->_data + clobber_u64s;
		u64 *dst_p = (u64 *) where->_data + src->u64s;

		EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
			(int) clobber_u64s - src->u64s);

		memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
		le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
		set_btree_bset_end(b, t);
	}

	memcpy_u64s_small(where, src,
			  bkeyp_key_u64s(f, src));
	memcpy_u64s(bkeyp_val(f, where), &insert->v,
		    bkeyp_val_u64s(f, src));

	if (src->u64s != clobber_u64s)
		bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);

	bch2_verify_btree_nr_keys(b);
}

void bch2_bset_delete(struct btree *b,
		      struct bkey_packed *where,
		      unsigned clobber_u64s)
{
	struct bset_tree *t = bset_tree_last(b);
	u64 *src_p = (u64 *) where->_data + clobber_u64s;
	u64 *dst_p = where->_data;

	bch2_bset_verify_rw_aux_tree(b, t);

	EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);

	memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
	le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
	set_btree_bset_end(b, t);

	bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
}

/* Lookup */

__flatten
static struct bkey_packed *bset_search_write_set(const struct btree *b,
				struct bset_tree *t,
				struct bpos *search)
{
	unsigned l = 0, r = t->size;

	while (l + 1 != r) {
		unsigned m = (l + r) >> 1;

		if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
			l = m;
		else
			r = m;
	}

	return rw_aux_to_bkey(b, t, l);
}

static inline void prefetch_four_cachelines(void *p)
{
#ifdef CONFIG_X86_64
	asm("prefetcht0 (-127 + 64 * 0)(%0);"
	    "prefetcht0 (-127 + 64 * 1)(%0);"
	    "prefetcht0 (-127 + 64 * 2)(%0);"
	    "prefetcht0 (-127 + 64 * 3)(%0);"
	    :
	    : "r" (p + 127));
#else
	prefetch(p + L1_CACHE_BYTES * 0);
	prefetch(p + L1_CACHE_BYTES * 1);
	prefetch(p + L1_CACHE_BYTES * 2);
	prefetch(p + L1_CACHE_BYTES * 3);
#endif
}

static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
					      const struct bkey_float *f,
					      unsigned idx)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;

	return f->exponent > key_bits_start;
#else
	unsigned key_bits_end = high_bit_offset + b->nr_key_bits;

	return f->exponent + BKEY_MANTISSA_BITS < key_bits_end;
#endif
}

__flatten
static struct bkey_packed *bset_search_tree(const struct btree *b,
				const struct bset_tree *t,
				const struct bpos *search,
				const struct bkey_packed *packed_search)
{
	struct ro_aux_tree *base = ro_aux_tree_base(b, t);
	struct bkey_float *f;
	struct bkey_packed *k;
	unsigned inorder, n = 1, l, r;
	int cmp;

	do {
		if (likely(n << 4 < t->size))
			prefetch(&base->f[n << 4]);

		f = &base->f[n];
		if (unlikely(f->exponent >= BFLOAT_FAILED))
			goto slowpath;

		l = f->mantissa;
		r = bkey_mantissa(packed_search, f, n);

		if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f, n))
			goto slowpath;

		n = n * 2 + (l < r);
		continue;
slowpath:
		k = tree_to_bkey(b, t, n);
		cmp = bkey_cmp_p_or_unp(b, k, packed_search, search);
		if (!cmp)
			return k;

		n = n * 2 + (cmp < 0);
	} while (n < t->size);

	inorder = __eytzinger1_to_inorder(n >> 1, t->size - 1, t->extra);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (likely(!(n & 1))) {
		--inorder;
		if (unlikely(!inorder))
			return btree_bkey_first(b, t);

		f = &base->f[eytzinger1_prev(n >> 1, t->size - 1)];
	}

	return cacheline_to_bkey(b, t, inorder, f->key_offset);
}
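
/*
 * Sketch of a lookup through the tree above (details approximate): starting at
 * the eytzinger root n = 1, each step compares the node's 16-bit mantissa
 * against the search key's and descends to child 2n (left) or 2n + 1 (right);
 * once n falls off the end of the array, the last comparison tells us which
 * cacheline the search key lands in (stepping back one node if the final step
 * went left), and bch2_bset_search_linear() finishes with a linear scan within
 * that cacheline.
 */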

static __always_inline __flatten
struct bkey_packed *__bch2_bset_search(struct btree *b,
				struct bset_tree *t,
				struct bpos *search,
				const struct bkey_packed *lossy_packed_search)
{

	/*
	 * First we search for a cacheline, then we do a linear search within
	 * that cacheline.
	 *
	 * To search for the cacheline, there are three different possibilities:
	 * * The set is too small to have a search tree, so we just do a linear
	 *   search over the whole set.
	 * * The set is the one we're currently inserting into; keeping a full
	 *   auxiliary search tree up to date would be too expensive, so we
	 *   use a much simpler lookup table to do a binary search -
	 *   bset_search_write_set().
	 * * Or we use the auxiliary search tree we constructed earlier -
	 *   bset_search_tree()
	 */

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		return btree_bkey_first(b, t);
	case BSET_RW_AUX_TREE:
		return bset_search_write_set(b, t, search);
	case BSET_RO_AUX_TREE:
		return bset_search_tree(b, t, search, lossy_packed_search);
	default:
		BUG();
	}
}

static __always_inline __flatten
struct bkey_packed *bch2_bset_search_linear(struct btree *b,
				struct bset_tree *t,
				struct bpos *search,
				struct bkey_packed *packed_search,
				const struct bkey_packed *lossy_packed_search,
				struct bkey_packed *m)
{
	if (lossy_packed_search)
		while (m != btree_bkey_last(b, t) &&
		       bkey_iter_cmp_p_or_unp(b, m,
					lossy_packed_search, search) < 0)
			m = bkey_p_next(m);

	if (!packed_search)
		while (m != btree_bkey_last(b, t) &&
		       bkey_iter_pos_cmp(b, m, search) < 0)
			m = bkey_p_next(m);

	if (bch2_expensive_debug_checks) {
		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);

		BUG_ON(prev &&
		       bkey_iter_cmp_p_or_unp(b, prev,
					packed_search, search) >= 0);
	}

	return m;
}

/* Btree node iterator */

static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
			      struct btree *b,
			      const struct bkey_packed *k,
			      const struct bkey_packed *end)
{
	if (k != end) {
		struct btree_node_iter_set *pos;

		btree_node_iter_for_each(iter, pos)
			;

		BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
		*pos = (struct btree_node_iter_set) {
			__btree_node_key_to_offset(b, k),
			__btree_node_key_to_offset(b, end)
		};
	}
}

void bch2_btree_node_iter_push(struct btree_node_iter *iter,
			       struct btree *b,
			       const struct bkey_packed *k,
			       const struct bkey_packed *end)
{
	__bch2_btree_node_iter_push(iter, b, k, end);
	bch2_btree_node_iter_sort(iter, b);
}

noinline __flatten __cold
static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
			      struct btree *b, struct bpos *search)
{
	struct bkey_packed *k;

	trace_bkey_pack_pos_fail(search);

	bch2_btree_node_iter_init_from_start(iter, b);

	while ((k = bch2_btree_node_iter_peek(iter, b)) &&
	       bkey_iter_pos_cmp(b, k, search) < 0)
		bch2_btree_node_iter_advance(iter, b);
}

/**
 * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
 * given position
 *
 * @iter:	iterator to initialize
 * @b:		btree node to search
 * @search:	search key
 *
 * Main entry point to the lookup code for individual btree nodes:
 *
 * NOTE:
 *
 * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
 * keys. This doesn't matter for most code, but it does matter for lookups.
 *
 * Some adjacent keys with a string of equal keys:
 *	i j k k k k l m
 *
 * If you search for k, the lookup code isn't guaranteed to return you any
 * specific k. The lookup code is conceptually doing a binary search and
 * iterating backwards is very expensive so if the pivot happens to land at the
 * last k that's what you'll get.
 *
 * This works out ok, but it's something to be aware of:
 *
 *  - For non extents, we guarantee that the live key comes last - see
 *    btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
 *    see will only be deleted keys you don't care about.
 *
 *  - For extents, deleted keys sort last (see the comment at the top of this
 *    file). But when you're searching for extents, you actually want the first
 *    key strictly greater than your search key - an extent that compares equal
 *    to the search key is going to have 0 sectors after the search key.
 *
 *    But this does mean that we can't just search for
 *    bpos_successor(start_of_range) to get the first extent that overlaps with
 *    the range we want - if we're unlucky and there's an extent that ends
 *    exactly where we searched, then there could be a deleted key at the same
 *    position and we'd get that when we search instead of the preceding extent
 *    we needed.
 *
 *    So we've got to search for start_of_range, then after the lookup iterate
 *    past any extents that compare equal to the position we searched for.
 */
__flatten
void bch2_btree_node_iter_init(struct btree_node_iter *iter,
			       struct btree *b, struct bpos *search)
{
	struct bkey_packed p, *packed_search = NULL;
	struct btree_node_iter_set *pos = iter->data;
	struct bkey_packed *k[MAX_BSETS];
	unsigned i;

	EBUG_ON(bpos_lt(*search, b->data->min_key));
	EBUG_ON(bpos_gt(*search, b->data->max_key));
	bset_aux_tree_verify(b);

	memset(iter, 0, sizeof(*iter));

	switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
	case BKEY_PACK_POS_EXACT:
		packed_search = &p;
		break;
	case BKEY_PACK_POS_SMALLER:
		packed_search = NULL;
		break;
	case BKEY_PACK_POS_FAIL:
		btree_node_iter_init_pack_failed(iter, b, search);
		return;
	}

	for (i = 0; i < b->nsets; i++) {
		k[i] = __bch2_bset_search(b, b->set + i, search, &p);
		prefetch_four_cachelines(k[i]);
	}

	for (i = 0; i < b->nsets; i++) {
		struct bset_tree *t = b->set + i;
		struct bkey_packed *end = btree_bkey_last(b, t);

		k[i] = bch2_bset_search_linear(b, t, search,
					       packed_search, &p, k[i]);
		if (k[i] != end)
			*pos++ = (struct btree_node_iter_set) {
				__btree_node_key_to_offset(b, k[i]),
				__btree_node_key_to_offset(b, end)
			};
	}

	bch2_btree_node_iter_sort(iter, b);
}

void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
					  struct btree *b)
{
	memset(iter, 0, sizeof(*iter));

	for_each_bset(b, t)
		__bch2_btree_node_iter_push(iter, b,
					    btree_bkey_first(b, t),
					    btree_bkey_last(b, t));
	bch2_btree_node_iter_sort(iter, b);
}

struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
						  struct btree *b,
						  struct bset_tree *t)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset)
			return __btree_node_offset_to_key(b, set->k);

	return btree_bkey_last(b, t);
}

static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
					    struct btree *b,
					    unsigned first)
{
	bool ret;

	if ((ret = (btree_node_iter_cmp(b,
					iter->data[first],
					iter->data[first + 1]) > 0)))
		swap(iter->data[first], iter->data[first + 1]);
	return ret;
}
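
/*
 * Note: the unrolled sort below is effectively a three-element sorting
 * network - compare-and-swap (0,1), then (1,2), then (0,1) again - which is
 * all that's needed since a node iterator tracks at most ARRAY_SIZE(iter->data)
 * (i.e. MAX_BSETS) bsets at once.
 */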

void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
			       struct btree *b)
{
	/* unrolled bubble sort: */

	if (!__btree_node_iter_set_end(iter, 2)) {
		btree_node_iter_sort_two(iter, b, 0);
		btree_node_iter_sort_two(iter, b, 1);
	}

	if (!__btree_node_iter_set_end(iter, 1))
		btree_node_iter_sort_two(iter, b, 0);
}

void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
				   struct btree_node_iter_set *set)
{
	struct btree_node_iter_set *last =
		iter->data + ARRAY_SIZE(iter->data) - 1;

	memmove(&set[0], &set[1], (void *) last - (void *) set);
	*last = (struct btree_node_iter_set) { 0, 0 };
}

static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
						  struct btree *b)
{
	iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;

	EBUG_ON(iter->data->k > iter->data->end);

	if (unlikely(__btree_node_iter_set_end(iter, 0))) {
		/* avoid an expensive memmove call: */
		iter->data[0] = iter->data[1];
		iter->data[1] = iter->data[2];
		iter->data[2] = (struct btree_node_iter_set) { 0, 0 };
		return;
	}

	if (__btree_node_iter_set_end(iter, 1))
		return;

	if (!btree_node_iter_sort_two(iter, b, 0))
		return;

	if (__btree_node_iter_set_end(iter, 2))
		return;

	btree_node_iter_sort_two(iter, b, 1);
}

void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
				  struct btree *b)
{
	if (bch2_expensive_debug_checks) {
		bch2_btree_node_iter_verify(iter, b);
		bch2_btree_node_iter_next_check(iter, b);
	}

	__bch2_btree_node_iter_advance(iter, b);
}

/*
 * Expensive:
 */
struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
						  struct btree *b)
{
	struct bkey_packed *k, *prev = NULL;
	struct btree_node_iter_set *set;
	unsigned end = 0;

	if (bch2_expensive_debug_checks)
		bch2_btree_node_iter_verify(iter, b);

	for_each_bset(b, t) {
		k = bch2_bkey_prev_all(b, t,
			bch2_btree_node_iter_bset_pos(iter, b, t));
		if (k &&
		    (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
			prev = k;
			end = t->end_offset;
		}
	}

	if (!prev)
		return NULL;

	/*
	 * We're manually memmoving instead of just calling sort() to ensure the
	 * prev we picked ends up in slot 0 - sort won't necessarily put it
	 * there because of duplicate deleted keys:
	 */
	btree_node_iter_for_each(iter, set)
		if (set->end == end)
			goto found;

	BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
found:
	BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));

	memmove(&iter->data[1],
		&iter->data[0],
		(void *) set - (void *) &iter->data[0]);

	iter->data[0].k = __btree_node_key_to_offset(b, prev);
	iter->data[0].end = end;

	if (bch2_expensive_debug_checks)
		bch2_btree_node_iter_verify(iter, b);
	return prev;
}

struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
					      struct btree *b)
{
	struct bkey_packed *prev;

	do {
		prev = bch2_btree_node_iter_prev_all(iter, b);
	} while (prev && bkey_deleted(prev));

	return prev;
}

struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
						 struct btree *b,
						 struct bkey *u)
{
	struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);

	return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
}

/* Mergesort */

void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats)
{
	for_each_bset_c(b, t) {
		enum bset_aux_tree_type type = bset_aux_tree_type(t);
		size_t j;

		stats->sets[type].nr++;
		stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
			sizeof(u64);

		if (bset_has_ro_aux_tree(t)) {
			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				stats->failed +=
					bkey_float(b, t, j)->exponent ==
					BFLOAT_FAILED;
		}
	}
}

void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
			 struct bkey_packed *k)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, k);
	struct bkey uk;
	unsigned j, inorder;

	if (!bset_has_ro_aux_tree(t))
		return;

	inorder = bkey_to_cacheline(b, t, k);
	if (!inorder || inorder >= t->size)
		return;

	j = __inorder_to_eytzinger1(inorder, t->size - 1, t->extra);
	if (k != tree_to_bkey(b, t, j))
		return;

	switch (bkey_float(b, t, j)->exponent) {
	case BFLOAT_FAILED:
		uk = bkey_unpack_key(b, k);
		prt_printf(out,
		       " failed unpacked at depth %u\n"
		       "\t",
		       ilog2(j));
		bch2_bpos_to_text(out, uk.p);
		prt_printf(out, "\n");
		break;
	}
}