// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U

union nested_table {
	union nested_table __rcu *table;
	struct rhash_lock_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	if (!debug_locks)
		return 1;
	if (unlikely(tbl->nest))
		return 1;
	return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static inline union nested_table *nested_table_top(
	const struct bucket_table *tbl)
{
	/* The top-level bucket entry does not need RCU protection
	 * because it's set at the same time as tbl->nest.
	 */
	return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
}

static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	const unsigned int len = 1 << shift;
	unsigned int i;

	ntbl = rcu_dereference_protected(ntbl->table, 1);
	if (!ntbl)
		return;

	if (size > len) {
		size >>= shift;
		for (i = 0; i < len; i++)
			nested_table_free(ntbl + i, size);
	}

	kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int len = 1 << tbl->nest;
	union nested_table *ntbl;
	unsigned int i;

	ntbl = nested_table_top(tbl);

	for (i = 0; i < len; i++)
		nested_table_free(ntbl + i, size);

	kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl->nest)
		nested_bucket_table_free(tbl);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static union nested_table *nested_table_alloc(struct rhashtable *ht,
					      union nested_table __rcu **prev,
					      bool leaf)
{
	union nested_table *ntbl;
	int i;

	ntbl = rcu_dereference(*prev);
	if (ntbl)
		return ntbl;

	ntbl = alloc_hooks_tag(ht->alloc_tag,
			kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));

	if (ntbl && leaf) {
		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
			INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
	}

	if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
		return ntbl;
	/* Raced with another thread. */
	kfree(ntbl);
	return rcu_dereference(*prev);
}

static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
						      size_t nbuckets,
						      gfp_t gfp)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	struct bucket_table *tbl;
	size_t size;

	if (nbuckets < (1 << (shift + 1)))
		return NULL;

	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

	tbl = alloc_hooks_tag(ht->alloc_tag,
			kmalloc_noprof(size, gfp|__GFP_ZERO));
	if (!tbl)
		return NULL;

	if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
				false)) {
		kfree(tbl);
		return NULL;
	}

	tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

	return tbl;
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;
	static struct lock_class_key __key;

	tbl = alloc_hooks_tag(ht->alloc_tag,
			kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
					     gfp|__GFP_ZERO, NUMA_NO_NODE));

	size = nbuckets;

	if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
		tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
		nbuckets = 0;
	}

	if (tbl == NULL)
		return NULL;

	lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);

	tbl->size = size;

	rcu_head_init(&tbl->rcu);
	INIT_LIST_HEAD(&tbl->walkers);

	tbl->hash_rnd = get_random_u32();

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht,
				 struct rhash_lock_head __rcu **bkt,
				 unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
	int err = -EAGAIN;
	struct rhash_head *head, *next, *entry;
	struct rhash_head __rcu **pprev = NULL;
	unsigned int new_hash;
	unsigned long flags;

	if (new_tbl->nest)
		goto out;

	err = -ENOENT;

	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
			  old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
				SINGLE_DEPTH_NESTING);

	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

	RCU_INIT_POINTER(entry->next, head);

	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);

	if (pprev)
		rcu_assign_pointer(*pprev, next);
	else
		/* Need to preserve the bit lock */
		rht_assign_locked(bkt, next);

out:
	return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
				   unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
	unsigned long flags;
	int err;

	if (!bkt)
		return 0;
	flags = rht_lock(old_tbl, bkt);

	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
		;

	if (err == -ENOENT)
		err = 0;
	rht_unlock(old_tbl, bkt, flags);

	return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * As cmpxchg() provides strong barriers, we do not need
	 * rcu_assign_pointer().
	 */

	if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
		    new_tbl) != NULL)
		return -EEXIST;

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;
	int err;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
		err = rhashtable_rehash_chain(ht, old_hash);
		if (err)
			return err;
		cond_resched();
	}

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 * We do this inside the locked region so that
	 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
	 * to check if it should not re-link the table.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
	spin_unlock(&ht->lock);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
				   struct bucket_table *old_tbl,
				   unsigned int size)
{
	struct bucket_table *new_tbl;
	int err;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., the smallest
 * size would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int nelems = atomic_read(&ht->nelems);
	unsigned int size = 0;

	if (nelems)
		size = roundup_pow_of_two(nelems * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		err = rhashtable_shrink(ht);
	else if (tbl->nest)
		err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

	if (!err || err == -EEXIST) {
		int nerr;

		nerr = rhashtable_rehash_table(ht);
		err = err ?: nerr;
	}

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
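
/* Illustrative sketch, compiled out via #if 0: one way a user might configure
 * a table so that rht_deferred_worker() above can both grow and shrink it.
 * "struct example_obj" and the parameter values are hypothetical; only the
 * rhashtable_params fields are real.  With .automatic_shrinking set, the
 * worker calls rhashtable_shrink() once the table drops below 30%
 * utilisation, and .min_size bounds how far it may shrink.
 */
#if 0
struct example_obj {
	u32			key;
	struct rhash_head	node;
};

static const struct rhashtable_params example_shrink_params = {
	.head_offset		= offsetof(struct example_obj, node),
	.key_offset		= offsetof(struct example_obj, key),
	.key_len		= sizeof(u32),
	.min_size		= 16,	/* floor used by rhashtable_shrink() */
	.automatic_shrinking	= true,	/* allow the worker to shrink the table */
};
#endif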

static int rhashtable_insert_rehash(struct rhashtable *ht,
				    struct bucket_table *tbl)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);

	size = tbl->size;

	err = -EBUSY;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		goto fail;

	err = -ENOMEM;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
	if (new_tbl == NULL)
		goto fail;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;

fail:
	/* Do not fail the insert if someone else did a rehash. */
	if (likely(rcu_access_pointer(tbl->future_tbl)))
		return 0;

	/* Schedule async rehash to retry allocation in process context. */
	if (err == -ENOMEM)
		schedule_work(&ht->run_work);

	return err;
}

static void *rhashtable_lookup_one(struct rhashtable *ht,
				   struct rhash_lock_head __rcu **bkt,
				   struct bucket_table *tbl, unsigned int hash,
				   const void *key, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct rhash_head __rcu **pprev = NULL;
	struct rhash_head *head;
	int elasticity;

	elasticity = RHT_ELASTICITY;
	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
		struct rhlist_head *list;
		struct rhlist_head *plist;

		elasticity--;
		if (!key ||
		    (ht->p.obj_cmpfn ?
		     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
		     rhashtable_compare(&arg, rht_obj(ht, head)))) {
			pprev = &head->next;
			continue;
		}

		if (!ht->rhlist)
			return rht_obj(ht, head);

		list = container_of(obj, struct rhlist_head, rhead);
		plist = container_of(head, struct rhlist_head, rhead);

		RCU_INIT_POINTER(list->next, plist);
		head = rht_dereference_bucket(head->next, tbl, hash);
		RCU_INIT_POINTER(list->rhead.next, head);
		if (pprev)
			rcu_assign_pointer(*pprev, obj);
		else
			/* Need to preserve the bit lock */
			rht_assign_locked(bkt, obj);

		return NULL;
	}

	if (elasticity <= 0)
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-ENOENT);
}

static struct bucket_table *rhashtable_insert_one(
	struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
	struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
	void *data)
{
	struct bucket_table *new_tbl;
	struct rhash_head *head;

	if (!IS_ERR_OR_NULL(data))
		return ERR_PTR(-EEXIST);

	if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (new_tbl)
		return new_tbl;

	if (PTR_ERR(data) != -ENOENT)
		return ERR_CAST(data);

	if (unlikely(rht_grow_above_max(ht, tbl)))
		return ERR_PTR(-E2BIG);

	if (unlikely(rht_grow_above_100(ht, tbl)))
		return ERR_PTR(-EAGAIN);

	head = rht_ptr(bkt, tbl, hash);

	RCU_INIT_POINTER(obj->next, head);
	if (ht->rhlist) {
		struct rhlist_head *list;

		list = container_of(obj, struct rhlist_head, rhead);
		RCU_INIT_POINTER(list->next, NULL);
	}

	/* bkt is always the head of the list, so it holds
	 * the lock, which we need to preserve
	 */
	rht_assign_locked(bkt, obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

	return NULL;
}

static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
				   struct rhash_head *obj)
{
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	struct rhash_lock_head __rcu **bkt;
	unsigned long flags;
	unsigned int hash;
	void *data;

	new_tbl = rcu_dereference(ht->tbl);

	do {
		tbl = new_tbl;
		hash = rht_head_hashfn(ht, tbl, obj, ht->p);
		if (rcu_access_pointer(tbl->future_tbl))
			/* Failure is OK */
			bkt = rht_bucket_var(tbl, hash);
		else
			bkt = rht_bucket_insert(ht, tbl, hash);
		if (bkt == NULL) {
			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
			data = ERR_PTR(-EAGAIN);
		} else {
			flags = rht_lock(tbl, bkt);
			data = rhashtable_lookup_one(ht, bkt, tbl,
						     hash, key, obj);
			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
							hash, obj, data);
			if (PTR_ERR(new_tbl) != -EEXIST)
				data = ERR_CAST(new_tbl);

			rht_unlock(tbl, bkt, flags);
		}
	} while (!IS_ERR_OR_NULL(new_tbl));

	if (PTR_ERR(data) == -EAGAIN)
		data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
			       -EAGAIN);

	return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			     struct rhash_head *obj)
{
	void *data;

	do {
		rcu_read_lock();
		data = rhashtable_try_insert(ht, key, obj);
		rcu_read_unlock();
	} while (PTR_ERR(data) == -EAGAIN);

	return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;
	iter->end_of_table = 0;

	spin_lock(&ht->lock);
	iter->walker.tbl =
		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
	list_add(&iter->walker.list, &iter->walker.tbl->walkers);
	spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	spin_lock(&iter->ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk at the current iterator position.  Note that we take
 * the RCU lock in all cases including when we return an error.  So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;
	bool rhlist = ht->rhlist;

	rcu_read_lock();

	spin_lock(&ht->lock);
	if (iter->walker.tbl)
		list_del(&iter->walker.list);
	spin_unlock(&ht->lock);

	if (iter->end_of_table)
		return 0;
	if (!iter->walker.tbl) {
		iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
		iter->slot = 0;
		iter->skip = 0;
		return -EAGAIN;
	}

	if (iter->p && !rhlist) {
		/*
		 * We need to validate that 'p' is still in the table, and
		 * if so, update 'skip'
		 */
		struct rhash_head *p;
		int skip = 0;
		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			skip++;
			if (p == iter->p) {
				iter->skip = skip;
				goto found;
			}
		}
		iter->p = NULL;
	} else if (iter->p && rhlist) {
		/* Need to validate that 'list' is still in the table, and
		 * if so, update 'skip' and 'p'.
		 */
		struct rhash_head *p;
		struct rhlist_head *list;
		int skip = 0;
		rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
			for (list = container_of(p, struct rhlist_head, rhead);
			     list;
			     list = rcu_dereference(list->next)) {
				skip++;
				if (list == iter->list) {
					iter->p = p;
					iter->skip = skip;
					goto found;
				}
			}
		}
		iter->p = NULL;
	}
found:
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter:	Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker.tbl;
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (!tbl)
		return NULL;

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (rhlist) {
				list = container_of(p, struct rhlist_head,
						    rhead);
				do {
					if (!skip)
						goto next;
					skip--;
					list = rcu_dereference(list->next);
				} while (list);

				continue;
			}
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		iter->skip = 0;
	}

	iter->p = NULL;

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker.tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	} else {
		iter->end_of_table = true;
	}

	return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to next one and then find
		 * next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter:	Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in current iter, find next one */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry we want,
		 * beyond that last one that was returned. Back up one step so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip, assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.  Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
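
/* Illustrative sketch, compiled out via #if 0: the iteration protocol built
 * from the walk API above.  The element type "struct example_obj" and
 * example_walk() are hypothetical.  rhashtable_walk_next() may return
 * ERR_PTR(-EAGAIN) when a resize occurs; the walk then restarts from the
 * beginning, so objects may be seen twice or missed, as documented in
 * rhashtable_walk_enter().
 */
#if 0
static void example_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct example_obj *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);	/* takes the RCU read lock */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* resize: walk restarted */
			break;
		}
		/* use obj; no sleeping between walk_start and walk_stop */
	}

	rhashtable_walk_stop(&iter);	/* drops the RCU read lock */
	rhashtable_walk_exit(&iter);
}
#endif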

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init_noprof(struct rhashtable *ht,
			   const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	alloc_tag_record(ht->alloc_tag);

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is api initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
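
/* Illustrative sketch, compiled out via #if 0: a compilable version of
 * "Configuration Example 1" above, together with the matching insert and
 * lookup calls from <linux/rhashtable.h>.  The object type, the params name,
 * and example_use() are hypothetical.
 */
#if 0
struct test_obj {
	int			key;
	void			*my_member;
	struct rhash_head	node;
};

static const struct rhashtable_params test_params = {
	.head_offset	= offsetof(struct test_obj, node),
	.key_offset	= offsetof(struct test_obj, key),
	.key_len	= sizeof(int),
	.hashfn		= jhash,
};

static int example_use(struct rhashtable *ht, struct test_obj *obj)
{
	struct test_obj *found;
	int key = obj->key;
	int err;

	err = rhashtable_init(ht, &test_params);
	if (err)
		return err;

	err = rhashtable_insert_fast(ht, &obj->node, test_params);
	if (err)
		goto out;

	rcu_read_lock();
	found = rhashtable_lookup_fast(ht, &key, test_params);
	/* the returned object is only guaranteed to stay valid under RCU */
	err = found == obj ? 0 : -ENOENT;
	rcu_read_unlock();

out:
	rhashtable_destroy(ht);
	return err;
}
#endif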

/**
 * rhltable_init - initialize a new hash list table
 * @hlt:	hash list table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
 */
int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init_noprof(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init_noprof);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible that no further write operations
 * occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

struct rhash_lock_head __rcu **__rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = nested_table_top(tbl);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);

struct rhash_lock_head __rcu **rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash)
{
	static struct rhash_lock_head __rcu *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_lock_head __rcu **rht_bucket_nested_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = nested_table_top(tbl);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
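
/* Illustrative sketch, compiled out via #if 0: tearing down a table whose
 * elements were kmalloc'ed, using the free_fn callback described in the
 * rhashtable_free_and_destroy() kernel-doc above.  example_free() and
 * example_teardown() are hypothetical.
 */
#if 0
static void example_free(void *ptr, void *arg)
{
	kfree(ptr);	/* ptr is the element, i.e. rht_obj(ht, node) */
}

static void example_teardown(struct rhashtable *ht)
{
	/* Stops the deferred resize work and frees every element.  The
	 * caller must ensure no further inserts or removals run in parallel.
	 */
	rhashtable_free_and_destroy(ht, example_free, NULL);
}
#endif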