
TOMOYO Linux Cross Reference
Linux/lib/rhashtable.c


  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * Resizable, Scalable, Concurrent Hash Table
  4  *
  5  * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
  6  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
  7  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
  8  *
  9  * Code partially derived from nft_hash
 10  * Rewritten with rehash code from br_multicast plus single list
 11  * pointer as suggested by Josh Triplett
 12  */
 13 
 14 #include <linux/atomic.h>
 15 #include <linux/kernel.h>
 16 #include <linux/init.h>
 17 #include <linux/log2.h>
 18 #include <linux/sched.h>
 19 #include <linux/rculist.h>
 20 #include <linux/slab.h>
 21 #include <linux/vmalloc.h>
 22 #include <linux/mm.h>
 23 #include <linux/jhash.h>
 24 #include <linux/random.h>
 25 #include <linux/rhashtable.h>
 26 #include <linux/err.h>
 27 #include <linux/export.h>
 28 
 29 #define HASH_DEFAULT_SIZE       64UL
 30 #define HASH_MIN_SIZE           4U
 31 
 32 union nested_table {
 33         union nested_table __rcu *table;
 34         struct rhash_lock_head __rcu *bucket;
 35 };
 36 
 37 static u32 head_hashfn(struct rhashtable *ht,
 38                        const struct bucket_table *tbl,
 39                        const struct rhash_head *he)
 40 {
 41         return rht_head_hashfn(ht, tbl, he, ht->p);
 42 }
 43 
 44 #ifdef CONFIG_PROVE_LOCKING
 45 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
 46 
 47 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
 48 {
 49         return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
 50 }
 51 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
 52 
 53 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
 54 {
 55         if (!debug_locks)
 56                 return 1;
 57         if (unlikely(tbl->nest))
 58                 return 1;
 59         return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
 60 }
 61 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 62 #else
 63 #define ASSERT_RHT_MUTEX(HT)
 64 #endif
 65 
 66 static inline union nested_table *nested_table_top(
 67         const struct bucket_table *tbl)
 68 {
 69         /* The top-level bucket entry does not need RCU protection
 70          * because it's set at the same time as tbl->nest.
 71          */
 72         return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
 73 }
 74 
 75 static void nested_table_free(union nested_table *ntbl, unsigned int size)
 76 {
 77         const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 78         const unsigned int len = 1 << shift;
 79         unsigned int i;
 80 
 81         ntbl = rcu_dereference_protected(ntbl->table, 1);
 82         if (!ntbl)
 83                 return;
 84 
 85         if (size > len) {
 86                 size >>= shift;
 87                 for (i = 0; i < len; i++)
 88                         nested_table_free(ntbl + i, size);
 89         }
 90 
 91         kfree(ntbl);
 92 }
 93 
 94 static void nested_bucket_table_free(const struct bucket_table *tbl)
 95 {
 96         unsigned int size = tbl->size >> tbl->nest;
 97         unsigned int len = 1 << tbl->nest;
 98         union nested_table *ntbl;
 99         unsigned int i;
100 
101         ntbl = nested_table_top(tbl);
102 
103         for (i = 0; i < len; i++)
104                 nested_table_free(ntbl + i, size);
105 
106         kfree(ntbl);
107 }
108 
109 static void bucket_table_free(const struct bucket_table *tbl)
110 {
111         if (tbl->nest)
112                 nested_bucket_table_free(tbl);
113 
114         kvfree(tbl);
115 }
116 
117 static void bucket_table_free_rcu(struct rcu_head *head)
118 {
119         bucket_table_free(container_of(head, struct bucket_table, rcu));
120 }
121 
122 static union nested_table *nested_table_alloc(struct rhashtable *ht,
123                                               union nested_table __rcu **prev,
124                                               bool leaf)
125 {
126         union nested_table *ntbl;
127         int i;
128 
129         ntbl = rcu_dereference(*prev);
130         if (ntbl)
131                 return ntbl;
132 
133         ntbl = alloc_hooks_tag(ht->alloc_tag,
134                         kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));
135 
136         if (ntbl && leaf) {
137                 for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
138                         INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
139         }
140 
141         if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
142                 return ntbl;
143         /* Raced with another thread. */
144         kfree(ntbl);
145         return rcu_dereference(*prev);
146 }
147 
148 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
149                                                       size_t nbuckets,
150                                                       gfp_t gfp)
151 {
152         const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
153         struct bucket_table *tbl;
154         size_t size;
155 
156         if (nbuckets < (1 << (shift + 1)))
157                 return NULL;
158 
159         size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
160 
161         tbl = alloc_hooks_tag(ht->alloc_tag,
162                         kmalloc_noprof(size, gfp|__GFP_ZERO));
163         if (!tbl)
164                 return NULL;
165 
166         if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
167                                 false)) {
168                 kfree(tbl);
169                 return NULL;
170         }
171 
172         tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
173 
174         return tbl;
175 }
176 
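A quick worked example of the nested-table geometry above, assuming 4 KiB pages and 8-byte pointers (so shift = PAGE_SHIFT - ilog2(sizeof(void *)) = 12 - 3 = 9, i.e. 512 entries per page): for nbuckets = 65536, tbl->nest = (ilog2(65536) - 1) % 9 + 1 = 7, so the top level is indexed with 7 hash bits (128 slots) and each slot points to a leaf page covering 65536 >> 7 = 512 buckets, giving 128 * 512 = 65536 buckets in total.
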
177 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
178                                                size_t nbuckets,
179                                                gfp_t gfp)
180 {
181         struct bucket_table *tbl = NULL;
182         size_t size;
183         int i;
184         static struct lock_class_key __key;
185 
186         tbl = alloc_hooks_tag(ht->alloc_tag,
187                         kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
188                                              gfp|__GFP_ZERO, NUMA_NO_NODE));
189 
190         size = nbuckets;
191 
192         if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
193                 tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
194                 nbuckets = 0;
195         }
196 
197         if (tbl == NULL)
198                 return NULL;
199 
200         lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
201 
202         tbl->size = size;
203 
204         rcu_head_init(&tbl->rcu);
205         INIT_LIST_HEAD(&tbl->walkers);
206 
207         tbl->hash_rnd = get_random_u32();
208 
209         for (i = 0; i < nbuckets; i++)
210                 INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
211 
212         return tbl;
213 }
214 
215 static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
216                                                   struct bucket_table *tbl)
217 {
218         struct bucket_table *new_tbl;
219 
220         do {
221                 new_tbl = tbl;
222                 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
223         } while (tbl);
224 
225         return new_tbl;
226 }
227 
228 static int rhashtable_rehash_one(struct rhashtable *ht,
229                                  struct rhash_lock_head __rcu **bkt,
230                                  unsigned int old_hash)
231 {
232         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
233         struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
234         int err = -EAGAIN;
235         struct rhash_head *head, *next, *entry;
236         struct rhash_head __rcu **pprev = NULL;
237         unsigned int new_hash;
238         unsigned long flags;
239 
240         if (new_tbl->nest)
241                 goto out;
242 
243         err = -ENOENT;
244 
245         rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
246                           old_tbl, old_hash) {
247                 err = 0;
248                 next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
249 
250                 if (rht_is_a_nulls(next))
251                         break;
252 
253                 pprev = &entry->next;
254         }
255 
256         if (err)
257                 goto out;
258 
259         new_hash = head_hashfn(ht, new_tbl, entry);
260 
261         flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
262                                 SINGLE_DEPTH_NESTING);
263 
264         head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
265 
266         RCU_INIT_POINTER(entry->next, head);
267 
268         rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);
269 
270         if (pprev)
271                 rcu_assign_pointer(*pprev, next);
272         else
 273                 /* Need to preserve the bit lock. */
274                 rht_assign_locked(bkt, next);
275 
276 out:
277         return err;
278 }
279 
280 static int rhashtable_rehash_chain(struct rhashtable *ht,
281                                     unsigned int old_hash)
282 {
283         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
284         struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
285         unsigned long flags;
286         int err;
287 
288         if (!bkt)
289                 return 0;
290         flags = rht_lock(old_tbl, bkt);
291 
292         while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
293                 ;
294 
295         if (err == -ENOENT)
296                 err = 0;
297         rht_unlock(old_tbl, bkt, flags);
298 
299         return err;
300 }
301 
302 static int rhashtable_rehash_attach(struct rhashtable *ht,
303                                     struct bucket_table *old_tbl,
304                                     struct bucket_table *new_tbl)
305 {
306         /* Make insertions go into the new, empty table right away. Deletions
307          * and lookups will be attempted in both tables until we synchronize.
308          * As cmpxchg() provides strong barriers, we do not need
309          * rcu_assign_pointer().
310          */
311 
312         if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
313                     new_tbl) != NULL)
314                 return -EEXIST;
315 
316         return 0;
317 }
318 
319 static int rhashtable_rehash_table(struct rhashtable *ht)
320 {
321         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
322         struct bucket_table *new_tbl;
323         struct rhashtable_walker *walker;
324         unsigned int old_hash;
325         int err;
326 
327         new_tbl = rht_dereference(old_tbl->future_tbl, ht);
328         if (!new_tbl)
329                 return 0;
330 
331         for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
332                 err = rhashtable_rehash_chain(ht, old_hash);
333                 if (err)
334                         return err;
335                 cond_resched();
336         }
337 
338         /* Publish the new table pointer. */
339         rcu_assign_pointer(ht->tbl, new_tbl);
340 
341         spin_lock(&ht->lock);
342         list_for_each_entry(walker, &old_tbl->walkers, list)
343                 walker->tbl = NULL;
344 
345         /* Wait for readers. All new readers will see the new
346          * table, and thus no references to the old table will
347          * remain.
348          * We do this inside the locked region so that
349          * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
 350          * to check whether the table is being freed and must not be re-linked.
351          */
352         call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
353         spin_unlock(&ht->lock);
354 
355         return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
356 }
357 
358 static int rhashtable_rehash_alloc(struct rhashtable *ht,
359                                    struct bucket_table *old_tbl,
360                                    unsigned int size)
361 {
362         struct bucket_table *new_tbl;
363         int err;
364 
365         ASSERT_RHT_MUTEX(ht);
366 
367         new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
368         if (new_tbl == NULL)
369                 return -ENOMEM;
370 
371         err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
372         if (err)
373                 bucket_table_free(new_tbl);
374 
375         return err;
376 }
377 
378 /**
379  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
380  * @ht:         the hash table to shrink
381  *
 382  * This function shrinks the hash table to fit, i.e., to the smallest
 383  * size that would not cause it to expand right away automatically.
384  *
385  * The caller must ensure that no concurrent resizing occurs by holding
386  * ht->mutex.
387  *
388  * The caller must ensure that no concurrent table mutations take place.
389  * It is however valid to have concurrent lookups if they are RCU protected.
390  *
 391  * It is valid to have concurrent insertions and deletions protected by
 392  * per-bucket locks, or concurrent RCU-protected lookups and traversals.
393  */
394 static int rhashtable_shrink(struct rhashtable *ht)
395 {
396         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
397         unsigned int nelems = atomic_read(&ht->nelems);
398         unsigned int size = 0;
399 
400         if (nelems)
401                 size = roundup_pow_of_two(nelems * 3 / 2);
402         if (size < ht->p.min_size)
403                 size = ht->p.min_size;
404 
405         if (old_tbl->size <= size)
406                 return 0;
407 
408         if (rht_dereference(old_tbl->future_tbl, ht))
409                 return -EEXIST;
410 
411         return rhashtable_rehash_alloc(ht, old_tbl, size);
412 }
413 
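As a worked example of the target size computed above: with nelems = 500, roundup_pow_of_two(500 * 3 / 2) = roundup_pow_of_two(750) = 1024; a larger ht->p.min_size would override that, and the shrink is only attempted when the current table is strictly bigger than the target and no future_tbl is already pending.
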
414 static void rht_deferred_worker(struct work_struct *work)
415 {
416         struct rhashtable *ht;
417         struct bucket_table *tbl;
418         int err = 0;
419 
420         ht = container_of(work, struct rhashtable, run_work);
421         mutex_lock(&ht->mutex);
422 
423         tbl = rht_dereference(ht->tbl, ht);
424         tbl = rhashtable_last_table(ht, tbl);
425 
426         if (rht_grow_above_75(ht, tbl))
427                 err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
428         else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
429                 err = rhashtable_shrink(ht);
430         else if (tbl->nest)
431                 err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
432 
433         if (!err || err == -EEXIST) {
434                 int nerr;
435 
436                 nerr = rhashtable_rehash_table(ht);
437                 err = err ?: nerr;
438         }
439 
440         mutex_unlock(&ht->mutex);
441 
442         if (err)
443                 schedule_work(&ht->run_work);
444 }
445 
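To make the resize policy above concrete (a rough illustration based on the rht_grow_above_75()/rht_shrink_below_30() helpers): for a 64-bucket table, the worker grows to 128 buckets once nelems exceeds 48 (75%), shrinks (only with automatic_shrinking set) once nelems falls below about 19 (30%), and rehashes a nested table (tbl->nest != 0) at the same size so it can be replaced by a flat allocation now that GFP_KERNEL is available.
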
446 static int rhashtable_insert_rehash(struct rhashtable *ht,
447                                     struct bucket_table *tbl)
448 {
449         struct bucket_table *old_tbl;
450         struct bucket_table *new_tbl;
451         unsigned int size;
452         int err;
453 
454         old_tbl = rht_dereference_rcu(ht->tbl, ht);
455 
456         size = tbl->size;
457 
458         err = -EBUSY;
459 
460         if (rht_grow_above_75(ht, tbl))
461                 size *= 2;
462         /* Do not schedule more than one rehash */
463         else if (old_tbl != tbl)
464                 goto fail;
465 
466         err = -ENOMEM;
467 
468         new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
469         if (new_tbl == NULL)
470                 goto fail;
471 
472         err = rhashtable_rehash_attach(ht, tbl, new_tbl);
473         if (err) {
474                 bucket_table_free(new_tbl);
475                 if (err == -EEXIST)
476                         err = 0;
477         } else
478                 schedule_work(&ht->run_work);
479 
480         return err;
481 
482 fail:
483         /* Do not fail the insert if someone else did a rehash. */
484         if (likely(rcu_access_pointer(tbl->future_tbl)))
485                 return 0;
486 
487         /* Schedule async rehash to retry allocation in process context. */
488         if (err == -ENOMEM)
489                 schedule_work(&ht->run_work);
490 
491         return err;
492 }
493 
494 static void *rhashtable_lookup_one(struct rhashtable *ht,
495                                    struct rhash_lock_head __rcu **bkt,
496                                    struct bucket_table *tbl, unsigned int hash,
497                                    const void *key, struct rhash_head *obj)
498 {
499         struct rhashtable_compare_arg arg = {
500                 .ht = ht,
501                 .key = key,
502         };
503         struct rhash_head __rcu **pprev = NULL;
504         struct rhash_head *head;
505         int elasticity;
506 
507         elasticity = RHT_ELASTICITY;
508         rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
509                 struct rhlist_head *list;
510                 struct rhlist_head *plist;
511 
512                 elasticity--;
513                 if (!key ||
514                     (ht->p.obj_cmpfn ?
515                      ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
516                      rhashtable_compare(&arg, rht_obj(ht, head)))) {
517                         pprev = &head->next;
518                         continue;
519                 }
520 
521                 if (!ht->rhlist)
522                         return rht_obj(ht, head);
523 
524                 list = container_of(obj, struct rhlist_head, rhead);
525                 plist = container_of(head, struct rhlist_head, rhead);
526 
527                 RCU_INIT_POINTER(list->next, plist);
528                 head = rht_dereference_bucket(head->next, tbl, hash);
529                 RCU_INIT_POINTER(list->rhead.next, head);
530                 if (pprev)
531                         rcu_assign_pointer(*pprev, obj);
532                 else
533                         /* Need to preserve the bit lock */
534                         rht_assign_locked(bkt, obj);
535 
536                 return NULL;
537         }
538 
539         if (elasticity <= 0)
540                 return ERR_PTR(-EAGAIN);
541 
542         return ERR_PTR(-ENOENT);
543 }
544 
545 static struct bucket_table *rhashtable_insert_one(
546         struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
547         struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
548         void *data)
549 {
550         struct bucket_table *new_tbl;
551         struct rhash_head *head;
552 
553         if (!IS_ERR_OR_NULL(data))
554                 return ERR_PTR(-EEXIST);
555 
556         if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
557                 return ERR_CAST(data);
558 
559         new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
560         if (new_tbl)
561                 return new_tbl;
562 
563         if (PTR_ERR(data) != -ENOENT)
564                 return ERR_CAST(data);
565 
566         if (unlikely(rht_grow_above_max(ht, tbl)))
567                 return ERR_PTR(-E2BIG);
568 
569         if (unlikely(rht_grow_above_100(ht, tbl)))
570                 return ERR_PTR(-EAGAIN);
571 
572         head = rht_ptr(bkt, tbl, hash);
573 
574         RCU_INIT_POINTER(obj->next, head);
575         if (ht->rhlist) {
576                 struct rhlist_head *list;
577 
578                 list = container_of(obj, struct rhlist_head, rhead);
579                 RCU_INIT_POINTER(list->next, NULL);
580         }
581 
582         /* bkt is always the head of the list, so it holds
583          * the lock, which we need to preserve
584          */
585         rht_assign_locked(bkt, obj);
586 
587         atomic_inc(&ht->nelems);
588         if (rht_grow_above_75(ht, tbl))
589                 schedule_work(&ht->run_work);
590 
591         return NULL;
592 }
593 
594 static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
595                                    struct rhash_head *obj)
596 {
597         struct bucket_table *new_tbl;
598         struct bucket_table *tbl;
599         struct rhash_lock_head __rcu **bkt;
600         unsigned long flags;
601         unsigned int hash;
602         void *data;
603 
604         new_tbl = rcu_dereference(ht->tbl);
605 
606         do {
607                 tbl = new_tbl;
608                 hash = rht_head_hashfn(ht, tbl, obj, ht->p);
609                 if (rcu_access_pointer(tbl->future_tbl))
610                         /* Failure is OK */
611                         bkt = rht_bucket_var(tbl, hash);
612                 else
613                         bkt = rht_bucket_insert(ht, tbl, hash);
614                 if (bkt == NULL) {
615                         new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
616                         data = ERR_PTR(-EAGAIN);
617                 } else {
618                         flags = rht_lock(tbl, bkt);
619                         data = rhashtable_lookup_one(ht, bkt, tbl,
620                                                      hash, key, obj);
621                         new_tbl = rhashtable_insert_one(ht, bkt, tbl,
622                                                         hash, obj, data);
623                         if (PTR_ERR(new_tbl) != -EEXIST)
624                                 data = ERR_CAST(new_tbl);
625 
626                         rht_unlock(tbl, bkt, flags);
627                 }
628         } while (!IS_ERR_OR_NULL(new_tbl));
629 
630         if (PTR_ERR(data) == -EAGAIN)
631                 data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
632                                -EAGAIN);
633 
634         return data;
635 }
636 
637 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
638                              struct rhash_head *obj)
639 {
640         void *data;
641 
642         do {
643                 rcu_read_lock();
644                 data = rhashtable_try_insert(ht, key, obj);
645                 rcu_read_unlock();
646         } while (PTR_ERR(data) == -EAGAIN);
647 
648         return data;
649 }
650 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
651 
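rhashtable_insert_slow() is only the out-of-line fallback; callers normally use the inline fast-path helpers from <linux/rhashtable.h>. A minimal, hedged sketch of that caller side (test_obj and test_params are illustrative, mirroring the kernel-doc example for rhashtable_init() below; error handling abbreviated):

        #include <linux/rhashtable.h>

        struct test_obj {
                int key;
                struct rhash_head node;
        };

        static const struct rhashtable_params test_params = {
                .head_offset = offsetof(struct test_obj, node),
                .key_offset  = offsetof(struct test_obj, key),
                .key_len     = sizeof(int),
        };

        static int add_and_find(struct rhashtable *ht, struct test_obj *obj)
        {
                struct test_obj *found;
                int err;

                /* Fast-path insert; returns -EEXIST if the key is already present. */
                err = rhashtable_insert_fast(ht, &obj->node, test_params);
                if (err)
                        return err;

                /* Lookups must run under RCU; the result is only valid inside it. */
                rcu_read_lock();
                found = rhashtable_lookup(ht, &obj->key, test_params);
                rcu_read_unlock();

                return found ? 0 : -ENOENT;
        }
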
652 /**
653  * rhashtable_walk_enter - Initialise an iterator
654  * @ht:         Table to walk over
655  * @iter:       Hash table Iterator
656  *
657  * This function prepares a hash table walk.
658  *
659  * Note that if you restart a walk after rhashtable_walk_stop you
660  * may see the same object twice.  Also, you may miss objects if
661  * there are removals in between rhashtable_walk_stop and the next
662  * call to rhashtable_walk_start.
663  *
664  * For a completely stable walk you should construct your own data
665  * structure outside the hash table.
666  *
667  * This function may be called from any process context, including
 668  * non-preemptible context, but cannot be called from softirq or
669  * hardirq context.
670  *
671  * You must call rhashtable_walk_exit after this function returns.
672  */
673 void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
674 {
675         iter->ht = ht;
676         iter->p = NULL;
677         iter->slot = 0;
678         iter->skip = 0;
679         iter->end_of_table = 0;
680 
681         spin_lock(&ht->lock);
682         iter->walker.tbl =
683                 rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
684         list_add(&iter->walker.list, &iter->walker.tbl->walkers);
685         spin_unlock(&ht->lock);
686 }
687 EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
688 
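A hedged sketch of the walker contract described above, using the iterator functions defined in the remainder of this file (rhashtable_walk_start() is the void inline wrapper around rhashtable_walk_start_check()); test_obj is the same illustrative structure as in the earlier insert/lookup sketch:

        static void dump_all(struct rhashtable *ht)
        {
                struct rhashtable_iter iter;
                struct test_obj *obj;

                rhashtable_walk_enter(ht, &iter);
                rhashtable_walk_start(&iter);

                while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                        if (IS_ERR(obj)) {
                                /* -EAGAIN: a resize occurred, iterator was rewound. */
                                continue;
                        }
                        pr_info("key=%d\n", obj->key);
                }

                rhashtable_walk_stop(&iter);
                rhashtable_walk_exit(&iter);
        }
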
689 /**
690  * rhashtable_walk_exit - Free an iterator
691  * @iter:       Hash table Iterator
692  *
693  * This function frees resources allocated by rhashtable_walk_enter.
694  */
695 void rhashtable_walk_exit(struct rhashtable_iter *iter)
696 {
697         spin_lock(&iter->ht->lock);
698         if (iter->walker.tbl)
699                 list_del(&iter->walker.list);
700         spin_unlock(&iter->ht->lock);
701 }
702 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
703 
704 /**
705  * rhashtable_walk_start_check - Start a hash table walk
706  * @iter:       Hash table iterator
707  *
708  * Start a hash table walk at the current iterator position.  Note that we take
709  * the RCU lock in all cases including when we return an error.  So you must
710  * always call rhashtable_walk_stop to clean up.
711  *
712  * Returns zero if successful.
713  *
 714  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
715  * will rewind back to the beginning and you may use it immediately
716  * by calling rhashtable_walk_next.
717  *
718  * rhashtable_walk_start is defined as an inline variant that returns
719  * void. This is preferred in cases where the caller would ignore
720  * resize events and always continue.
721  */
722 int rhashtable_walk_start_check(struct rhashtable_iter *iter)
723         __acquires(RCU)
724 {
725         struct rhashtable *ht = iter->ht;
726         bool rhlist = ht->rhlist;
727 
728         rcu_read_lock();
729 
730         spin_lock(&ht->lock);
731         if (iter->walker.tbl)
732                 list_del(&iter->walker.list);
733         spin_unlock(&ht->lock);
734 
735         if (iter->end_of_table)
736                 return 0;
737         if (!iter->walker.tbl) {
738                 iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
739                 iter->slot = 0;
740                 iter->skip = 0;
741                 return -EAGAIN;
742         }
743 
744         if (iter->p && !rhlist) {
745                 /*
746                  * We need to validate that 'p' is still in the table, and
747                  * if so, update 'skip'
748                  */
749                 struct rhash_head *p;
750                 int skip = 0;
751                 rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
752                         skip++;
753                         if (p == iter->p) {
754                                 iter->skip = skip;
755                                 goto found;
756                         }
757                 }
758                 iter->p = NULL;
759         } else if (iter->p && rhlist) {
760                 /* Need to validate that 'list' is still in the table, and
761                  * if so, update 'skip' and 'p'.
762                  */
763                 struct rhash_head *p;
764                 struct rhlist_head *list;
765                 int skip = 0;
766                 rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
767                         for (list = container_of(p, struct rhlist_head, rhead);
768                              list;
769                              list = rcu_dereference(list->next)) {
770                                 skip++;
771                                 if (list == iter->list) {
772                                         iter->p = p;
773                                         iter->skip = skip;
774                                         goto found;
775                                 }
776                         }
777                 }
778                 iter->p = NULL;
779         }
780 found:
781         return 0;
782 }
783 EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
784 
785 /**
786  * __rhashtable_walk_find_next - Find the next element in a table (or the first
787  * one in case of a new walk).
788  *
789  * @iter:       Hash table iterator
790  *
791  * Returns the found object or NULL when the end of the table is reached.
792  *
 793  * Returns -EAGAIN if a resize event occurred.
794  */
795 static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
796 {
797         struct bucket_table *tbl = iter->walker.tbl;
798         struct rhlist_head *list = iter->list;
799         struct rhashtable *ht = iter->ht;
800         struct rhash_head *p = iter->p;
801         bool rhlist = ht->rhlist;
802 
803         if (!tbl)
804                 return NULL;
805 
806         for (; iter->slot < tbl->size; iter->slot++) {
807                 int skip = iter->skip;
808 
809                 rht_for_each_rcu(p, tbl, iter->slot) {
810                         if (rhlist) {
811                                 list = container_of(p, struct rhlist_head,
812                                                     rhead);
813                                 do {
814                                         if (!skip)
815                                                 goto next;
816                                         skip--;
817                                         list = rcu_dereference(list->next);
818                                 } while (list);
819 
820                                 continue;
821                         }
822                         if (!skip)
823                                 break;
824                         skip--;
825                 }
826 
827 next:
828                 if (!rht_is_a_nulls(p)) {
829                         iter->skip++;
830                         iter->p = p;
831                         iter->list = list;
832                         return rht_obj(ht, rhlist ? &list->rhead : p);
833                 }
834 
835                 iter->skip = 0;
836         }
837 
838         iter->p = NULL;
839 
840         /* Ensure we see any new tables. */
841         smp_rmb();
842 
843         iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
844         if (iter->walker.tbl) {
845                 iter->slot = 0;
846                 iter->skip = 0;
847                 return ERR_PTR(-EAGAIN);
848         } else {
849                 iter->end_of_table = true;
850         }
851 
852         return NULL;
853 }
854 
855 /**
856  * rhashtable_walk_next - Return the next object and advance the iterator
857  * @iter:       Hash table iterator
858  *
859  * Note that you must call rhashtable_walk_stop when you are finished
860  * with the walk.
861  *
862  * Returns the next object or NULL when the end of the table is reached.
863  *
 864  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
865  * will rewind back to the beginning and you may continue to use it.
866  */
867 void *rhashtable_walk_next(struct rhashtable_iter *iter)
868 {
869         struct rhlist_head *list = iter->list;
870         struct rhashtable *ht = iter->ht;
871         struct rhash_head *p = iter->p;
872         bool rhlist = ht->rhlist;
873 
874         if (p) {
875                 if (!rhlist || !(list = rcu_dereference(list->next))) {
876                         p = rcu_dereference(p->next);
877                         list = container_of(p, struct rhlist_head, rhead);
878                 }
879                 if (!rht_is_a_nulls(p)) {
880                         iter->skip++;
881                         iter->p = p;
882                         iter->list = list;
883                         return rht_obj(ht, rhlist ? &list->rhead : p);
884                 }
885 
 886                 /* At the end of this slot, switch to the next one and then
 887                  * find the next entry from that point.
888                  */
889                 iter->skip = 0;
890                 iter->slot++;
891         }
892 
893         return __rhashtable_walk_find_next(iter);
894 }
895 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
896 
897 /**
898  * rhashtable_walk_peek - Return the next object but don't advance the iterator
899  * @iter:       Hash table iterator
900  *
901  * Returns the next object or NULL when the end of the table is reached.
902  *
 903  * Returns -EAGAIN if a resize event occurred.  Note that the iterator
904  * will rewind back to the beginning and you may continue to use it.
905  */
906 void *rhashtable_walk_peek(struct rhashtable_iter *iter)
907 {
908         struct rhlist_head *list = iter->list;
909         struct rhashtable *ht = iter->ht;
910         struct rhash_head *p = iter->p;
911 
912         if (p)
913                 return rht_obj(ht, ht->rhlist ? &list->rhead : p);
914 
915         /* No object found in current iter, find next one in the table. */
916 
917         if (iter->skip) {
918                 /* A nonzero skip value points to the next entry in the table
 919                  * beyond the last one that was found. Decrement skip so
920                  * we find the current value. __rhashtable_walk_find_next
921                  * will restore the original value of skip assuming that
922                  * the table hasn't changed.
923                  */
924                 iter->skip--;
925         }
926 
927         return __rhashtable_walk_find_next(iter);
928 }
929 EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
930 
931 /**
932  * rhashtable_walk_stop - Finish a hash table walk
933  * @iter:       Hash table iterator
934  *
935  * Finish a hash table walk.  Does not reset the iterator to the start of the
936  * hash table.
937  */
938 void rhashtable_walk_stop(struct rhashtable_iter *iter)
939         __releases(RCU)
940 {
941         struct rhashtable *ht;
942         struct bucket_table *tbl = iter->walker.tbl;
943 
944         if (!tbl)
945                 goto out;
946 
947         ht = iter->ht;
948 
949         spin_lock(&ht->lock);
950         if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
951                 /* This bucket table is being freed, don't re-link it. */
952                 iter->walker.tbl = NULL;
953         else
954                 list_add(&iter->walker.list, &tbl->walkers);
955         spin_unlock(&ht->lock);
956 
957 out:
958         rcu_read_unlock();
959 }
960 EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
961 
962 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
963 {
964         size_t retsize;
965 
966         if (params->nelem_hint)
967                 retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
968                               (unsigned long)params->min_size);
969         else
970                 retsize = max(HASH_DEFAULT_SIZE,
971                               (unsigned long)params->min_size);
972 
973         return retsize;
974 }
975 
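For instance, nelem_hint = 100 yields roundup_pow_of_two(100 * 4 / 3) = roundup_pow_of_two(133) = 256 buckets, while a table created without a hint starts at HASH_DEFAULT_SIZE = 64; in both cases params->min_size wins if it is larger.
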
976 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
977 {
978         return jhash2(key, length, seed);
979 }
980 
981 /**
982  * rhashtable_init - initialize a new hash table
983  * @ht:         hash table to be initialized
984  * @params:     configuration parameters
985  *
986  * Initializes a new hash table based on the provided configuration
987  * parameters. A table can be configured either with a variable or
988  * fixed length key:
989  *
990  * Configuration Example 1: Fixed length keys
991  * struct test_obj {
992  *      int                     key;
993  *      void *                  my_member;
994  *      struct rhash_head       node;
995  * };
996  *
997  * struct rhashtable_params params = {
998  *      .head_offset = offsetof(struct test_obj, node),
999  *      .key_offset = offsetof(struct test_obj, key),
1000  *      .key_len = sizeof(int),
1001  *      .hashfn = jhash,
1002  * };
1003  *
1004  * Configuration Example 2: Variable length keys
1005  * struct test_obj {
1006  *      [...]
1007  *      struct rhash_head       node;
1008  * };
1009  *
1010  * u32 my_hash_fn(const void *data, u32 len, u32 seed)
1011  * {
1012  *      struct test_obj *obj = data;
1013  *
1014  *      return [... hash ...];
1015  * }
1016  *
1017  * struct rhashtable_params params = {
1018  *      .head_offset = offsetof(struct test_obj, node),
1019  *      .hashfn = jhash,
1020  *      .obj_hashfn = my_hash_fn,
1021  * };
1022  */
1023 int rhashtable_init_noprof(struct rhashtable *ht,
1024                     const struct rhashtable_params *params)
1025 {
1026         struct bucket_table *tbl;
1027         size_t size;
1028 
1029         if ((!params->key_len && !params->obj_hashfn) ||
1030             (params->obj_hashfn && !params->obj_cmpfn))
1031                 return -EINVAL;
1032 
1033         memset(ht, 0, sizeof(*ht));
1034         mutex_init(&ht->mutex);
1035         spin_lock_init(&ht->lock);
1036         memcpy(&ht->p, params, sizeof(*params));
1037 
1038         alloc_tag_record(ht->alloc_tag);
1039 
1040         if (params->min_size)
1041                 ht->p.min_size = roundup_pow_of_two(params->min_size);
1042 
1043         /* Cap total entries at 2^31 to avoid nelems overflow. */
1044         ht->max_elems = 1u << 31;
1045 
1046         if (params->max_size) {
1047                 ht->p.max_size = rounddown_pow_of_two(params->max_size);
1048                 if (ht->p.max_size < ht->max_elems / 2)
1049                         ht->max_elems = ht->p.max_size * 2;
1050         }
1051 
1052         ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1053 
1054         size = rounded_hashtable_size(&ht->p);
1055 
1056         ht->key_len = ht->p.key_len;
1057         if (!params->hashfn) {
1058                 ht->p.hashfn = jhash;
1059 
1060                 if (!(ht->key_len & (sizeof(u32) - 1))) {
1061                         ht->key_len /= sizeof(u32);
1062                         ht->p.hashfn = rhashtable_jhash2;
1063                 }
1064         }
1065 
1066         /*
1067          * This is API initialization and thus we need to guarantee the
1068          * initial rhashtable allocation. Upon failure, retry with the
1069          * smallest possible size with __GFP_NOFAIL semantics.
1070          */
1071         tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
1072         if (unlikely(tbl == NULL)) {
1073                 size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1074                 tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
1075         }
1076 
1077         atomic_set(&ht->nelems, 0);
1078 
1079         RCU_INIT_POINTER(ht->tbl, tbl);
1080 
1081         INIT_WORK(&ht->run_work, rht_deferred_worker);
1082 
1083         return 0;
1084 }
1085 EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
1086 
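A minimal lifecycle sketch tying the configuration examples above together; rhashtable_init() is the usual caller-facing wrapper around the _noprof variant, and my_ht/test_params are illustrative:

        static struct rhashtable my_ht;

        static int __init my_module_init(void)
        {
                /* The implementation falls back to a minimal table with
                 * __GFP_NOFAIL, so the return value here reports parameter
                 * problems (-EINVAL) rather than allocation failure.
                 */
                return rhashtable_init(&my_ht, &test_params);
        }

        static void __exit my_module_exit(void)
        {
                /* No free_fn: assumes the objects are freed elsewhere. */
                rhashtable_destroy(&my_ht);
        }
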
1087 /**
1088  * rhltable_init - initialize a new hash list table
1089  * @hlt:        hash list table to be initialized
1090  * @params:     configuration parameters
1091  *
1092  * Initializes a new hash list table.
1093  *
1094  * See documentation for rhashtable_init.
1095  */
1096 int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
1097 {
1098         int err;
1099 
1100         err = rhashtable_init_noprof(&hlt->ht, params);
1101         hlt->ht.rhlist = true;
1102         return err;
1103 }
1104 EXPORT_SYMBOL_GPL(rhltable_init_noprof);
1105 
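A hedged sketch of rhltable usage (duplicate keys allowed). The helpers named here (rhltable_lookup(), rhl_for_each_entry_rcu()) come from <linux/rhashtable.h>; my_obj and my_params are illustrative:

        struct my_obj {
                int key;
                struct rhlist_head node;        /* rhlist_head, not rhash_head */
        };

        static const struct rhashtable_params my_params = {
                .head_offset = offsetof(struct my_obj, node),
                .key_offset  = offsetof(struct my_obj, key),
                .key_len     = sizeof(int),
        };

        static void show_all_with_key(struct rhltable *hlt, int key)
        {
                struct rhlist_head *list, *pos;
                struct my_obj *obj;

                rcu_read_lock();
                list = rhltable_lookup(hlt, &key, my_params);
                rhl_for_each_entry_rcu(obj, pos, list, node)
                        pr_info("duplicate for key %d: %p\n", key, obj);
                rcu_read_unlock();
        }
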
1106 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
1107                                 void (*free_fn)(void *ptr, void *arg),
1108                                 void *arg)
1109 {
1110         struct rhlist_head *list;
1111 
1112         if (!ht->rhlist) {
1113                 free_fn(rht_obj(ht, obj), arg);
1114                 return;
1115         }
1116 
1117         list = container_of(obj, struct rhlist_head, rhead);
1118         do {
1119                 obj = &list->rhead;
1120                 list = rht_dereference(list->next, ht);
1121                 free_fn(rht_obj(ht, obj), arg);
1122         } while (list);
1123 }
1124 
1125 /**
1126  * rhashtable_free_and_destroy - free elements and destroy hash table
1127  * @ht:         the hash table to destroy
1128  * @free_fn:    callback to release resources of element
1129  * @arg:        pointer passed to free_fn
1130  *
1131  * Stops any pending async resize. If defined, invokes free_fn for each
1132  * element to release its resources. Please note that RCU-protected
1133  * readers may still be accessing the elements. Releasing of resources
1134  * must occur in a compatible manner. Then frees the bucket array.
1135  *
1136  * This function will eventually sleep to wait for an async resize
1137  * to complete. The caller is responsible for ensuring that no further
1138  * write operations occur in parallel.
1139  */
1140 void rhashtable_free_and_destroy(struct rhashtable *ht,
1141                                  void (*free_fn)(void *ptr, void *arg),
1142                                  void *arg)
1143 {
1144         struct bucket_table *tbl, *next_tbl;
1145         unsigned int i;
1146 
1147         cancel_work_sync(&ht->run_work);
1148 
1149         mutex_lock(&ht->mutex);
1150         tbl = rht_dereference(ht->tbl, ht);
1151 restart:
1152         if (free_fn) {
1153                 for (i = 0; i < tbl->size; i++) {
1154                         struct rhash_head *pos, *next;
1155 
1156                         cond_resched();
1157                         for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
1158                              next = !rht_is_a_nulls(pos) ?
1159                                         rht_dereference(pos->next, ht) : NULL;
1160                              !rht_is_a_nulls(pos);
1161                              pos = next,
1162                              next = !rht_is_a_nulls(pos) ?
1163                                         rht_dereference(pos->next, ht) : NULL)
1164                                 rhashtable_free_one(ht, pos, free_fn, arg);
1165                 }
1166         }
1167 
1168         next_tbl = rht_dereference(tbl->future_tbl, ht);
1169         bucket_table_free(tbl);
1170         if (next_tbl) {
1171                 tbl = next_tbl;
1172                 goto restart;
1173         }
1174         mutex_unlock(&ht->mutex);
1175 }
1176 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
1177 
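A short teardown sketch for the function above. Because RCU readers may still hold references, the illustrative free_fn defers the actual free with kfree_rcu(); my_obj is assumed to embed a struct rcu_head named rcu:

        static void free_obj(void *ptr, void *arg)
        {
                struct my_obj *obj = ptr;

                /* Readers may still see the element; defer the real free. */
                kfree_rcu(obj, rcu);
        }

        static void my_table_teardown(struct rhashtable *ht)
        {
                /* May sleep to wait for a pending async resize; the caller
                 * must guarantee there are no concurrent writers.
                 */
                rhashtable_free_and_destroy(ht, free_obj, NULL);
        }
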
1178 void rhashtable_destroy(struct rhashtable *ht)
1179 {
1180         return rhashtable_free_and_destroy(ht, NULL, NULL);
1181 }
1182 EXPORT_SYMBOL_GPL(rhashtable_destroy);
1183 
1184 struct rhash_lock_head __rcu **__rht_bucket_nested(
1185         const struct bucket_table *tbl, unsigned int hash)
1186 {
1187         const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1188         unsigned int index = hash & ((1 << tbl->nest) - 1);
1189         unsigned int size = tbl->size >> tbl->nest;
1190         unsigned int subhash = hash;
1191         union nested_table *ntbl;
1192 
1193         ntbl = nested_table_top(tbl);
1194         ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
1195         subhash >>= tbl->nest;
1196 
1197         while (ntbl && size > (1 << shift)) {
1198                 index = subhash & ((1 << shift) - 1);
1199                 ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
1200                                                   tbl, hash);
1201                 size >>= shift;
1202                 subhash >>= shift;
1203         }
1204 
1205         if (!ntbl)
1206                 return NULL;
1207 
1208         return &ntbl[subhash].bucket;
1209 
1210 }
1211 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
1212 
1213 struct rhash_lock_head __rcu **rht_bucket_nested(
1214         const struct bucket_table *tbl, unsigned int hash)
1215 {
1216         static struct rhash_lock_head __rcu *rhnull;
1217 
1218         if (!rhnull)
1219                 INIT_RHT_NULLS_HEAD(rhnull);
1220         return __rht_bucket_nested(tbl, hash) ?: &rhnull;
1221 }
1222 EXPORT_SYMBOL_GPL(rht_bucket_nested);
1223 
1224 struct rhash_lock_head __rcu **rht_bucket_nested_insert(
1225         struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
1226 {
1227         const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1228         unsigned int index = hash & ((1 << tbl->nest) - 1);
1229         unsigned int size = tbl->size >> tbl->nest;
1230         union nested_table *ntbl;
1231 
1232         ntbl = nested_table_top(tbl);
1233         hash >>= tbl->nest;
1234         ntbl = nested_table_alloc(ht, &ntbl[index].table,
1235                                   size <= (1 << shift));
1236 
1237         while (ntbl && size > (1 << shift)) {
1238                 index = hash & ((1 << shift) - 1);
1239                 size >>= shift;
1240                 hash >>= shift;
1241                 ntbl = nested_table_alloc(ht, &ntbl[index].table,
1242                                           size <= (1 << shift));
1243         }
1244 
1245         if (!ntbl)
1246                 return NULL;
1247 
1248         return &ntbl[hash].bucket;
1249 
1250 }
1251 EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
1252 
