
TOMOYO Linux Cross Reference
Linux/lib/rhashtable.c


Diff markup

Differences between /lib/rhashtable.c (Version linux-6.11-rc3) and /lib/rhashtable.c (Version linux-4.17.19)

In the listing below, the left column is linux-6.11-rc3 and the right column is linux-4.17.19: "<<" marks lines present only in linux-6.11-rc3, ">>" marks lines present only in linux-4.17.19, and "!!" marks lines that differ between the two versions.


  1 // SPDX-License-Identifier: GPL-2.0-only       << 
  2 /*                                                  1 /*
  3  * Resizable, Scalable, Concurrent Hash Table       2  * Resizable, Scalable, Concurrent Hash Table
  4  *                                                  3  *
  5  * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>        4  * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
  6  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>                 5  * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
  7  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>           6  * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
  8  *                                                  7  *
  9  * Code partially derived from nft_hash             8  * Code partially derived from nft_hash
 10  * Rewritten with rehash code from br_multicast plus single list       9  * Rewritten with rehash code from br_multicast plus single list
 11  * pointer as suggested by Josh Triplett           10  * pointer as suggested by Josh Triplett
                                                   >>  11  *
                                                   >>  12  * This program is free software; you can redistribute it and/or modify
                                                   >>  13  * it under the terms of the GNU General Public License version 2 as
                                                   >>  14  * published by the Free Software Foundation.
 12  */                                                15  */
 13                                                    16 
 14 #include <linux/atomic.h>                          17 #include <linux/atomic.h>
 15 #include <linux/kernel.h>                          18 #include <linux/kernel.h>
 16 #include <linux/init.h>                            19 #include <linux/init.h>
 17 #include <linux/log2.h>                            20 #include <linux/log2.h>
 18 #include <linux/sched.h>                           21 #include <linux/sched.h>
 19 #include <linux/rculist.h>                         22 #include <linux/rculist.h>
 20 #include <linux/slab.h>                            23 #include <linux/slab.h>
 21 #include <linux/vmalloc.h>                         24 #include <linux/vmalloc.h>
 22 #include <linux/mm.h>                              25 #include <linux/mm.h>
 23 #include <linux/jhash.h>                           26 #include <linux/jhash.h>
 24 #include <linux/random.h>                          27 #include <linux/random.h>
 25 #include <linux/rhashtable.h>                      28 #include <linux/rhashtable.h>
 26 #include <linux/err.h>                             29 #include <linux/err.h>
 27 #include <linux/export.h>                          30 #include <linux/export.h>
 28                                                    31 
 29 #define HASH_DEFAULT_SIZE       64UL               32 #define HASH_DEFAULT_SIZE       64UL
 30 #define HASH_MIN_SIZE           4U                 33 #define HASH_MIN_SIZE           4U
                                                   >>  34 #define BUCKET_LOCKS_PER_CPU    32UL
 31                                                    35 
 32 union nested_table {                               36 union nested_table {
 33         union nested_table __rcu *table;           37         union nested_table __rcu *table;
 34         struct rhash_lock_head __rcu *bucket;  !!  38         struct rhash_head __rcu *bucket;
 35 };                                                 39 };
 36                                                    40 
 37 static u32 head_hashfn(struct rhashtable *ht,      41 static u32 head_hashfn(struct rhashtable *ht,
 38                        const struct bucket_table *tbl,       42                        const struct bucket_table *tbl,
 39                        const struct rhash_head *he)          43                        const struct rhash_head *he)
 40 {                                                  44 {
 41         return rht_head_hashfn(ht, tbl, he, ht->p);          45         return rht_head_hashfn(ht, tbl, he, ht->p);
 42 }                                                  46 }
 43                                                    47 
 44 #ifdef CONFIG_PROVE_LOCKING                        48 #ifdef CONFIG_PROVE_LOCKING
 45 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))       49 #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
 46                                                    50 
 47 int lockdep_rht_mutex_is_held(struct rhashtable *ht)       51 int lockdep_rht_mutex_is_held(struct rhashtable *ht)
 48 {                                                  52 {
 49         return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;       53         return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
 50 }                                                  54 }
 51 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);      55 EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
 52                                                    56 
 53 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)       57 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
 54 {                                                  58 {
 55         if (!debug_locks)                      !!  59         spinlock_t *lock = rht_bucket_lock(tbl, hash);
 56                 return 1;                      !!  60 
 57         if (unlikely(tbl->nest))               !!  61         return (debug_locks) ? lockdep_is_held(lock) : 1;
 58                 return 1;                      << 
 59         return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);   << 
 60 }                                                  62 }
 61 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);     63 EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 62 #else                                              64 #else
 63 #define ASSERT_RHT_MUTEX(HT)                       65 #define ASSERT_RHT_MUTEX(HT)
 64 #endif                                             66 #endif
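
The biggest change visible in this hunk is the locking model: linux-4.17.19 guarded buckets through a separate spinlock array (tbl->locks, sized via BUCKET_LOCKS_PER_CPU), while linux-6.11-rc3 embeds the lock in bit 0 of each bucket-head pointer, which is what bit_spin_is_locked() inspects above. A minimal stand-alone sketch of that bit-lock idea (the demo_bucket type and helpers are hypothetical; this is not the kernel's rht_lock()/rht_unlock() implementation):

    #include <linux/bit_spinlock.h>

    /* Hypothetical bucket head: a pointer whose bit 0 doubles as a spinlock. */
    struct demo_bucket {
            unsigned long head;
    };

    static void demo_bucket_lock(struct demo_bucket *b)
    {
            bit_spin_lock(0, &b->head);     /* spin until bit 0 can be set */
    }

    static void demo_bucket_unlock(struct demo_bucket *b)
    {
            bit_spin_unlock(0, &b->head);   /* clear bit 0, releasing the lock */
    }

Storing the lock in the pointer itself removes the locks array and its sizing logic, at the cost of masking bit 0 out of every head-pointer read, which is what the rht_ptr()/rht_assign_locked() helpers on the 6.11 side take care of.
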
 65                                                    67 
 66 static inline union nested_table *nested_table_top(       << 
 67         const struct bucket_table *tbl)        << 
 68 {                                              << 
 69         /* The top-level bucket entry does not need RCU protection       << 
 70          * because it's set at the same time as tbl->nest.       << 
 71          */                                    << 
 72         return (void *)rcu_dereference_protected(tbl->buckets[0], 1);       << 
 73 }                                              << 
 74                                                << 
 75 static void nested_table_free(union nested_tab     68 static void nested_table_free(union nested_table *ntbl, unsigned int size)
 76 {                                                  69 {
 77         const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));       70         const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 78         const unsigned int len = 1 << shift;       71         const unsigned int len = 1 << shift;
 79         unsigned int i;                            72         unsigned int i;
 80                                                    73 
 81         ntbl = rcu_dereference_protected(ntbl->table, 1);   !!  74         ntbl = rcu_dereference_raw(ntbl->table);
 82         if (!ntbl)                                 75         if (!ntbl)
 83                 return;                            76                 return;
 84                                                    77 
 85         if (size > len) {                          78         if (size > len) {
 86                 size >>= shift;                    79                 size >>= shift;
 87                 for (i = 0; i < len; i++)          80                 for (i = 0; i < len; i++)
 88                         nested_table_free(ntbl + i, size);       81                         nested_table_free(ntbl + i, size);
 89         }                                          82         }
 90                                                    83 
 91         kfree(ntbl);                               84         kfree(ntbl);
 92 }                                                  85 }
 93                                                    86 
 94 static void nested_bucket_table_free(const str     87 static void nested_bucket_table_free(const struct bucket_table *tbl)
 95 {                                                  88 {
 96         unsigned int size = tbl->size >> tbl->nest;       89         unsigned int size = tbl->size >> tbl->nest;
 97         unsigned int len = 1 << tbl->nest;         90         unsigned int len = 1 << tbl->nest;
 98         union nested_table *ntbl;                  91         union nested_table *ntbl;
 99         unsigned int i;                            92         unsigned int i;
100                                                    93 
101         ntbl = nested_table_top(tbl);          !!  94         ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
102                                                    95 
103         for (i = 0; i < len; i++)                  96         for (i = 0; i < len; i++)
104                 nested_table_free(ntbl + i, size);       97                 nested_table_free(ntbl + i, size);
105                                                    98 
106         kfree(ntbl);                               99         kfree(ntbl);
107 }                                                 100 }
108                                                   101 
109 static void bucket_table_free(const struct buc    102 static void bucket_table_free(const struct bucket_table *tbl)
110 {                                                 103 {
111         if (tbl->nest)                            104         if (tbl->nest)
112                 nested_bucket_table_free(tbl);    105                 nested_bucket_table_free(tbl);
113                                                   106 
                                                   >> 107         free_bucket_spinlocks(tbl->locks);
114         kvfree(tbl);                              108         kvfree(tbl);
115 }                                                 109 }
116                                                   110 
117 static void bucket_table_free_rcu(struct rcu_h    111 static void bucket_table_free_rcu(struct rcu_head *head)
118 {                                                 112 {
119         bucket_table_free(container_of(head, struct bucket_table, rcu));      113         bucket_table_free(container_of(head, struct bucket_table, rcu));
120 }                                                 114 }
121                                                   115 
122 static union nested_table *nested_table_alloc(struct rhashtable *ht,      116 static union nested_table *nested_table_alloc(struct rhashtable *ht,
123                                               union nested_table __rcu **prev,      117                                               union nested_table __rcu **prev,
124                                               bool leaf)   !! 118                                               unsigned int shifted,
                                                   >> 119                                               unsigned int nhash)
125 {                                                 120 {
126         union nested_table *ntbl;                 121         union nested_table *ntbl;
127         int i;                                    122         int i;
128                                                   123 
129         ntbl = rcu_dereference(*prev);            124         ntbl = rcu_dereference(*prev);
130         if (ntbl)                                 125         if (ntbl)
131                 return ntbl;                      126                 return ntbl;
132                                                   127 
133         ntbl = alloc_hooks_tag(ht->alloc_tag,  !! 128         ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
134                         kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));   << 
135                                                   129 
136         if (ntbl && leaf) {                    !! 130         if (ntbl && shifted) {
137                 for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)   !! 131                 for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
138                         INIT_RHT_NULLS_HEAD(ntbl[i].bucket);   !! 132                         INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
                                                   >> 133                                             (i << shifted) | nhash);
139         }                                         134         }
140                                                   135 
141         if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)   !! 136         rcu_assign_pointer(*prev, ntbl);
142                 return ntbl;                   !! 137 
143         /* Raced with another thread. */       !! 138         return ntbl;
144         kfree(ntbl);                           << 
145         return rcu_dereference(*prev);         << 
146 }                                                 139 }
147                                                   140 
148 static struct bucket_table *nested_bucket_tabl    141 static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
149                                                   142                                                       size_t nbuckets,
150                                                   143                                                       gfp_t gfp)
151 {                                                 144 {
152         const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));      145         const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
153         struct bucket_table *tbl;                 146         struct bucket_table *tbl;
154         size_t size;                              147         size_t size;
155                                                   148 
156         if (nbuckets < (1 << (shift + 1)))        149         if (nbuckets < (1 << (shift + 1)))
157                 return NULL;                      150                 return NULL;
158                                                   151 
159         size = sizeof(*tbl) + sizeof(tbl->buckets[0]);      152         size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
160                                                   153 
161         tbl = alloc_hooks_tag(ht->alloc_tag,   !! 154         tbl = kzalloc(size, gfp);
162                         kmalloc_noprof(size, gfp|__GFP_ZERO));   << 
163         if (!tbl)                                 155         if (!tbl)
164                 return NULL;                      156                 return NULL;
165                                                   157 
166         if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,      158         if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
167                                 false)) {      !! 159                                 0, 0)) {
168                 kfree(tbl);                       160                 kfree(tbl);
169                 return NULL;                      161                 return NULL;
170         }                                         162         }
171                                                   163 
172         tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;      164         tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
173                                                   165 
174         return tbl;                               166         return tbl;
175 }                                                 167 }
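
For reference, the arithmetic in nested_bucket_table_alloc() works out as follows on a typical 64-bit, 4 KiB-page system (illustrative user-space sketch; the concrete constants are assumptions for the example, not values read from kernel headers):

    #include <stdio.h>

    int main(void)
    {
            /* shift = PAGE_SHIFT - ilog2(sizeof(void *)) = 12 - 3 = 9,
             * so one page holds 1 << 9 = 512 table slots.
             */
            unsigned int shift = 9;
            unsigned int nbuckets = 8192;   /* must be >= 1 << (shift + 1) */
            unsigned int log = 13;          /* ilog2(8192) */
            unsigned int nest = (log - 1) % shift + 1;      /* = 4 */

            printf("top-level entries: %u\n", 1U << nest);          /* 16  */
            printf("buckets per subtree: %u\n", nbuckets >> nest);  /* 512 */
            return 0;
    }

So an 8192-bucket nested table has a 16-entry top level, each entry covering 512 buckets, which is exactly one page of slots.
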
176                                                   168 
177 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,      169 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
178                                                size_t nbuckets,            170                                                size_t nbuckets,
179                                                gfp_t gfp)                  171                                                gfp_t gfp)
180 {                                                 172 {
181         struct bucket_table *tbl = NULL;          173         struct bucket_table *tbl = NULL;
182         size_t size;                           !! 174         size_t size, max_locks;
183         int i;                                    175         int i;
184         static struct lock_class_key __key;    << 
185                                                   176 
186         tbl = alloc_hooks_tag(ht->alloc_tag,   !! 177         size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
187                         kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),   !! 178         if (gfp != GFP_KERNEL)
188                                              gfp|__GFP_ZERO, NUMA_NO_NODE));   !! 179                 tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
                                                   >> 180         else
                                                   >> 181                 tbl = kvzalloc(size, gfp);
189                                                   182 
190         size = nbuckets;                          183         size = nbuckets;
191                                                   184 
192         if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {   !! 185         if (tbl == NULL && gfp != GFP_KERNEL) {
193                 tbl = nested_bucket_table_allo    186                 tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
194                 nbuckets = 0;                     187                 nbuckets = 0;
195         }                                         188         }
196                                                << 
197         if (tbl == NULL)                          189         if (tbl == NULL)
198                 return NULL;                      190                 return NULL;
199                                                   191 
200         lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);   << 
201                                                << 
202         tbl->size = size;                         192         tbl->size = size;
203                                                   193 
204         rcu_head_init(&tbl->rcu);              !! 194         max_locks = size >> 1;
                                                   >> 195         if (tbl->nest)
                                                   >> 196                 max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
                                                   >> 197 
                                                   >> 198         if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
                                                   >> 199                                    ht->p.locks_mul, gfp) < 0) {
                                                   >> 200                 bucket_table_free(tbl);
                                                   >> 201                 return NULL;
                                                   >> 202         }
                                                   >> 203 
205         INIT_LIST_HEAD(&tbl->walkers);            204         INIT_LIST_HEAD(&tbl->walkers);
206                                                   205 
207         tbl->hash_rnd = get_random_u32();         206         tbl->hash_rnd = get_random_u32();
208                                                   207 
209         for (i = 0; i < nbuckets; i++)            208         for (i = 0; i < nbuckets; i++)
210                 INIT_RHT_NULLS_HEAD(tbl->buckets[i]);   !! 209                 INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
211                                                   210 
212         return tbl;                               211         return tbl;
213 }                                                 212 }
214                                                   213 
215 static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,      214 static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
216                                                   215                                                   struct bucket_table *tbl)
217 {                                                 216 {
218         struct bucket_table *new_tbl;             217         struct bucket_table *new_tbl;
219                                                   218 
220         do {                                      219         do {
221                 new_tbl = tbl;                    220                 new_tbl = tbl;
222                 tbl = rht_dereference_rcu(tbl->future_tbl, ht);      221                 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
223         } while (tbl);                            222         } while (tbl);
224                                                   223 
225         return new_tbl;                           224         return new_tbl;
226 }                                                 225 }
227                                                   226 
228 static int rhashtable_rehash_one(struct rhashtable *ht,   !! 227 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
229                                  struct rhash_lock_head __rcu **bkt,   << 
230                                  unsigned int old_hash)   << 
231 {                                                 228 {
232         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);      229         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
233         struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);   !! 230         struct bucket_table *new_tbl = rhashtable_last_table(ht,
                                                   >> 231                 rht_dereference_rcu(old_tbl->future_tbl, ht));
                                                   >> 232         struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
234         int err = -EAGAIN;                        233         int err = -EAGAIN;
235         struct rhash_head *head, *next, *entry    234         struct rhash_head *head, *next, *entry;
236         struct rhash_head __rcu **pprev = NULL;   !! 235         spinlock_t *new_bucket_lock;
237         unsigned int new_hash;                    236         unsigned int new_hash;
238         unsigned long flags;                   << 
239                                                   237 
240         if (new_tbl->nest)                        238         if (new_tbl->nest)
241                 goto out;                         239                 goto out;
242                                                   240 
243         err = -ENOENT;                            241         err = -ENOENT;
244                                                   242 
245         rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),   !! 243         rht_for_each(entry, old_tbl, old_hash) {
246                           old_tbl, old_hash) {   << 
247                 err = 0;                          244                 err = 0;
248                 next = rht_dereference_bucket(entry->next, old_tbl, old_hash);      245                 next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
249                                                   246 
250                 if (rht_is_a_nulls(next))         247                 if (rht_is_a_nulls(next))
251                         break;                    248                         break;
252                                                   249 
253                 pprev = &entry->next;             250                 pprev = &entry->next;
254         }                                         251         }
255                                                   252 
256         if (err)                                  253         if (err)
257                 goto out;                         254                 goto out;
258                                                   255 
259         new_hash = head_hashfn(ht, new_tbl, en    256         new_hash = head_hashfn(ht, new_tbl, entry);
260                                                   257 
261         flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],   !! 258         new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);
262                                 SINGLE_DEPTH_NESTING);   << 
263                                                   259 
264         head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);   !! 260         spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
                                                   >> 261         head = rht_dereference_bucket(new_tbl->buckets[new_hash],
                                                   >> 262                                       new_tbl, new_hash);
265                                                   263 
266         RCU_INIT_POINTER(entry->next, head);      264         RCU_INIT_POINTER(entry->next, head);
267                                                   265 
268         rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);   !! 266         rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
                                                   >> 267         spin_unlock(new_bucket_lock);
269                                                   268 
270         if (pprev)                             !! 269         rcu_assign_pointer(*pprev, next);
271                 rcu_assign_pointer(*pprev, next);   << 
272         else                                   << 
273                 /* Need to preserve the bit lock */   << 
274                 rht_assign_locked(bkt, next);  << 
275                                                   270 
276 out:                                              271 out:
277         return err;                               272         return err;
278 }                                                 273 }
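
On the 6.11 side, unlinking the first entry cannot simply rcu_assign_pointer() the bucket head, because bit 0 of that head is the bucket lock and must stay set. Roughly what rht_assign_locked() does (a paraphrased sketch under that assumption, not the actual helper from linux/rhashtable.h):

    #include <asm/barrier.h>

    /* Store a new head while keeping bit 0 (the bucket lock) set, so the
     * bucket remains locked across the update. RELEASE ordering ensures
     * readers see the entry fully initialised before they see the pointer.
     */
    static inline void assign_locked(unsigned long *bkt, unsigned long new_head)
    {
            smp_store_release(bkt, new_head | 1UL);
    }

This is why the rehash path distinguishes pprev (an interior next pointer, plain RCU assignment) from bkt (the head, which must go through the lock-preserving store).
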
279                                                   274 
280 static int rhashtable_rehash_chain(struct rhashtable *ht,      275 static int rhashtable_rehash_chain(struct rhashtable *ht,
281                                     unsigned i    276                                     unsigned int old_hash)
282 {                                                 277 {
283         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);      278         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
284         struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);   !! 279         spinlock_t *old_bucket_lock;
285         unsigned long flags;                   << 
286         int err;                                  280         int err;
287                                                   281 
288         if (!bkt)                              !! 282         old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
289                 return 0;                      << 
290         flags = rht_lock(old_tbl, bkt);        << 
291                                                   283 
292         while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))   !! 284         spin_lock_bh(old_bucket_lock);
                                                   >> 285         while (!(err = rhashtable_rehash_one(ht, old_hash)))
293                 ;                                 286                 ;
294                                                   287 
295         if (err == -ENOENT)                    !! 288         if (err == -ENOENT) {
                                                   >> 289                 old_tbl->rehash++;
296                 err = 0;                          290                 err = 0;
297         rht_unlock(old_tbl, bkt, flags);       !! 291         }
                                                   >> 292         spin_unlock_bh(old_bucket_lock);
298                                                   293 
299         return err;                               294         return err;
300 }                                                 295 }
301                                                   296 
302 static int rhashtable_rehash_attach(struct rhashtable *ht,      297 static int rhashtable_rehash_attach(struct rhashtable *ht,
303                                     struct buc    298                                     struct bucket_table *old_tbl,
304                                     struct buc    299                                     struct bucket_table *new_tbl)
305 {                                                 300 {
                                                   >> 301         /* Protect future_tbl using the first bucket lock. */
                                                   >> 302         spin_lock_bh(old_tbl->locks);
                                                   >> 303 
                                                   >> 304         /* Did somebody beat us to it? */
                                                   >> 305         if (rcu_access_pointer(old_tbl->future_tbl)) {
                                                   >> 306                 spin_unlock_bh(old_tbl->locks);
                                                   >> 307                 return -EEXIST;
                                                   >> 308         }
                                                   >> 309 
306         /* Make insertions go into the new, empty table right away. Deletions      310         /* Make insertions go into the new, empty table right away. Deletions
307          * and lookups will be attempted in both tables until we synchronize.      311          * and lookups will be attempted in both tables until we synchronize.
308          * As cmpxchg() provides strong barriers, we do not need       << 
309          * rcu_assign_pointer().               << 
310          */                                       312          */
                                                   >> 313         rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
311                                                   314 
312         if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,   !! 315         spin_unlock_bh(old_tbl->locks);
313                     new_tbl) != NULL)          << 
314                 return -EEXIST;                << 
315                                                   316 
316         return 0;                                 317         return 0;
317 }                                                 318 }
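
The rewritten attach step shows why the old first-bucket spinlock disappeared: a single cmpxchg() both detects a concurrent attach (a non-NULL future_tbl) and publishes the new table, and its implied full barrier makes rcu_assign_pointer() unnecessary. The same pattern in isolation (a hypothetical stand-alone sketch, with attach_once an invented name):

    #include <linux/atomic.h>

    /* Attach 'new' as the successor table exactly once. Returns -EEXIST if
     * another thread won the race, in which case the caller frees 'new'.
     */
    static int attach_once(struct bucket_table **futurep, struct bucket_table *new)
    {
            if (cmpxchg(futurep, NULL, new) != NULL)
                    return -EEXIST;
            return 0;
    }

The 4.17 code needed a lock purely to make the "check future_tbl, then set it" sequence atomic; the compare-and-exchange collapses that into one instruction.
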
318                                                   319 
319 static int rhashtable_rehash_table(struct rhas    320 static int rhashtable_rehash_table(struct rhashtable *ht)
320 {                                                 321 {
321         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);      322         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
322         struct bucket_table *new_tbl;             323         struct bucket_table *new_tbl;
323         struct rhashtable_walker *walker;         324         struct rhashtable_walker *walker;
324         unsigned int old_hash;                    325         unsigned int old_hash;
325         int err;                                  326         int err;
326                                                   327 
327         new_tbl = rht_dereference(old_tbl->future_tbl, ht);      328         new_tbl = rht_dereference(old_tbl->future_tbl, ht);
328         if (!new_tbl)                             329         if (!new_tbl)
329                 return 0;                         330                 return 0;
330                                                   331 
331         for (old_hash = 0; old_hash < old_tbl-    332         for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
332                 err = rhashtable_rehash_chain(ht, old_hash);      333                 err = rhashtable_rehash_chain(ht, old_hash);
333                 if (err)                          334                 if (err)
334                         return err;               335                         return err;
335                 cond_resched();                   336                 cond_resched();
336         }                                         337         }
337                                                   338 
338         /* Publish the new table pointer. */      339         /* Publish the new table pointer. */
339         rcu_assign_pointer(ht->tbl, new_tbl);     340         rcu_assign_pointer(ht->tbl, new_tbl);
340                                                   341 
341         spin_lock(&ht->lock);                     342         spin_lock(&ht->lock);
342         list_for_each_entry(walker, &old_tbl->walkers, list)      343         list_for_each_entry(walker, &old_tbl->walkers, list)
343                 walker->tbl = NULL;               344                 walker->tbl = NULL;
                                                   >> 345         spin_unlock(&ht->lock);
344                                                   346 
345         /* Wait for readers. All new readers will see the new      347         /* Wait for readers. All new readers will see the new
346          * table, and thus no references to the old table will     348          * table, and thus no references to the old table will
347          * remain.                                349          * remain.
348          * We do this inside the locked region so that       << 
349          * rhashtable_walk_stop() can use rcu_head_after_call_rcu()   << 
350          * to check if it should not re-link the table.      << 
351          */                                       350          */
352         call_rcu(&old_tbl->rcu, bucket_table_f    351         call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
353         spin_unlock(&ht->lock);                << 
354                                                   352 
355         return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;      353         return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
356 }                                                 354 }
357                                                   355 
358 static int rhashtable_rehash_alloc(struct rhashtable *ht,      356 static int rhashtable_rehash_alloc(struct rhashtable *ht,
359                                    struct buck    357                                    struct bucket_table *old_tbl,
360                                    unsigned in    358                                    unsigned int size)
361 {                                                 359 {
362         struct bucket_table *new_tbl;             360         struct bucket_table *new_tbl;
363         int err;                                  361         int err;
364                                                   362 
365         ASSERT_RHT_MUTEX(ht);                     363         ASSERT_RHT_MUTEX(ht);
366                                                   364 
367         new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);      365         new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
368         if (new_tbl == NULL)                      366         if (new_tbl == NULL)
369                 return -ENOMEM;                   367                 return -ENOMEM;
370                                                   368 
371         err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);      369         err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
372         if (err)                                  370         if (err)
373                 bucket_table_free(new_tbl);       371                 bucket_table_free(new_tbl);
374                                                   372 
375         return err;                               373         return err;
376 }                                                 374 }
377                                                   375 
378 /**                                               376 /**
379  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups      377  * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
380  * @ht:         the hash table to shrink          378  * @ht:         the hash table to shrink
381  *                                                379  *
382  * This function shrinks the hash table to fit, i.e., the smallest      380  * This function shrinks the hash table to fit, i.e., the smallest
383  * size would not cause it to expand right away automatically.      381  * size would not cause it to expand right away automatically.
384  *                                                382  *
385  * The caller must ensure that no concurrent resizing occurs by holding      383  * The caller must ensure that no concurrent resizing occurs by holding
386  * ht->mutex.                                     384  * ht->mutex.
387  *                                                385  *
388  * The caller must ensure that no concurrent table mutations take place.      386  * The caller must ensure that no concurrent table mutations take place.
389  * It is however valid to have concurrent lookups if they are RCU protected.      387  * It is however valid to have concurrent lookups if they are RCU protected.
390  *                                                388  *
391  * It is valid to have concurrent insertions and deletions protected by per      389  * It is valid to have concurrent insertions and deletions protected by per
392  * bucket locks or concurrent RCU protected lookups and traversals.      390  * bucket locks or concurrent RCU protected lookups and traversals.
393  */                                               391  */
394 static int rhashtable_shrink(struct rhashtable *ht)      392 static int rhashtable_shrink(struct rhashtable *ht)
395 {                                                 393 {
396         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);      394         struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
397         unsigned int nelems = atomic_read(&ht->nelems);      395         unsigned int nelems = atomic_read(&ht->nelems);
398         unsigned int size = 0;                    396         unsigned int size = 0;
399                                                   397 
400         if (nelems)                               398         if (nelems)
401                 size = roundup_pow_of_two(nelems * 3 / 2);      399                 size = roundup_pow_of_two(nelems * 3 / 2);
402         if (size < ht->p.min_size)                400         if (size < ht->p.min_size)
403                 size = ht->p.min_size;            401                 size = ht->p.min_size;
404                                                   402 
405         if (old_tbl->size <= size)                403         if (old_tbl->size <= size)
406                 return 0;                         404                 return 0;
407                                                   405 
408         if (rht_dereference(old_tbl->future_tbl, ht))      406         if (rht_dereference(old_tbl->future_tbl, ht))
409                 return -EEXIST;                   407                 return -EEXIST;
410                                                   408 
411         return rhashtable_rehash_alloc(ht, old    409         return rhashtable_rehash_alloc(ht, old_tbl, size);
412 }                                                 410 }
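
The shrink target is worth seeing with numbers: it aims for 150% of the element count rounded up to a power of two, the smallest size that will not immediately re-trigger the 75% grow threshold. A sketch of the same computation (roundup_pow_of_two() as in linux/log2.h, already included by this file; the example values are assumptions):

    /* With nelems = 100: 100 * 3 / 2 = 150, rounded up to 256. A 1024-bucket
     * table therefore shrinks to 256, while a 256-bucket table is left alone.
     */
    static unsigned int shrink_target(unsigned int nelems, unsigned int min_size)
    {
            unsigned int size = 0;

            if (nelems)
                    size = roundup_pow_of_two(nelems * 3 / 2);
            if (size < min_size)
                    size = min_size;
            return size;    /* shrink only if tbl->size > size */
    }
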
413                                                   411 
414 static void rht_deferred_worker(struct work_struct *work)      412 static void rht_deferred_worker(struct work_struct *work)
415 {                                                 413 {
416         struct rhashtable *ht;                    414         struct rhashtable *ht;
417         struct bucket_table *tbl;                 415         struct bucket_table *tbl;
418         int err = 0;                              416         int err = 0;
419                                                   417 
420         ht = container_of(work, struct rhashtable, run_work);      418         ht = container_of(work, struct rhashtable, run_work);
421         mutex_lock(&ht->mutex);                   419         mutex_lock(&ht->mutex);
422                                                   420 
423         tbl = rht_dereference(ht->tbl, ht);       421         tbl = rht_dereference(ht->tbl, ht);
424         tbl = rhashtable_last_table(ht, tbl);     422         tbl = rhashtable_last_table(ht, tbl);
425                                                   423 
426         if (rht_grow_above_75(ht, tbl))           424         if (rht_grow_above_75(ht, tbl))
427                 err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);      425                 err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
428         else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))      426         else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
429                 err = rhashtable_shrink(ht);      427                 err = rhashtable_shrink(ht);
430         else if (tbl->nest)                       428         else if (tbl->nest)
431                 err = rhashtable_rehash_alloc(ht, tbl, tbl->size);      429                 err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
432                                                   430 
433         if (!err || err == -EEXIST) {          !! 431         if (!err)
434                 int nerr;                      !! 432                 err = rhashtable_rehash_table(ht);
435                                                << 
436                 nerr = rhashtable_rehash_table(ht);   << 
437                 err = err ?: nerr;             << 
438         }                                      << 
439                                                   433 
440         mutex_unlock(&ht->mutex);                 434         mutex_unlock(&ht->mutex);
441                                                   435 
442         if (err)                                  436         if (err)
443                 schedule_work(&ht->run_work);     437                 schedule_work(&ht->run_work);
444 }                                                 438 }
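
Both versions drive resizing from this worker with the same load-factor policy; only the rehash bookkeeping around it changed. The thresholds in numbers (illustrative checks with invented names, not the kernel's rht_grow_above_75()/rht_shrink_below_30() inlines):

    /* For a 64-bucket table: grow once nelems > 48, and, when
     * automatic_shrinking is enabled, shrink once nelems < 19.
     */
    static int want_grow(unsigned int nelems, unsigned int size)
    {
            return nelems > size * 3 / 4;           /* 75% load factor */
    }

    static int want_shrink(unsigned int nelems, unsigned int size)
    {
            return nelems < size * 3 / 10;          /* 30% load factor */
    }

Keeping the shrink threshold well below half the grow threshold leaves hysteresis, so a table hovering around one boundary does not oscillate between sizes.
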
445                                                   439 
446 static int rhashtable_insert_rehash(struct rhashtable *ht,      440 static int rhashtable_insert_rehash(struct rhashtable *ht,
447                                     struct buc    441                                     struct bucket_table *tbl)
448 {                                                 442 {
449         struct bucket_table *old_tbl;             443         struct bucket_table *old_tbl;
450         struct bucket_table *new_tbl;             444         struct bucket_table *new_tbl;
451         unsigned int size;                        445         unsigned int size;
452         int err;                                  446         int err;
453                                                   447 
454         old_tbl = rht_dereference_rcu(ht->tbl, ht);      448         old_tbl = rht_dereference_rcu(ht->tbl, ht);
455                                                   449 
456         size = tbl->size;                         450         size = tbl->size;
457                                                   451 
458         err = -EBUSY;                             452         err = -EBUSY;
459                                                   453 
460         if (rht_grow_above_75(ht, tbl))           454         if (rht_grow_above_75(ht, tbl))
461                 size *= 2;                        455                 size *= 2;
462         /* Do not schedule more than one rehash */      456         /* Do not schedule more than one rehash */
463         else if (old_tbl != tbl)                  457         else if (old_tbl != tbl)
464                 goto fail;                        458                 goto fail;
465                                                   459 
466         err = -ENOMEM;                            460         err = -ENOMEM;
467                                                   461 
468         new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);   !! 462         new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
469         if (new_tbl == NULL)                      463         if (new_tbl == NULL)
470                 goto fail;                        464                 goto fail;
471                                                   465 
472         err = rhashtable_rehash_attach(ht, tbl    466         err = rhashtable_rehash_attach(ht, tbl, new_tbl);
473         if (err) {                                467         if (err) {
474                 bucket_table_free(new_tbl);       468                 bucket_table_free(new_tbl);
475                 if (err == -EEXIST)               469                 if (err == -EEXIST)
476                         err = 0;                  470                         err = 0;
477         } else                                    471         } else
478                 schedule_work(&ht->run_work);     472                 schedule_work(&ht->run_work);
479                                                   473 
480         return err;                               474         return err;
481                                                   475 
482 fail:                                             476 fail:
483         /* Do not fail the insert if someone else did a rehash. */      477         /* Do not fail the insert if someone else did a rehash. */
484         if (likely(rcu_access_pointer(tbl->future_tbl)))   !! 478         if (likely(rcu_dereference_raw(tbl->future_tbl)))
485                 return 0;                         479                 return 0;
486                                                   480 
487         /* Schedule async rehash to retry allo    481         /* Schedule async rehash to retry allocation in process context. */
488         if (err == -ENOMEM)                       482         if (err == -ENOMEM)
489                 schedule_work(&ht->run_work);     483                 schedule_work(&ht->run_work);
490                                                   484 
491         return err;                               485         return err;
492 }                                                 486 }
493                                                   487 
494 static void *rhashtable_lookup_one(struct rhashtable *ht,      488 static void *rhashtable_lookup_one(struct rhashtable *ht,
495                                    struct rhash_lock_head __rcu **bkt,   << 
496                                    struct bucket_table *tbl, unsigned int hash,      489                                    struct bucket_table *tbl, unsigned int hash,
497                                    const void *key, struct rhash_head *obj)      490                                    const void *key, struct rhash_head *obj)
498 {                                                 491 {
499         struct rhashtable_compare_arg arg = {     492         struct rhashtable_compare_arg arg = {
500                 .ht = ht,                         493                 .ht = ht,
501                 .key = key,                       494                 .key = key,
502         };                                        495         };
503         struct rhash_head __rcu **pprev = NULL;   !! 496         struct rhash_head __rcu **pprev;
504         struct rhash_head *head;                  497         struct rhash_head *head;
505         int elasticity;                           498         int elasticity;
506                                                   499 
507         elasticity = RHT_ELASTICITY;              500         elasticity = RHT_ELASTICITY;
508         rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {   !! 501         pprev = rht_bucket_var(tbl, hash);
                                                   >> 502         rht_for_each_continue(head, *pprev, tbl, hash) {
509                 struct rhlist_head *list;         503                 struct rhlist_head *list;
510                 struct rhlist_head *plist;        504                 struct rhlist_head *plist;
511                                                   505 
512                 elasticity--;                     506                 elasticity--;
513                 if (!key ||                       507                 if (!key ||
514                     (ht->p.obj_cmpfn ?            508                     (ht->p.obj_cmpfn ?
515                      ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :      509                      ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
516                      rhashtable_compare(&arg, rht_obj(ht, head)))) {      510                      rhashtable_compare(&arg, rht_obj(ht, head)))) {
517                         pprev = &head->next;      511                         pprev = &head->next;
518                         continue;                 512                         continue;
519                 }                                 513                 }
520                                                   514 
521                 if (!ht->rhlist)                  515                 if (!ht->rhlist)
522                         return rht_obj(ht, head);      516                         return rht_obj(ht, head);
523                                                   517 
524                 list = container_of(obj, struct rhlist_head, rhead);      518                 list = container_of(obj, struct rhlist_head, rhead);
525                 plist = container_of(head, struct rhlist_head, rhead);      519                 plist = container_of(head, struct rhlist_head, rhead);
526                                                   520 
527                 RCU_INIT_POINTER(list->next, plist);      521                 RCU_INIT_POINTER(list->next, plist);
528                 head = rht_dereference_bucket(head->next, tbl, hash);      522                 head = rht_dereference_bucket(head->next, tbl, hash);
529                 RCU_INIT_POINTER(list->rhead.next, head);      523                 RCU_INIT_POINTER(list->rhead.next, head);
530                 if (pprev)                     !! 524                 rcu_assign_pointer(*pprev, obj);
531                         rcu_assign_pointer(*pprev, obj);   << 
532                 else                           << 
533                         /* Need to preserve the bit lock */   << 
534                         rht_assign_locked(bkt, obj);   << 
535                                                   525 
536                 return NULL;                      526                 return NULL;
537         }                                         527         }
538                                                   528 
539         if (elasticity <= 0)                      529         if (elasticity <= 0)
540                 return ERR_PTR(-EAGAIN);          530                 return ERR_PTR(-EAGAIN);
541                                                   531 
542         return ERR_PTR(-ENOENT);                  532         return ERR_PTR(-ENOENT);
543 }                                                 533 }
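
rhashtable_lookup_one() also enforces the chain-length bound: walking more than RHT_ELASTICITY entries (16 in current kernels) without a match reports -EAGAIN, which makes the insert path schedule a rehash against pathological chains, while exhausting the chain cleanly reports -ENOENT. The rule in isolation (a hypothetical, simplified list walk; demo_node and bounded_walk are invented for the example):

    #include <linux/err.h>

    struct demo_node {
            struct demo_node *next;
            int key;
    };

    static void *bounded_walk(struct demo_node *head, int key, int elasticity)
    {
            for (; head; head = head->next) {
                    if (--elasticity < 0)
                            return ERR_PTR(-EAGAIN);        /* chain too long */
                    if (head->key == key)
                            return head;
            }
            return ERR_PTR(-ENOENT);                        /* clean miss */
    }
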
544                                                   534 
545 static struct bucket_table *rhashtable_insert_one(   !! 535 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
546         struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,   !! 536                                                   struct bucket_table *tbl,
547         struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,   !! 537                                                   unsigned int hash,
548         void *data)                            !! 538                                                   struct rhash_head *obj,
                                                   >> 539                                                   void *data)
549 {                                                 540 {
                                                   >> 541         struct rhash_head __rcu **pprev;
550         struct bucket_table *new_tbl;             542         struct bucket_table *new_tbl;
551         struct rhash_head *head;                  543         struct rhash_head *head;
552                                                   544 
553         if (!IS_ERR_OR_NULL(data))                545         if (!IS_ERR_OR_NULL(data))
554                 return ERR_PTR(-EEXIST);          546                 return ERR_PTR(-EEXIST);
555                                                   547 
556         if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)      548         if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
557                 return ERR_CAST(data);            549                 return ERR_CAST(data);
558                                                   550 
559         new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);   !! 551         new_tbl = rcu_dereference(tbl->future_tbl);
560         if (new_tbl)                              552         if (new_tbl)
561                 return new_tbl;                   553                 return new_tbl;
562                                                   554 
563         if (PTR_ERR(data) != -ENOENT)             555         if (PTR_ERR(data) != -ENOENT)
564                 return ERR_CAST(data);            556                 return ERR_CAST(data);
565                                                   557 
566         if (unlikely(rht_grow_above_max(ht, tbl)))      558         if (unlikely(rht_grow_above_max(ht, tbl)))
567                 return ERR_PTR(-E2BIG);           559                 return ERR_PTR(-E2BIG);
568                                                   560 
569         if (unlikely(rht_grow_above_100(ht, tbl)))      561         if (unlikely(rht_grow_above_100(ht, tbl)))
570                 return ERR_PTR(-EAGAIN);          562                 return ERR_PTR(-EAGAIN);
571                                                   563 
572         head = rht_ptr(bkt, tbl, hash);        !! 564         pprev = rht_bucket_insert(ht, tbl, hash);
                                                   >> 565         if (!pprev)
                                                   >> 566                 return ERR_PTR(-ENOMEM);
                                                   >> 567 
                                                   >> 568         head = rht_dereference_bucket(*pprev, tbl, hash);
573                                                   569 
574         RCU_INIT_POINTER(obj->next, head);        570         RCU_INIT_POINTER(obj->next, head);
575         if (ht->rhlist) {                         571         if (ht->rhlist) {
576                 struct rhlist_head *list;         572                 struct rhlist_head *list;
577                                                   573 
578                 list = container_of(obj, struct rhlist_head, rhead);      574                 list = container_of(obj, struct rhlist_head, rhead);
579                 RCU_INIT_POINTER(list->next, NULL);      575                 RCU_INIT_POINTER(list->next, NULL);
580         }                                         576         }
581                                                   577 
582         /* bkt is always the head of the list, so it holds   !! 578         rcu_assign_pointer(*pprev, obj);
583          * the lock, which we need to preserve << 
584          */                                    << 
585         rht_assign_locked(bkt, obj);           << 
586                                                   579 
587         atomic_inc(&ht->nelems);                  580         atomic_inc(&ht->nelems);
588         if (rht_grow_above_75(ht, tbl))           581         if (rht_grow_above_75(ht, tbl))
589                 schedule_work(&ht->run_work);     582                 schedule_work(&ht->run_work);
590                                                   583 
591         return NULL;                              584         return NULL;
592 }                                                 585 }
593                                                   586 
594 static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,      587 static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
595                                    struct rhas    588                                    struct rhash_head *obj)
596 {                                                 589 {
597         struct bucket_table *new_tbl;             590         struct bucket_table *new_tbl;
598         struct bucket_table *tbl;                 591         struct bucket_table *tbl;
599         struct rhash_lock_head __rcu **bkt;    << 
600         unsigned long flags;                   << 
601         unsigned int hash;                        592         unsigned int hash;
                                                   >> 593         spinlock_t *lock;
602         void *data;                               594         void *data;
603                                                   595 
604         new_tbl = rcu_dereference(ht->tbl);    !! 596         tbl = rcu_dereference(ht->tbl);
605                                                   597 
606         do {                                   !! 598         /* All insertions must grab the oldest table containing
                                                   >> 599          * the hashed bucket that is yet to be rehashed.
                                                   >> 600          */
                                                   >> 601         for (;;) {
                                                   >> 602                 hash = rht_head_hashfn(ht, tbl, obj, ht->p);
                                                   >> 603                 lock = rht_bucket_lock(tbl, hash);
                                                   >> 604                 spin_lock_bh(lock);
                                                   >> 605 
                                                   >> 606                 if (tbl->rehash <= hash)
                                                   >> 607                         break;
                                                   >> 608 
                                                   >> 609                 spin_unlock_bh(lock);
                                                   >> 610                 tbl = rcu_dereference(tbl->future_tbl);
                                                   >> 611         }
                                                   >> 612 
                                                   >> 613         data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
                                                   >> 614         new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
                                                   >> 615         if (PTR_ERR(new_tbl) != -EEXIST)
                                                   >> 616                 data = ERR_CAST(new_tbl);
                                                   >> 617 
                                                   >> 618         while (!IS_ERR_OR_NULL(new_tbl)) {
607                 tbl = new_tbl;                    619                 tbl = new_tbl;
608                 hash = rht_head_hashfn(ht, tbl    620                 hash = rht_head_hashfn(ht, tbl, obj, ht->p);
609                 if (rcu_access_pointer(tbl->fu !! 621                 spin_lock_nested(rht_bucket_lock(tbl, hash),
610                         /* Failure is OK */    !! 622                                  SINGLE_DEPTH_NESTING);
611                         bkt = rht_bucket_var(t << 
612                 else                           << 
613                         bkt = rht_bucket_inser << 
614                 if (bkt == NULL) {             << 
615                         new_tbl = rht_derefere << 
616                         data = ERR_PTR(-EAGAIN << 
617                 } else {                       << 
618                         flags = rht_lock(tbl,  << 
619                         data = rhashtable_look << 
620                                                << 
621                         new_tbl = rhashtable_i << 
622                                                << 
623                         if (PTR_ERR(new_tbl) ! << 
624                                 data = ERR_CAS << 
625                                                   623 
626                         rht_unlock(tbl, bkt, f !! 624                 data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
627                 }                              !! 625                 new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
628         } while (!IS_ERR_OR_NULL(new_tbl));    !! 626                 if (PTR_ERR(new_tbl) != -EEXIST)
                                                   >> 627                         data = ERR_CAST(new_tbl);
                                                   >> 628 
                                                   >> 629                 spin_unlock(rht_bucket_lock(tbl, hash));
                                                   >> 630         }
                                                   >> 631 
                                                   >> 632         spin_unlock_bh(lock);
629                                                   633 
630         if (PTR_ERR(data) == -EAGAIN)             634         if (PTR_ERR(data) == -EAGAIN)
631                 data = ERR_PTR(rhashtable_inse    635                 data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
632                                -EAGAIN);          636                                -EAGAIN);
633                                                   637 
634         return data;                              638         return data;
635 }                                                 639 }
636                                                   640 
637 void *rhashtable_insert_slow(struct rhashtable    641 void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
638                              struct rhash_head    642                              struct rhash_head *obj)
639 {                                                 643 {
640         void *data;                               644         void *data;
641                                                   645 
642         do {                                      646         do {
643                 rcu_read_lock();                  647                 rcu_read_lock();
644                 data = rhashtable_try_insert(h    648                 data = rhashtable_try_insert(ht, key, obj);
645                 rcu_read_unlock();                649                 rcu_read_unlock();
646         } while (PTR_ERR(data) == -EAGAIN);       650         } while (PTR_ERR(data) == -EAGAIN);
647                                                   651 
648         return data;                              652         return data;
649 }                                                 653 }
650 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);        654 EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
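
rhashtable_insert_slow() is the out-of-line fallback used by the inline insertion helpers in <linux/rhashtable.h>. A minimal sketch of a typical caller going through that fast path (my_obj, my_params and my_add are illustrative, not part of this file):

    struct my_obj {
            u32 key;
            struct rhash_head node;
    };

    static const struct rhashtable_params my_params = {
            .key_len     = sizeof(u32),
            .key_offset  = offsetof(struct my_obj, key),
            .head_offset = offsetof(struct my_obj, node),
    };

    static int my_add(struct rhashtable *ht, struct my_obj *obj)
    {
            /* 0 on success, -EEXIST if the key is already present;
             * falls back to rhashtable_insert_slow() internally. */
            return rhashtable_lookup_insert_fast(ht, &obj->node, my_params);
    }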
651                                                   655 
652 /**                                               656 /**
653  * rhashtable_walk_enter - Initialise an iterator    657
654  * @ht:         Table to walk over                658  * @ht:         Table to walk over
655  * @iter:       Hash table Iterator               659  * @iter:       Hash table Iterator
656  *                                                660  *
657  * This function prepares a hash table walk.      661  * This function prepares a hash table walk.
658  *                                                662  *
659  * Note that if you restart a walk after rhashtable_walk_stop you     663
660  * may see the same object twice.  Also, you may miss objects if      664
661  * there are removals in between rhashtable_walk_stop and the next    665
662  * call to rhashtable_walk_start.                 666  * call to rhashtable_walk_start.
663  *                                                667  *
664  * For a completely stable walk you should construct your own data    668
665  * structure outside the hash table.              669  * structure outside the hash table.
666  *                                                670  *
667  * This function may be called from any process context, including   !! 671  * This function may sleep so you must not call it from interrupt
668  * non-preemptable context, but cannot be called from softirq or      !! 672  * context or with spin locks held.
669  * hardirq context.                            <<
670  *                                                673  *
671  * You must call rhashtable_walk_exit after this function returns.    674
672  */                                               675  */
673 void rhashtable_walk_enter(struct rhashtable *    676 void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
674 {                                                 677 {
675         iter->ht = ht;                            678         iter->ht = ht;
676         iter->p = NULL;                           679         iter->p = NULL;
677         iter->slot = 0;                           680         iter->slot = 0;
678         iter->skip = 0;                           681         iter->skip = 0;
679         iter->end_of_table = 0;                   682         iter->end_of_table = 0;
680                                                   683 
681         spin_lock(&ht->lock);                     684         spin_lock(&ht->lock);
682         iter->walker.tbl =                        685         iter->walker.tbl =
683                 rcu_dereference_protected(ht->    686                 rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
684         list_add(&iter->walker.list, &iter->wa    687         list_add(&iter->walker.list, &iter->walker.tbl->walkers);
685         spin_unlock(&ht->lock);                   688         spin_unlock(&ht->lock);
686 }                                                 689 }
687 EXPORT_SYMBOL_GPL(rhashtable_walk_enter);         690 EXPORT_SYMBOL_GPL(rhashtable_walk_enter);
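
Taken together with rhashtable_walk_exit/start/next/stop below, the canonical iteration pattern looks roughly like this sketch (my_walk and my_obj are illustrative, not part of this file):

    static void my_walk(struct rhashtable *ht)
    {
            struct rhashtable_iter iter;
            struct my_obj *obj;

            rhashtable_walk_enter(ht, &iter);
            rhashtable_walk_start(&iter);

            while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                    if (IS_ERR(obj)) {
                            if (PTR_ERR(obj) == -EAGAIN)
                                    continue;  /* resize: iterator rewound */
                            break;
                    }
                    /* ... use obj; RCU read lock is held, do not sleep ... */
            }

            rhashtable_walk_stop(&iter);
            rhashtable_walk_exit(&iter);
    }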
688                                                   691 
689 /**                                               692 /**
690  * rhashtable_walk_exit - Free an iterator        693  * rhashtable_walk_exit - Free an iterator
691  * @iter:       Hash table Iterator               694  * @iter:       Hash table Iterator
692  *                                                695  *
693  * This function frees resources allocated by rhashtable_walk_enter. !! 696  * This function frees resources allocated by rhashtable_walk_init.
694  */                                               697  */
695 void rhashtable_walk_exit(struct rhashtable_it    698 void rhashtable_walk_exit(struct rhashtable_iter *iter)
696 {                                                 699 {
697         spin_lock(&iter->ht->lock);               700         spin_lock(&iter->ht->lock);
698         if (iter->walker.tbl)                     701         if (iter->walker.tbl)
699                 list_del(&iter->walker.list);     702                 list_del(&iter->walker.list);
700         spin_unlock(&iter->ht->lock);             703         spin_unlock(&iter->ht->lock);
701 }                                                 704 }
702 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);          705 EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
703                                                   706 
704 /**                                               707 /**
705  * rhashtable_walk_start_check - Start a hash table walk    708
706  * @iter:       Hash table iterator               709  * @iter:       Hash table iterator
707  *                                                710  *
708  * Start a hash table walk at the current iterator position.  Note that we take    711
709  * the RCU lock in all cases including when we return an error.  So you must       712
710  * always call rhashtable_walk_stop to clean up.                                   713
711  *                                                714  *
712  * Returns zero if successful.                    715  * Returns zero if successful.
713  *                                                716  *
714  * Returns -EAGAIN if resize event occurred.  Note that the iterator  !! 717  * Returns -EAGAIN if resize event occured.  Note that the iterator
715  * will rewind back to the beginning and you may use it immediately       718
716  * by calling rhashtable_walk_next.               719  * by calling rhashtable_walk_next.
717  *                                                720  *
718  * rhashtable_walk_start is defined as an inline variant that returns    721
719  * void. This is preferred in cases where the caller would ignore        722
720  * resize events and always continue.             723  * resize events and always continue.
721  */                                               724  */
722 int rhashtable_walk_start_check(struct rhashta    725 int rhashtable_walk_start_check(struct rhashtable_iter *iter)
723         __acquires(RCU)                           726         __acquires(RCU)
724 {                                                 727 {
725         struct rhashtable *ht = iter->ht;         728         struct rhashtable *ht = iter->ht;
726         bool rhlist = ht->rhlist;              << 
727                                                   729 
728         rcu_read_lock();                          730         rcu_read_lock();
729                                                   731 
730         spin_lock(&ht->lock);                     732         spin_lock(&ht->lock);
731         if (iter->walker.tbl)                     733         if (iter->walker.tbl)
732                 list_del(&iter->walker.list);     734                 list_del(&iter->walker.list);
733         spin_unlock(&ht->lock);                   735         spin_unlock(&ht->lock);
734                                                   736 
735         if (iter->end_of_table)                !! 737         if (!iter->walker.tbl && !iter->end_of_table) {
736                 return 0;                      << 
737         if (!iter->walker.tbl) {               << 
738                 iter->walker.tbl = rht_derefer    738                 iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
739                 iter->slot = 0;                << 
740                 iter->skip = 0;                << 
741                 return -EAGAIN;                   739                 return -EAGAIN;
742         }                                         740         }
743                                                   741 
744         if (iter->p && !rhlist) {              << 
745                 /*                             << 
746                  * We need to validate that 'p' is still in the table, and  <<
747                  * if so, update 'skip'                                     <<
748                  */                            << 
749                 struct rhash_head *p;          << 
750                 int skip = 0;                  << 
751                 rht_for_each_rcu(p, iter->walk << 
752                         skip++;                << 
753                         if (p == iter->p) {    << 
754                                 iter->skip = s << 
755                                 goto found;    << 
756                         }                      << 
757                 }                              << 
758                 iter->p = NULL;                << 
759         } else if (iter->p && rhlist) {        << 
760                 /* Need to validate that 'list' is still in the table, and  <<
761                  * if so, update 'skip' and 'p'                             <<
762                  */                            << 
763                 struct rhash_head *p;          << 
764                 struct rhlist_head *list;      << 
765                 int skip = 0;                  << 
766                 rht_for_each_rcu(p, iter->walk << 
767                         for (list = container_ << 
768                              list;             << 
769                              list = rcu_derefe << 
770                                 skip++;        << 
771                                 if (list == it << 
772                                         iter-> << 
773                                         iter-> << 
774                                         goto f << 
775                                 }              << 
776                         }                      << 
777                 }                              << 
778                 iter->p = NULL;                << 
779         }                                      << 
780 found:                                         << 
781         return 0;                                 742         return 0;
782 }                                                 743 }
783 EXPORT_SYMBOL_GPL(rhashtable_walk_start_check)    744 EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
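
Because -EAGAIN from rhashtable_walk_start_check() only reports a rewind, a caller that cannot tolerate seeing objects twice can record it, while everyone else may simply proceed. A small illustrative wrapper (my_walk_begin is not part of this file):

    static bool my_walk_begin(struct rhashtable_iter *iter)
    {
            /* Returns true if the iterator was rewound by a resize.
             * RCU is held on return either way, so a matching
             * rhashtable_walk_stop() is always required. */
            return rhashtable_walk_start_check(iter) == -EAGAIN;
    }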
784                                                   745 
785 /**                                               746 /**
786  * __rhashtable_walk_find_next - Find the next element in a table (or the first    747
787  * one in case of a new walk).                    748  * one in case of a new walk).
788  *                                                749  *
789  * @iter:       Hash table iterator               750  * @iter:       Hash table iterator
790  *                                                751  *
791  * Returns the found object or NULL when the end of the table is reached.    752
792  *                                                753  *
793  * Returns -EAGAIN if resize event occurred.      754  * Returns -EAGAIN if resize event occurred.
794  */                                               755  */
795 static void *__rhashtable_walk_find_next(struc    756 static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
796 {                                                 757 {
797         struct bucket_table *tbl = iter->walke    758         struct bucket_table *tbl = iter->walker.tbl;
798         struct rhlist_head *list = iter->list;    759         struct rhlist_head *list = iter->list;
799         struct rhashtable *ht = iter->ht;         760         struct rhashtable *ht = iter->ht;
800         struct rhash_head *p = iter->p;           761         struct rhash_head *p = iter->p;
801         bool rhlist = ht->rhlist;                 762         bool rhlist = ht->rhlist;
802                                                   763 
803         if (!tbl)                                 764         if (!tbl)
804                 return NULL;                      765                 return NULL;
805                                                   766 
806         for (; iter->slot < tbl->size; iter->s    767         for (; iter->slot < tbl->size; iter->slot++) {
807                 int skip = iter->skip;            768                 int skip = iter->skip;
808                                                   769 
809                 rht_for_each_rcu(p, tbl, iter-    770                 rht_for_each_rcu(p, tbl, iter->slot) {
810                         if (rhlist) {             771                         if (rhlist) {
811                                 list = contain    772                                 list = container_of(p, struct rhlist_head,
812                                                   773                                                     rhead);
813                                 do {              774                                 do {
814                                         if (!s    775                                         if (!skip)
815                                                   776                                                 goto next;
816                                         skip--    777                                         skip--;
817                                         list =    778                                         list = rcu_dereference(list->next);
818                                 } while (list)    779                                 } while (list);
819                                                   780 
820                                 continue;         781                                 continue;
821                         }                         782                         }
822                         if (!skip)                783                         if (!skip)
823                                 break;            784                                 break;
824                         skip--;                   785                         skip--;
825                 }                                 786                 }
826                                                   787 
827 next:                                             788 next:
828                 if (!rht_is_a_nulls(p)) {         789                 if (!rht_is_a_nulls(p)) {
829                         iter->skip++;             790                         iter->skip++;
830                         iter->p = p;              791                         iter->p = p;
831                         iter->list = list;        792                         iter->list = list;
832                         return rht_obj(ht, rhl    793                         return rht_obj(ht, rhlist ? &list->rhead : p);
833                 }                                 794                 }
834                                                   795 
835                 iter->skip = 0;                   796                 iter->skip = 0;
836         }                                         797         }
837                                                   798 
838         iter->p = NULL;                           799         iter->p = NULL;
839                                                   800 
840         /* Ensure we see any new tables. */       801         /* Ensure we see any new tables. */
841         smp_rmb();                                802         smp_rmb();
842                                                   803 
843         iter->walker.tbl = rht_dereference_rcu    804         iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
844         if (iter->walker.tbl) {                   805         if (iter->walker.tbl) {
845                 iter->slot = 0;                   806                 iter->slot = 0;
846                 iter->skip = 0;                   807                 iter->skip = 0;
847                 return ERR_PTR(-EAGAIN);          808                 return ERR_PTR(-EAGAIN);
848         } else {                                  809         } else {
849                 iter->end_of_table = true;        810                 iter->end_of_table = true;
850         }                                         811         }
851                                                   812 
852         return NULL;                              813         return NULL;
853 }                                                 814 }
854                                                   815 
855 /**                                               816 /**
856  * rhashtable_walk_next - Return the next object and advance the iterator    817
857  * @iter:       Hash table iterator               818  * @iter:       Hash table iterator
858  *                                                819  *
859  * Note that you must call rhashtable_walk_stop when you are finished    820
860  * with the walk.                                 821  * with the walk.
861  *                                                822  *
862  * Returns the next object or NULL when the end of the table is reached.    823
863  *                                                824  *
864  * Returns -EAGAIN if resize event occurred.  Note that the iterator    825
865  * will rewind back to the beginning and you may continue to use it.    826
866  */                                               827  */
867 void *rhashtable_walk_next(struct rhashtable_i    828 void *rhashtable_walk_next(struct rhashtable_iter *iter)
868 {                                                 829 {
869         struct rhlist_head *list = iter->list;    830         struct rhlist_head *list = iter->list;
870         struct rhashtable *ht = iter->ht;         831         struct rhashtable *ht = iter->ht;
871         struct rhash_head *p = iter->p;           832         struct rhash_head *p = iter->p;
872         bool rhlist = ht->rhlist;                 833         bool rhlist = ht->rhlist;
873                                                   834 
874         if (p) {                                  835         if (p) {
875                 if (!rhlist || !(list = rcu_de    836                 if (!rhlist || !(list = rcu_dereference(list->next))) {
876                         p = rcu_dereference(p-    837                         p = rcu_dereference(p->next);
877                         list = container_of(p,    838                         list = container_of(p, struct rhlist_head, rhead);
878                 }                                 839                 }
879                 if (!rht_is_a_nulls(p)) {         840                 if (!rht_is_a_nulls(p)) {
880                         iter->skip++;             841                         iter->skip++;
881                         iter->p = p;              842                         iter->p = p;
882                         iter->list = list;        843                         iter->list = list;
883                         return rht_obj(ht, rhl    844                         return rht_obj(ht, rhlist ? &list->rhead : p);
884                 }                                 845                 }
885                                                   846 
886                 /* At the end of this slot, switch to next one and then find    847
887                  * next entry from that point.    848                  * next entry from that point.
888                  */                               849                  */
889                 iter->skip = 0;                   850                 iter->skip = 0;
890                 iter->slot++;                     851                 iter->slot++;
891         }                                         852         }
892                                                   853 
893         return __rhashtable_walk_find_next(ite    854         return __rhashtable_walk_find_next(iter);
894 }                                                 855 }
895 EXPORT_SYMBOL_GPL(rhashtable_walk_next);          856 EXPORT_SYMBOL_GPL(rhashtable_walk_next);
896                                                   857 
897 /**                                               858 /**
898  * rhashtable_walk_peek - Return the next object but don't advance the iterator    859
899  * @iter:       Hash table iterator               860  * @iter:       Hash table iterator
900  *                                                861  *
901  * Returns the next object or NULL when the end of the table is reached.    862
902  *                                                863  *
903  * Returns -EAGAIN if resize event occurred.  Note that the iterator    864
904  * will rewind back to the beginning and you may continue to use it.    865
905  */                                               866  */
906 void *rhashtable_walk_peek(struct rhashtable_i    867 void *rhashtable_walk_peek(struct rhashtable_iter *iter)
907 {                                                 868 {
908         struct rhlist_head *list = iter->list;    869         struct rhlist_head *list = iter->list;
909         struct rhashtable *ht = iter->ht;         870         struct rhashtable *ht = iter->ht;
910         struct rhash_head *p = iter->p;           871         struct rhash_head *p = iter->p;
911                                                   872 
912         if (p)                                    873         if (p)
913                 return rht_obj(ht, ht->rhlist     874                 return rht_obj(ht, ht->rhlist ? &list->rhead : p);
914                                                   875 
915         /* No object found in current iter, find next one in the table. */    876
916                                                   877 
917         if (iter->skip) {                         878         if (iter->skip) {
918                 /* A nonzero skip value points to the next entry in the table    879
919                  * beyond that last one that was found. Decrement skip so        880
920                  * we find the current value. __rhashtable_walk_find_next        881
921                  * will restore the original value of skip assuming that         882
922                  * the table hasn't changed.      883                  * the table hasn't changed.
923                  */                               884                  */
924                 iter->skip--;                     885                 iter->skip--;
925         }                                         886         }
926                                                   887 
927         return __rhashtable_walk_find_next(ite    888         return __rhashtable_walk_find_next(iter);
928 }                                                 889 }
929 EXPORT_SYMBOL_GPL(rhashtable_walk_peek);          890 EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
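
Peek suits show()-style callbacks that may run more than once for the same position, since the iterator does not advance. A brief illustrative wrapper (my_peek is not part of this file):

    static void *my_peek(struct rhashtable_iter *iter)
    {
            void *obj = rhashtable_walk_peek(iter);

            if (IS_ERR(obj))        /* -EAGAIN after a resize */
                    return NULL;
            return obj;             /* unchanged until rhashtable_walk_next() */
    }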
930                                                   891 
931 /**                                               892 /**
932  * rhashtable_walk_stop - Finish a hash table walk    893
933  * @iter:       Hash table iterator               894  * @iter:       Hash table iterator
934  *                                                895  *
935  * Finish a hash table walk.  Does not reset the iterator to the start of the    896
936  * hash table.                                    897  * hash table.
937  */                                               898  */
938 void rhashtable_walk_stop(struct rhashtable_it    899 void rhashtable_walk_stop(struct rhashtable_iter *iter)
939         __releases(RCU)                           900         __releases(RCU)
940 {                                                 901 {
941         struct rhashtable *ht;                    902         struct rhashtable *ht;
942         struct bucket_table *tbl = iter->walke    903         struct bucket_table *tbl = iter->walker.tbl;
943                                                   904 
944         if (!tbl)                                 905         if (!tbl)
945                 goto out;                         906                 goto out;
946                                                   907 
947         ht = iter->ht;                            908         ht = iter->ht;
948                                                   909 
949         spin_lock(&ht->lock);                     910         spin_lock(&ht->lock);
950         if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))    !! 911         if (tbl->rehash < tbl->size)
951                 /* This bucket table is being freed, don't re-link it. */  <<
952                 iter->walker.tbl = NULL;       << 
953         else                                   << 
954                 list_add(&iter->walker.list, &    912                 list_add(&iter->walker.list, &tbl->walkers);
                                                   >> 913         else
                                                   >> 914                 iter->walker.tbl = NULL;
955         spin_unlock(&ht->lock);                   915         spin_unlock(&ht->lock);
956                                                   916 
                                                   >> 917         iter->p = NULL;
                                                   >> 918 
957 out:                                              919 out:
958         rcu_read_unlock();                        920         rcu_read_unlock();
959 }                                                 921 }
960 EXPORT_SYMBOL_GPL(rhashtable_walk_stop);          922 EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
961                                                   923 
962 static size_t rounded_hashtable_size(const str    924 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
963 {                                                 925 {
964         size_t retsize;                           926         size_t retsize;
965                                                   927 
966         if (params->nelem_hint)                   928         if (params->nelem_hint)
967                 retsize = max(roundup_pow_of_t    929                 retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
968                               (unsigned long)p    930                               (unsigned long)params->min_size);
969         else                                      931         else
970                 retsize = max(HASH_DEFAULT_SIZ    932                 retsize = max(HASH_DEFAULT_SIZE,
971                               (unsigned long)p    933                               (unsigned long)params->min_size);
972                                                   934 
973         return retsize;                           935         return retsize;
974 }                                                 936 }
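
As a worked example of the sizing rule above: with params->nelem_hint = 100 and no min_size, retsize = roundup_pow_of_two(100 * 4 / 3) = roundup_pow_of_two(133) = 256 buckets, so the hinted 100 entries stay well below the 75% growth threshold (192) of the initial table.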
975                                                   937 
976 static u32 rhashtable_jhash2(const void *key,     938 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
977 {                                                 939 {
978         return jhash2(key, length, seed);         940         return jhash2(key, length, seed);
979 }                                                 941 }
980                                                   942 
981 /**                                               943 /**
982  * rhashtable_init - initialize a new hash table    944
983  * @ht:         hash table to be initialized      945  * @ht:         hash table to be initialized
984  * @params:     configuration parameters          946  * @params:     configuration parameters
985  *                                                947  *
986  * Initializes a new hash table based on the provided configuration    948
987  * parameters. A table can be configured either with a variable or     949
988  * fixed length key:                              950  * fixed length key:
989  *                                                951  *
990  * Configuration Example 1: Fixed length keys     952  * Configuration Example 1: Fixed length keys
991  * struct test_obj {                              953  * struct test_obj {
992  *      int                     key;              954  *      int                     key;
993  *      void *                  my_member;        955  *      void *                  my_member;
994  *      struct rhash_head       node;             956  *      struct rhash_head       node;
995  * };                                             957  * };
996  *                                                958  *
997  * struct rhashtable_params params = {            959  * struct rhashtable_params params = {
998  *      .head_offset = offsetof(struct test_obj, node),    960
999  *      .key_offset = offsetof(struct test_obj, key),      961
1000  *      .key_len = sizeof(int),                  962  *      .key_len = sizeof(int),
1001  *      .hashfn = jhash,                         963  *      .hashfn = jhash,
                                                   >> 964  *      .nulls_base = (1U << RHT_BASE_SHIFT),
1002  * };                                            965  * };
1003  *                                               966  *
1004  * Configuration Example 2: Variable length keys    967
1005  * struct test_obj {                             968  * struct test_obj {
1006  *      [...]                                    969  *      [...]
1007  *      struct rhash_head       node;            970  *      struct rhash_head       node;
1008  * };                                            971  * };
1009  *                                               972  *
1010  * u32 my_hash_fn(const void *data, u32 len, u32 seed)    973
1011  * {                                             974  * {
1012  *      struct test_obj *obj = data;             975  *      struct test_obj *obj = data;
1013  *                                               976  *
1014  *      return [... hash ...];                   977  *      return [... hash ...];
1015  * }                                             978  * }
1016  *                                               979  *
1017  * struct rhashtable_params params = {           980  * struct rhashtable_params params = {
1018  *      .head_offset = offsetof(struct test_obj, node),    981
1019  *      .hashfn = jhash,                         982  *      .hashfn = jhash,
1020  *      .obj_hashfn = my_hash_fn,                983  *      .obj_hashfn = my_hash_fn,
1021  * };                                            984  * };
1022  */                                              985  */
1023 int rhashtable_init_noprof(struct rhashtable  !! 986 int rhashtable_init(struct rhashtable *ht,
1024                     const struct rhashtable_p    987                     const struct rhashtable_params *params)
1025 {                                                988 {
1026         struct bucket_table *tbl;                989         struct bucket_table *tbl;
1027         size_t size;                             990         size_t size;
1028                                                  991 
1029         if ((!params->key_len && !params->obj    992         if ((!params->key_len && !params->obj_hashfn) ||
1030             (params->obj_hashfn && !params->o    993             (params->obj_hashfn && !params->obj_cmpfn))
1031                 return -EINVAL;                  994                 return -EINVAL;
1032                                                  995 
                                                   >> 996         if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
                                                   >> 997                 return -EINVAL;
                                                   >> 998 
1033         memset(ht, 0, sizeof(*ht));              999         memset(ht, 0, sizeof(*ht));
1034         mutex_init(&ht->mutex);                  1000         mutex_init(&ht->mutex);
1035         spin_lock_init(&ht->lock);               1001         spin_lock_init(&ht->lock);
1036         memcpy(&ht->p, params, sizeof(*params    1002         memcpy(&ht->p, params, sizeof(*params));
1037                                                  1003 
1038         alloc_tag_record(ht->alloc_tag);      << 
1039                                               << 
1040         if (params->min_size)                    1004         if (params->min_size)
1041                 ht->p.min_size = roundup_pow_    1005                 ht->p.min_size = roundup_pow_of_two(params->min_size);
1042                                                  1006 
1043         /* Cap total entries at 2^31 to avoid nelems overflow. */    1007
1044         ht->max_elems = 1u << 31;                1008         ht->max_elems = 1u << 31;
1045                                                  1009 
1046         if (params->max_size) {                  1010         if (params->max_size) {
1047                 ht->p.max_size = rounddown_po    1011                 ht->p.max_size = rounddown_pow_of_two(params->max_size);
1048                 if (ht->p.max_size < ht->max_    1012                 if (ht->p.max_size < ht->max_elems / 2)
1049                         ht->max_elems = ht->p    1013                         ht->max_elems = ht->p.max_size * 2;
1050         }                                        1014         }
1051                                                  1015 
1052         ht->p.min_size = max_t(u16, ht->p.min    1016         ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
1053                                                  1017 
1054         size = rounded_hashtable_size(&ht->p)    1018         size = rounded_hashtable_size(&ht->p);
1055                                                  1019 
                                                   >> 1020         if (params->locks_mul)
                                                   >> 1021                 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
                                                   >> 1022         else
                                                   >> 1023                 ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
                                                   >> 1024 
1056         ht->key_len = ht->p.key_len;             1025         ht->key_len = ht->p.key_len;
1057         if (!params->hashfn) {                   1026         if (!params->hashfn) {
1058                 ht->p.hashfn = jhash;            1027                 ht->p.hashfn = jhash;
1059                                                  1028 
1060                 if (!(ht->key_len & (sizeof(u    1029                 if (!(ht->key_len & (sizeof(u32) - 1))) {
1061                         ht->key_len /= sizeof    1030                         ht->key_len /= sizeof(u32);
1062                         ht->p.hashfn = rhasht    1031                         ht->p.hashfn = rhashtable_jhash2;
1063                 }                                1032                 }
1064         }                                        1033         }
1065                                                  1034 
1066         /*                                    << 
1067          * This is api initialization and thus we need to guarantee the  <<
1068          * initial rhashtable allocation. Upon failure, retry with the   <<
1069          * smallest possible size with __GFP_NOFAIL semantics.           <<
1070          */                                   << 
1071         tbl = bucket_table_alloc(ht, size, GF    1035         tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
1072         if (unlikely(tbl == NULL)) {          !! 1036         if (tbl == NULL)
1073                 size = max_t(u16, ht->p.min_s !! 1037                 return -ENOMEM;
1074                 tbl = bucket_table_alloc(ht,  << 
1075         }                                     << 
1076                                                  1038 
1077         atomic_set(&ht->nelems, 0);              1039         atomic_set(&ht->nelems, 0);
1078                                                  1040 
1079         RCU_INIT_POINTER(ht->tbl, tbl);          1041         RCU_INIT_POINTER(ht->tbl, tbl);
1080                                                  1042 
1081         INIT_WORK(&ht->run_work, rht_deferred    1043         INIT_WORK(&ht->run_work, rht_deferred_worker);
1082                                                  1044 
1083         return 0;                                1045         return 0;
1084 }                                                1046 }
1085 EXPORT_SYMBOL_GPL(rhashtable_init_noprof);    !! 1047 EXPORT_SYMBOL_GPL(rhashtable_init);
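
A minimal bring-up sketch using the fixed-length-key parameters from Configuration Example 1 above (my_ht, my_setup and my_find are illustrative, not part of this file):

    static struct rhashtable my_ht;

    static int my_setup(void)
    {
            return rhashtable_init(&my_ht, &params);
    }

    static struct test_obj *my_find(int key)
    {
            /* Must be called inside an RCU read-side critical section. */
            return rhashtable_lookup_fast(&my_ht, &key, params);
    }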
1086                                                  1048 
1087 /**                                              1049 /**
1088  * rhltable_init - initialize a new hash list table    1050
1089  * @hlt:        hash list table to be initial    1051  * @hlt:        hash list table to be initialized
1090  * @params:     configuration parameters         1052  * @params:     configuration parameters
1091  *                                               1053  *
1092  * Initializes a new hash list table.            1054  * Initializes a new hash list table.
1093  *                                               1055  *
1094  * See documentation for rhashtable_init.        1056  * See documentation for rhashtable_init.
1095  */                                              1057  */
1096 int rhltable_init_noprof(struct rhltable *hlt !! 1058 int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
1097 {                                                1059 {
1098         int err;                                 1060         int err;
1099                                                  1061 
1100         err = rhashtable_init_noprof(&hlt->ht !! 1062         /* No rhlist NULLs marking for now. */
                                                   >> 1063         if (params->nulls_base)
                                                   >> 1064                 return -EINVAL;
                                                   >> 1065 
                                                   >> 1066         err = rhashtable_init(&hlt->ht, params);
1101         hlt->ht.rhlist = true;                   1067         hlt->ht.rhlist = true;
1102         return err;                              1068         return err;
1103 }                                                1069 }
1104 EXPORT_SYMBOL_GPL(rhltable_init_noprof);      !! 1070 EXPORT_SYMBOL_GPL(rhltable_init);
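
An rhltable stores multiple objects per key; a lookup yields a chain that is walked with rhl_for_each_entry_rcu(). A small sketch, reusing the illustrative my_params from the insertion example earlier (my_entry and my_dump_key are likewise not part of this file):

    struct my_entry {
            u32 key;
            struct rhlist_head list;
    };

    static void my_dump_key(struct rhltable *hlt, u32 key)
    {
            struct rhlist_head *h, *pos;
            struct my_entry *e;

            rcu_read_lock();
            h = rhltable_lookup(hlt, &key, my_params);
            rhl_for_each_entry_rcu(e, pos, h, list)
                    pr_info("key %u -> entry %p\n", key, e);
            rcu_read_unlock();
    }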
1105                                                  1071 
1106 static void rhashtable_free_one(struct rhasht    1072 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
1107                                 void (*free_f    1073                                 void (*free_fn)(void *ptr, void *arg),
1108                                 void *arg)       1074                                 void *arg)
1109 {                                                1075 {
1110         struct rhlist_head *list;                1076         struct rhlist_head *list;
1111                                                  1077 
1112         if (!ht->rhlist) {                       1078         if (!ht->rhlist) {
1113                 free_fn(rht_obj(ht, obj), arg    1079                 free_fn(rht_obj(ht, obj), arg);
1114                 return;                          1080                 return;
1115         }                                        1081         }
1116                                                  1082 
1117         list = container_of(obj, struct rhlis    1083         list = container_of(obj, struct rhlist_head, rhead);
1118         do {                                     1084         do {
1119                 obj = &list->rhead;              1085                 obj = &list->rhead;
1120                 list = rht_dereference(list->    1086                 list = rht_dereference(list->next, ht);
1121                 free_fn(rht_obj(ht, obj), arg    1087                 free_fn(rht_obj(ht, obj), arg);
1122         } while (list);                          1088         } while (list);
1123 }                                                1089 }
1124                                                  1090 
1125 /**                                              1091 /**
1126  * rhashtable_free_and_destroy - free elements and destroy hash table    1092
1127  * @ht:         the hash table to destroy        1093  * @ht:         the hash table to destroy
1128  * @free_fn:    callback to release resources of element    1094
1129  * @arg:        pointer passed to free_fn        1095  * @arg:        pointer passed to free_fn
1130  *                                               1096  *
1131  * Stops an eventual async resize. If defined, invokes free_fn for each    1097
1132  * element to release resources. Please note that RCU protected             1098
1133  * readers may still be accessing the elements. Releasing of resources      1099
1134  * must occur in a compatible manner. Then frees the bucket array.          1100
1135  *                                               1101  *
1136  * This function will eventually sleep to wait for an async resize          1102
1137  * to complete. The caller is responsible that no further write operations  1103
1138  * occur in parallel.                                                       1104
1139  */                                              1105  */
1140 void rhashtable_free_and_destroy(struct rhash    1106 void rhashtable_free_and_destroy(struct rhashtable *ht,
1141                                  void (*free_    1107                                  void (*free_fn)(void *ptr, void *arg),
1142                                  void *arg)      1108                                  void *arg)
1143 {                                                1109 {
1144         struct bucket_table *tbl, *next_tbl;     1110         struct bucket_table *tbl, *next_tbl;
1145         unsigned int i;                          1111         unsigned int i;
1146                                                  1112 
1147         cancel_work_sync(&ht->run_work);         1113         cancel_work_sync(&ht->run_work);
1148                                                  1114 
1149         mutex_lock(&ht->mutex);                  1115         mutex_lock(&ht->mutex);
1150         tbl = rht_dereference(ht->tbl, ht);      1116         tbl = rht_dereference(ht->tbl, ht);
1151 restart:                                         1117 restart:
1152         if (free_fn) {                           1118         if (free_fn) {
1153                 for (i = 0; i < tbl->size; i+    1119                 for (i = 0; i < tbl->size; i++) {
1154                         struct rhash_head *po    1120                         struct rhash_head *pos, *next;
1155                                                  1121 
1156                         cond_resched();          1122                         cond_resched();
1157                         for (pos = rht_ptr_ex !! 1123                         for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
1158                              next = !rht_is_a    1124                              next = !rht_is_a_nulls(pos) ?
1159                                         rht_d    1125                                         rht_dereference(pos->next, ht) : NULL;
1160                              !rht_is_a_nulls(    1126                              !rht_is_a_nulls(pos);
1161                              pos = next,         1127                              pos = next,
1162                              next = !rht_is_a    1128                              next = !rht_is_a_nulls(pos) ?
1163                                         rht_d    1129                                         rht_dereference(pos->next, ht) : NULL)
1164                                 rhashtable_fr    1130                                 rhashtable_free_one(ht, pos, free_fn, arg);
1165                 }                                1131                 }
1166         }                                        1132         }
1167                                                  1133 
1168         next_tbl = rht_dereference(tbl->futur    1134         next_tbl = rht_dereference(tbl->future_tbl, ht);
1169         bucket_table_free(tbl);                  1135         bucket_table_free(tbl);
1170         if (next_tbl) {                          1136         if (next_tbl) {
1171                 tbl = next_tbl;                  1137                 tbl = next_tbl;
1172                 goto restart;                    1138                 goto restart;
1173         }                                        1139         }
1174         mutex_unlock(&ht->mutex);                1140         mutex_unlock(&ht->mutex);
1175 }                                                1141 }
1176 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy    1142 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
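
A minimal teardown sketch for a table whose objects were kmalloc'd (my_free_cb and my_teardown are illustrative, not part of this file):

    static void my_free_cb(void *ptr, void *arg)
    {
            kfree(ptr);     /* ptr is the enclosing object, not the rhash_head */
    }

    static void my_teardown(struct rhashtable *ht)
    {
            rhashtable_free_and_destroy(ht, my_free_cb, NULL);
    }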
1177                                                  1143 
1178 void rhashtable_destroy(struct rhashtable *ht    1144 void rhashtable_destroy(struct rhashtable *ht)
1179 {                                                1145 {
1180         return rhashtable_free_and_destroy(ht    1146         return rhashtable_free_and_destroy(ht, NULL, NULL);
1181 }                                                1147 }
1182 EXPORT_SYMBOL_GPL(rhashtable_destroy);           1148 EXPORT_SYMBOL_GPL(rhashtable_destroy);
1183                                                  1149 
1184 struct rhash_lock_head __rcu **__rht_bucket_n !! 1150 struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
1185         const struct bucket_table *tbl, unsig !! 1151                                             unsigned int hash)
1186 {                                                1152 {
1187         const unsigned int shift = PAGE_SHIFT    1153         const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
                                                   >> 1154         static struct rhash_head __rcu *rhnull =
                                                   >> 1155                 (struct rhash_head __rcu *)NULLS_MARKER(0);
1188         unsigned int index = hash & ((1 << tb    1156         unsigned int index = hash & ((1 << tbl->nest) - 1);
1189         unsigned int size = tbl->size >> tbl-    1157         unsigned int size = tbl->size >> tbl->nest;
1190         unsigned int subhash = hash;             1158         unsigned int subhash = hash;
1191         union nested_table *ntbl;                1159         union nested_table *ntbl;
1192                                                  1160 
1193         ntbl = nested_table_top(tbl);         !! 1161         ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
1194         ntbl = rht_dereference_bucket_rcu(ntb    1162         ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
1195         subhash >>= tbl->nest;                   1163         subhash >>= tbl->nest;
1196                                                  1164 
1197         while (ntbl && size > (1 << shift)) {    1165         while (ntbl && size > (1 << shift)) {
1198                 index = subhash & ((1 << shif    1166                 index = subhash & ((1 << shift) - 1);
1199                 ntbl = rht_dereference_bucket    1167                 ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
1200                                                  1168                                                   tbl, hash);
1201                 size >>= shift;                  1169                 size >>= shift;
1202                 subhash >>= shift;               1170                 subhash >>= shift;
1203         }                                        1171         }
1204                                                  1172 
1205         if (!ntbl)                               1173         if (!ntbl)
1206                 return NULL;                  !! 1174                 return &rhnull;
1207                                                  1175 
1208         return &ntbl[subhash].bucket;            1176         return &ntbl[subhash].bucket;
1209                                                  1177 
1210 }                                                1178 }
1211 EXPORT_SYMBOL_GPL(__rht_bucket_nested);       << 
1212                                               << 
1213 struct rhash_lock_head __rcu **rht_bucket_nes << 
1214         const struct bucket_table *tbl, unsig << 
1215 {                                             << 
1216         static struct rhash_lock_head __rcu * << 
1217                                               << 
1218         if (!rhnull)                          << 
1219                 INIT_RHT_NULLS_HEAD(rhnull);  << 
1220         return __rht_bucket_nested(tbl, hash) << 
1221 }                                             << 
1222 EXPORT_SYMBOL_GPL(rht_bucket_nested);            1179 EXPORT_SYMBOL_GPL(rht_bucket_nested);
1223                                                  1180 
1224 struct rhash_lock_head __rcu **rht_bucket_nes !! 1181 struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
1225         struct rhashtable *ht, struct bucket_ !! 1182                                                    struct bucket_table *tbl,
                                                   >> 1183                                                    unsigned int hash)
1226 {                                                1184 {
1227         const unsigned int shift = PAGE_SHIFT    1185         const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1228         unsigned int index = hash & ((1 << tb    1186         unsigned int index = hash & ((1 << tbl->nest) - 1);
1229         unsigned int size = tbl->size >> tbl-    1187         unsigned int size = tbl->size >> tbl->nest;
1230         union nested_table *ntbl;                1188         union nested_table *ntbl;
                                                   >> 1189         unsigned int shifted;
                                                   >> 1190         unsigned int nhash;
1231                                                  1191 
1232         ntbl = nested_table_top(tbl);         !! 1192         ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
1233         hash >>= tbl->nest;                      1193         hash >>= tbl->nest;
                                                   >> 1194         nhash = index;
                                                   >> 1195         shifted = tbl->nest;
1234         ntbl = nested_table_alloc(ht, &ntbl[i    1196         ntbl = nested_table_alloc(ht, &ntbl[index].table,
1235                                   size <= (1  !! 1197                                   size <= (1 << shift) ? shifted : 0, nhash);
1236                                                  1198 
1237         while (ntbl && size > (1 << shift)) {    1199         while (ntbl && size > (1 << shift)) {
1238                 index = hash & ((1 << shift)     1200                 index = hash & ((1 << shift) - 1);
1239                 size >>= shift;                  1201                 size >>= shift;
1240                 hash >>= shift;                  1202                 hash >>= shift;
                                                   >> 1203                 nhash |= index << shifted;
                                                   >> 1204                 shifted += shift;
1241                 ntbl = nested_table_alloc(ht,    1205                 ntbl = nested_table_alloc(ht, &ntbl[index].table,
1242                                           siz !! 1206                                           size <= (1 << shift) ? shifted : 0,
                                                   >> 1207                                           nhash);
1243         }                                        1208         }
1244                                                  1209 
1245         if (!ntbl)                               1210         if (!ntbl)
1246                 return NULL;                     1211                 return NULL;
1247                                                  1212 
1248         return &ntbl[hash].bucket;               1213         return &ntbl[hash].bucket;
1249                                                  1214 
1250 }                                                1215 }
1251 EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);     1216 EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
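
As a worked example of the nesting arithmetic (assuming 4 KiB pages and 8-byte pointers): shift = PAGE_SHIFT - ilog2(sizeof(void *)) = 12 - 3 = 9, so each nested page holds 512 bucket pointers. For tbl->size = 2^18 with tbl->nest = 9, the top level picks index = hash & 511 among 512 second-level pages, and since size >> nest = 512 = 1 << shift, no further indirection is needed: subhash = hash >> 9 selects the final bucket within that page.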
1252                                                  1217 
