/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE       64UL
#define HASH_MIN_SIZE           4U
#define BUCKET_LOCKS_PER_CPU   128UL

static u32 head_hashfn(struct rhashtable *ht,
                       const struct bucket_table *tbl,
                       const struct rhash_head *he)
{
        return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
        spinlock_t *lock = rht_bucket_lock(tbl, hash);

        return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
                              gfp_t gfp)
{
        unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
        unsigned int nr_pcpus = 2;
#else
        unsigned int nr_pcpus = num_possible_cpus();
#endif

        nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
        size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

        /* Never allocate more than 0.5 locks per bucket */
        size = min_t(unsigned int, size, tbl->size >> 1);

        if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
                if (size * sizeof(spinlock_t) > PAGE_SIZE &&
                    gfp == GFP_KERNEL)
                        tbl->locks = vmalloc(size * sizeof(spinlock_t));
                else
#endif
                tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
                                           gfp);
                if (!tbl->locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
                        spin_lock_init(&tbl->locks[i]);
        }
        tbl->locks_mask = size - 1;

        return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
        if (tbl)
                kvfree(tbl->locks);

        kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
        bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets,
                                               gfp_t gfp)
{
        struct bucket_table *tbl = NULL;
        size_t size;
        int i;

        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
            gfp != GFP_KERNEL)
                tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
        if (tbl == NULL && gfp == GFP_KERNEL)
                tbl = vzalloc(size);
        if (tbl == NULL)
                return NULL;

        tbl->size = nbuckets;

        if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
                bucket_table_free(tbl);
                return NULL;
        }

        INIT_LIST_HEAD(&tbl->walkers);

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

        return tbl;
}

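/* Walk the chain of pending future tables and return the newest one. */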
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
                                                  struct bucket_table *tbl)
{
        struct bucket_table *new_tbl;

        do {
                new_tbl = tbl;
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        } while (tbl);

        return new_tbl;
}

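/* Move the tail entry of the bucket chain at @old_hash into its bucket in
 * the new table.  Returns -ENOENT once the old chain is empty.
 */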
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl = rhashtable_last_table(ht,
                rht_dereference_rcu(old_tbl->future_tbl, ht));
        struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
        int err = -ENOENT;
        struct rhash_head *head, *next, *entry;
        spinlock_t *new_bucket_lock;
        unsigned int new_hash;

        rht_for_each(entry, old_tbl, old_hash) {
                err = 0;
                next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

                if (rht_is_a_nulls(next))
                        break;

                pprev = &entry->next;
        }

        if (err)
                goto out;

        new_hash = head_hashfn(ht, new_tbl, entry);

        new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

        spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
        head = rht_dereference_bucket(new_tbl->buckets[new_hash],
                                      new_tbl, new_hash);

        if (rht_is_a_nulls(head))
                INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
        else
                RCU_INIT_POINTER(entry->next, head);

        rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
        spin_unlock(new_bucket_lock);

        rcu_assign_pointer(*pprev, next);

out:
        return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
                                    unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        spinlock_t *old_bucket_lock;

        old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

        spin_lock_bh(old_bucket_lock);
        while (!rhashtable_rehash_one(ht, old_hash))
                ;
        old_tbl->rehash++;
        spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
                                    struct bucket_table *old_tbl,
                                    struct bucket_table *new_tbl)
{
        /* Protect future_tbl using the first bucket lock. */
        spin_lock_bh(old_tbl->locks);

        /* Did somebody beat us to it? */
        if (rcu_access_pointer(old_tbl->future_tbl)) {
                spin_unlock_bh(old_tbl->locks);
                return -EEXIST;
        }

        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
         */
        rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

        /* Ensure the new table is visible to readers. */
        smp_wmb();

        spin_unlock_bh(old_tbl->locks);

        return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl;
        struct rhashtable_walker *walker;
        unsigned int old_hash;

        new_tbl = rht_dereference(old_tbl->future_tbl, ht);
        if (!new_tbl)
                return 0;

        for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
                rhashtable_rehash_chain(ht, old_hash);

        /* Publish the new table pointer. */
        rcu_assign_pointer(ht->tbl, new_tbl);

        spin_lock(&ht->lock);
        list_for_each_entry(walker, &old_tbl->walkers, list)
                walker->tbl = NULL;
        spin_unlock(&ht->lock);

        /* Wait for readers. All new readers will see the new
         * table, and thus no references to the old table will
         * remain.
         */
        call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

        return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:         the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        int err;

        ASSERT_RHT_MUTEX(ht);

        old_tbl = rhashtable_last_table(ht, old_tbl);

        new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;

        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
        if (err)
                bucket_table_free(new_tbl);

        return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:         the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        unsigned int size;
        int err;

        ASSERT_RHT_MUTEX(ht);

        size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
        if (size < ht->p.min_size)
                size = ht->p.min_size;

        if (old_tbl->size <= size)
                return 0;

        if (rht_dereference(old_tbl->future_tbl, ht))
                return -EEXIST;

        new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;

        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
        if (err)
                bucket_table_free(new_tbl);

        return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
        struct rhashtable *ht;
        struct bucket_table *tbl;
        int err = 0;

        ht = container_of(work, struct rhashtable, run_work);
        mutex_lock(&ht->mutex);

        tbl = rht_dereference(ht->tbl, ht);
        tbl = rhashtable_last_table(ht, tbl);

        if (rht_grow_above_75(ht, tbl))
                rhashtable_expand(ht);
        else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
                rhashtable_shrink(ht);

        err = rhashtable_rehash_table(ht);

        mutex_unlock(&ht->mutex);

        if (err)
                schedule_work(&ht->run_work);
}

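/* Return true if the bucket chain at @hash already holds at least
 * ht->elasticity entries.
 */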
static bool rhashtable_check_elasticity(struct rhashtable *ht,
                                        struct bucket_table *tbl,
                                        unsigned int hash)
{
        unsigned int elasticity = ht->elasticity;
        struct rhash_head *head;

        rht_for_each(head, tbl, hash)
                if (!--elasticity)
                        return true;

        return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht)
{
        struct bucket_table *old_tbl;
        struct bucket_table *new_tbl;
        struct bucket_table *tbl;
        unsigned int size;
        int err;

        old_tbl = rht_dereference_rcu(ht->tbl, ht);
        tbl = rhashtable_last_table(ht, old_tbl);

        size = tbl->size;

        if (rht_grow_above_75(ht, tbl))
                size *= 2;
        /* Do not schedule more than one rehash */
        else if (old_tbl != tbl)
                return -EBUSY;

        new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
        if (new_tbl == NULL) {
                /* Schedule async resize/rehash to retry the allocation
                 * in non-atomic context.
                 */
                schedule_work(&ht->run_work);
                return -ENOMEM;
        }

        err = rhashtable_rehash_attach(ht, tbl, new_tbl);
        if (err) {
                bucket_table_free(new_tbl);
                if (err == -EEXIST)
                        err = 0;
        } else
                schedule_work(&ht->run_work);

        return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
                           struct rhash_head *obj,
                           struct bucket_table *tbl)
{
        struct rhash_head *head;
        unsigned int hash;
        int err;

        tbl = rhashtable_last_table(ht, tbl);
        hash = head_hashfn(ht, tbl, obj);
        spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

        err = -EEXIST;
        if (key && rhashtable_lookup_fast(ht, key, ht->p))
                goto exit;

        err = -E2BIG;
        if (unlikely(rht_grow_above_max(ht, tbl)))
                goto exit;

        err = -EAGAIN;
        if (rhashtable_check_elasticity(ht, tbl, hash) ||
            rht_grow_above_100(ht, tbl))
                goto exit;

        err = 0;

        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

        RCU_INIT_POINTER(obj->next, head);

        rcu_assign_pointer(tbl->buckets[hash], obj);

        atomic_inc(&ht->nelems);

exit:
        spin_unlock(rht_bucket_lock(tbl, hash));

        return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:         Table to walk over
 * @iter:       Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
        iter->ht = ht;
        iter->p = NULL;
        iter->slot = 0;
        iter->skip = 0;

        iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
        if (!iter->walker)
                return -ENOMEM;

        mutex_lock(&ht->mutex);
        iter->walker->tbl = rht_dereference(ht->tbl, ht);
        list_add(&iter->walker->list, &iter->walker->tbl->walkers);
        mutex_unlock(&ht->mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:       Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
        mutex_lock(&iter->ht->mutex);
        if (iter->walker->tbl)
                list_del(&iter->walker->list);
        mutex_unlock(&iter->ht->mutex);
        kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:       Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
        __acquires(RCU)
{
        struct rhashtable *ht = iter->ht;

        mutex_lock(&ht->mutex);

        if (iter->walker->tbl)
                list_del(&iter->walker->list);

        rcu_read_lock();

        mutex_unlock(&ht->mutex);

        if (!iter->walker->tbl) {
                iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
                return -EAGAIN;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:       Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
        struct bucket_table *tbl = iter->walker->tbl;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;

        if (p) {
                p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
                goto next;
        }

        for (; iter->slot < tbl->size; iter->slot++) {
                int skip = iter->skip;

                rht_for_each_rcu(p, tbl, iter->slot) {
                        if (!skip)
                                break;
                        skip--;
                }

next:
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
                        return rht_obj(ht, p);
                }

                iter->skip = 0;
        }

        iter->p = NULL;

        /* Ensure we see any new tables. */
        smp_rmb();

        iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (iter->walker->tbl) {
                iter->slot = 0;
                iter->skip = 0;
                return ERR_PTR(-EAGAIN);
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:       Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
        __releases(RCU)
{
        struct rhashtable *ht;
        struct bucket_table *tbl = iter->walker->tbl;

        if (!tbl)
                goto out;

        ht = iter->ht;

        spin_lock(&ht->lock);
        if (tbl->rehash < tbl->size)
                list_add(&iter->walker->list, &tbl->walkers);
        else
                iter->walker->tbl = NULL;
        spin_unlock(&ht->lock);

        iter->p = NULL;

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
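
/* Example: iterating over every object with the walker API.  This is a
 * minimal usage sketch; it assumes a struct test_obj with an integer key as
 * in the configuration examples of rhashtable_init() below.  A -EAGAIN
 * return from rhashtable_walk_start() or rhashtable_walk_next() only means
 * a resize occurred and the iterator was rewound, so the walk continues.
 *
 *      struct rhashtable_iter iter;
 *      struct test_obj *obj;
 *      int err;
 *
 *      err = rhashtable_walk_init(&ht, &iter);
 *      if (err)
 *              return err;
 *
 *      rhashtable_walk_start(&iter);
 *
 *      while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *              if (IS_ERR(obj)) {
 *                      if (PTR_ERR(obj) == -EAGAIN)
 *                              continue;
 *                      break;
 *              }
 *              pr_info("key %d\n", obj->key);
 *      }
 *
 *      rhashtable_walk_stop(&iter);
 *      rhashtable_walk_exit(&iter);
 */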

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
                   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
        return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:         hash table to be initialized
 * @params:     configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *      int                     key;
 *      void *                  my_member;
 *      struct rhash_head       node;
 * };
 *
 * struct rhashtable_params params = {
 *      .head_offset = offsetof(struct test_obj, node),
 *      .key_offset = offsetof(struct test_obj, key),
 *      .key_len = sizeof(int),
 *      .hashfn = jhash,
 *      .nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *      [...]
 *      struct rhash_head       node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *      struct test_obj *obj = data;
 *
 *      return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *      .head_offset = offsetof(struct test_obj, node),
 *      .hashfn = jhash,
 *      .obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
                    const struct rhashtable_params *params)
{
        struct bucket_table *tbl;
        size_t size;

        size = HASH_DEFAULT_SIZE;

        if ((!params->key_len && !params->obj_hashfn) ||
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;

        if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
                return -EINVAL;

        if (params->nelem_hint)
                size = rounded_hashtable_size(params);

        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
        spin_lock_init(&ht->lock);
        memcpy(&ht->p, params, sizeof(*params));

        if (params->min_size)
                ht->p.min_size = roundup_pow_of_two(params->min_size);

        if (params->max_size)
                ht->p.max_size = rounddown_pow_of_two(params->max_size);

        if (params->insecure_max_entries)
                ht->p.insecure_max_entries =
                        rounddown_pow_of_two(params->insecure_max_entries);
        else
                ht->p.insecure_max_entries = ht->p.max_size * 2;

        ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

        /* The maximum (not average) chain length grows with the
         * size of the hash table, at a rate of (log N)/(log log N).
         * The value of 16 is selected so that even if the hash
         * table grew to 2^32 you would not expect the maximum
         * chain length to exceed it unless we are under attack
         * (or extremely unlucky).
         *
         * As this limit is only to detect attacks, we don't need
         * to set it to a lower value as you'd need the chain
         * length to vastly exceed 16 to have any real effect
         * on the system.
         */
        if (!params->insecure_elasticity)
                ht->elasticity = 16;

        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
        else
                ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

        ht->key_len = ht->p.key_len;
        if (!params->hashfn) {
                ht->p.hashfn = jhash;

                if (!(ht->key_len & (sizeof(u32) - 1))) {
                        ht->key_len /= sizeof(u32);
                        ht->p.hashfn = rhashtable_jhash2;
                }
        }

        tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (tbl == NULL)
                return -ENOMEM;

        atomic_set(&ht->nelems, 0);

        RCU_INIT_POINTER(ht->tbl, tbl);

        INIT_WORK(&ht->run_work, rht_deferred_worker);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
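
/* Example: creating a table and inserting/looking up an object using the
 * params from Configuration Example 1 above.  A minimal sketch;
 * rhashtable_insert_fast() and rhashtable_lookup_fast() are the inline
 * helpers from <linux/rhashtable.h>, and the object returned by the lookup
 * is only guaranteed to stay valid inside the RCU read-side section.
 *
 *      struct rhashtable ht;
 *      struct test_obj obj = { .key = 1 }, *found;
 *      int err;
 *
 *      err = rhashtable_init(&ht, &params);
 *      if (err)
 *              return err;
 *
 *      err = rhashtable_insert_fast(&ht, &obj.node, params);
 *      if (err)
 *              goto out;
 *
 *      rcu_read_lock();
 *      found = rhashtable_lookup_fast(&ht, &obj.key, params);
 *      if (found)
 *              pr_info("found key %d\n", found->key);
 *      rcu_read_unlock();
 * out:
 *      rhashtable_destroy(&ht);
 */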

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:         the hash table to destroy
 * @free_fn:    callback to release resources of element
 * @arg:        pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements, so resources must be
 * released in a manner compatible with that. Finally frees the bucket
 * array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void (*free_fn)(void *ptr, void *arg),
                                 void *arg)
{
        const struct bucket_table *tbl;
        unsigned int i;

        cancel_work_sync(&ht->run_work);

        mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);
        if (free_fn) {
                for (i = 0; i < tbl->size; i++) {
                        struct rhash_head *pos, *next;

                        for (pos = rht_dereference(tbl->buckets[i], ht),
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL;
                             !rht_is_a_nulls(pos);
                             pos = next,
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL)
                                free_fn(rht_obj(ht, pos), arg);
                }
        }

        bucket_table_free(tbl);
        mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
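
/* Example: tearing down a table whose objects were allocated with kmalloc().
 * A minimal sketch of a free_fn callback; struct test_obj is the
 * hypothetical element type from the rhashtable_init() examples above.
 *
 *      static void free_test_obj(void *ptr, void *arg)
 *      {
 *              kfree(ptr);
 *      }
 *
 *      rhashtable_free_and_destroy(&ht, free_test_obj, NULL);
 */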

void rhashtable_destroy(struct rhashtable *ht)
{
        return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);