// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/export.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U

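/* When a table cannot be allocated as one flat array of buckets (for
 * example when a large GFP_ATOMIC allocation fails), the buckets are
 * stored in a tree of pages instead: inner pages hold pointers to
 * further pages, leaf pages hold the bucket heads.
 */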
union nested_table {
        union nested_table __rcu *table;
        struct rhash_lock_head __rcu *bucket;
};

static u32 head_hashfn(struct rhashtable *ht,
                       const struct bucket_table *tbl,
                       const struct rhash_head *he)
{
        return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
        if (!debug_locks)
                return 1;
        if (unlikely(tbl->nest))
                return 1;
        return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static inline union nested_table *nested_table_top(
        const struct bucket_table *tbl)
{
        /* The top-level bucket entry does not need RCU protection
         * because it's set at the same time as tbl->nest.
         */
        return (void *)rcu_dereference_protected(tbl->buckets[0], 1);
}

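/* Recursively free one entry of the page tree. @size is the number of
 * buckets still covered by this entry; anything spanning more than one
 * page of pointers is an inner node whose children are freed first.
 */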
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        const unsigned int len = 1 << shift;
        unsigned int i;

        ntbl = rcu_dereference_protected(ntbl->table, 1);
        if (!ntbl)
                return;

        if (size > len) {
                size >>= shift;
                for (i = 0; i < len; i++)
                        nested_table_free(ntbl + i, size);
        }

        kfree(ntbl);
}

static void nested_bucket_table_free(const struct bucket_table *tbl)
{
        unsigned int size = tbl->size >> tbl->nest;
        unsigned int len = 1 << tbl->nest;
        union nested_table *ntbl;
        unsigned int i;

        ntbl = nested_table_top(tbl);

        for (i = 0; i < len; i++)
                nested_table_free(ntbl + i, size);

        kfree(ntbl);
}

static void bucket_table_free(const struct bucket_table *tbl)
{
        if (tbl->nest)
                nested_bucket_table_free(tbl);

        kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
        bucket_table_free(container_of(head, struct bucket_table, rcu));
}

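/* Allocate one page of the nested table on demand. Leaf pages have
 * every bucket initialised to its nulls marker. A race between two
 * allocators is resolved with cmpxchg(): the loser frees its page and
 * returns the winner's.
 */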
static union nested_table *nested_table_alloc(struct rhashtable *ht,
                                              union nested_table __rcu **prev,
                                              bool leaf)
{
        union nested_table *ntbl;
        int i;

        ntbl = rcu_dereference(*prev);
        if (ntbl)
                return ntbl;

        ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);

        if (ntbl && leaf) {
                for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
                        INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
        }

        if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL)
                return ntbl;
        /* Raced with another thread. */
        kfree(ntbl);
        return rcu_dereference(*prev);
}

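/* Allocate a bucket table whose buckets live in a page tree. Only the
 * topmost page is allocated here; lower levels are filled in on demand
 * by the insert path (rht_bucket_insert()). tbl->nest records the
 * number of hash bits resolved by the top level so that lookups know
 * how to traverse the tree.
 */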
static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
                                                      size_t nbuckets,
                                                      gfp_t gfp)
{
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
        struct bucket_table *tbl;
        size_t size;

        if (nbuckets < (1 << (shift + 1)))
                return NULL;

        size = sizeof(*tbl) + sizeof(tbl->buckets[0]);

        tbl = kzalloc(size, gfp);
        if (!tbl)
                return NULL;

        if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
                                false)) {
                kfree(tbl);
                return NULL;
        }

        tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;

        return tbl;
}

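/* Allocate a flat bucket table. If that fails in atomic context, fall
 * back to a nested table, which needs only page-sized allocations.
 */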
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets,
                                               gfp_t gfp)
{
        struct bucket_table *tbl = NULL;
        size_t size;
        int i;
        static struct lock_class_key __key;

        tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);

        size = nbuckets;

        if (tbl == NULL && (gfp & ~__GFP_NOFAIL) != GFP_KERNEL) {
                tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
                nbuckets = 0;
        }

        if (tbl == NULL)
                return NULL;

        lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);

        tbl->size = size;

        rcu_head_init(&tbl->rcu);
        INIT_LIST_HEAD(&tbl->walkers);

        tbl->hash_rnd = get_random_u32();

        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i]);

        return tbl;
}

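/* Follow the future_tbl chain and return the newest table. During a
 * rehash, new insertions always target this table.
 */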
static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
                                                  struct bucket_table *tbl)
{
        struct bucket_table *new_tbl;

        do {
                new_tbl = tbl;
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        } while (tbl);

        return new_tbl;
}

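/* Move the last entry of the old bucket chain to its bucket in the new
 * table. Returns -ENOENT once the old bucket is empty, and -EAGAIN if
 * the new table is nested, in which case the deferred worker redoes
 * the rehash from process context.
 */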
static int rhashtable_rehash_one(struct rhashtable *ht,
                                 struct rhash_lock_head __rcu **bkt,
                                 unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
        int err = -EAGAIN;
        struct rhash_head *head, *next, *entry;
        struct rhash_head __rcu **pprev = NULL;
        unsigned int new_hash;

        if (new_tbl->nest)
                goto out;

        err = -ENOENT;

        rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
                          old_tbl, old_hash) {
                err = 0;
                next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

                if (rht_is_a_nulls(next))
                        break;

                pprev = &entry->next;
        }

        if (err)
                goto out;

        new_hash = head_hashfn(ht, new_tbl, entry);

        rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
                        SINGLE_DEPTH_NESTING);

        head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);

        RCU_INIT_POINTER(entry->next, head);

        rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);

        if (pprev)
                rcu_assign_pointer(*pprev, next);
        else
                /* Need to preserve the bit lock. */
                rht_assign_locked(bkt, next);

out:
        return err;
}

static int rhashtable_rehash_chain(struct rhashtable *ht,
                                   unsigned int old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
        int err;

        if (!bkt)
                return 0;
        rht_lock(old_tbl, bkt);

        while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
                ;

        if (err == -ENOENT)
                err = 0;
        rht_unlock(old_tbl, bkt);

        return err;
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
                                    struct bucket_table *old_tbl,
                                    struct bucket_table *new_tbl)
{
        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
         * As cmpxchg() provides strong barriers, we do not need
         * rcu_assign_pointer().
         */

        if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL,
                    new_tbl) != NULL)
                return -EEXIST;

        return 0;
}

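/* Transfer every bucket from the old table to the table attached at
 * future_tbl, then publish the new table and free the old one after a
 * grace period. Returns -EAGAIN when yet another table is already
 * queued behind the new one.
 */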
static int rhashtable_rehash_table(struct rhashtable *ht)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl;
        struct rhashtable_walker *walker;
        unsigned int old_hash;
        int err;

        new_tbl = rht_dereference(old_tbl->future_tbl, ht);
        if (!new_tbl)
                return 0;

        for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
                err = rhashtable_rehash_chain(ht, old_hash);
                if (err)
                        return err;
                cond_resched();
        }

        /* Publish the new table pointer. */
        rcu_assign_pointer(ht->tbl, new_tbl);

        spin_lock(&ht->lock);
        list_for_each_entry(walker, &old_tbl->walkers, list)
                walker->tbl = NULL;

        /* Wait for readers. All new readers will see the new
         * table, and thus no references to the old table will
         * remain.
         * We do this inside the locked region so that
         * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
         * to detect that it must not re-link the table.
         */
        call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
        spin_unlock(&ht->lock);

        return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

static int rhashtable_rehash_alloc(struct rhashtable *ht,
                                   struct bucket_table *old_tbl,
                                   unsigned int size)
{
        struct bucket_table *new_tbl;
        int err;

        ASSERT_RHT_MUTEX(ht);

        new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;

        err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
        if (err)
                bucket_table_free(new_tbl);

        return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand right away automatically.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        unsigned int nelems = atomic_read(&ht->nelems);
        unsigned int size = 0;

        if (nelems)
                size = roundup_pow_of_two(nelems * 3 / 2);
        if (size < ht->p.min_size)
                size = ht->p.min_size;

        if (old_tbl->size <= size)
                return 0;

        if (rht_dereference(old_tbl->future_tbl, ht))
                return -EEXIST;

        return rhashtable_rehash_alloc(ht, old_tbl, size);
}

static void rht_deferred_worker(struct work_struct *work)
{
        struct rhashtable *ht;
        struct bucket_table *tbl;
        int err = 0;

        ht = container_of(work, struct rhashtable, run_work);
        mutex_lock(&ht->mutex);

        tbl = rht_dereference(ht->tbl, ht);
        tbl = rhashtable_last_table(ht, tbl);

        if (rht_grow_above_75(ht, tbl))
                err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
        else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
                err = rhashtable_shrink(ht);
        else if (tbl->nest)
                err = rhashtable_rehash_alloc(ht, tbl, tbl->size);

        if (!err || err == -EEXIST) {
                int nerr;

                nerr = rhashtable_rehash_table(ht);
                err = err ?: nerr;
        }

        mutex_unlock(&ht->mutex);

        if (err)
                schedule_work(&ht->run_work);
}

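/* Slow path for an insertion that cannot proceed in the current table:
 * the chain grew beyond RHT_ELASTICITY or the table passed 100%
 * utilisation. Attach a new table (double the size when above 75%)
 * and let the deferred worker carry out the actual rehash.
 */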
static int rhashtable_insert_rehash(struct rhashtable *ht,
                                    struct bucket_table *tbl)
{
        struct bucket_table *old_tbl;
        struct bucket_table *new_tbl;
        unsigned int size;
        int err;

        old_tbl = rht_dereference_rcu(ht->tbl, ht);

        size = tbl->size;

        err = -EBUSY;

        if (rht_grow_above_75(ht, tbl))
                size *= 2;
        /* Do not schedule more than one rehash */
        else if (old_tbl != tbl)
                goto fail;

        err = -ENOMEM;

        new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC | __GFP_NOWARN);
        if (new_tbl == NULL)
                goto fail;

        err = rhashtable_rehash_attach(ht, tbl, new_tbl);
        if (err) {
                bucket_table_free(new_tbl);
                if (err == -EEXIST)
                        err = 0;
        } else
                schedule_work(&ht->run_work);

        return err;

fail:
        /* Do not fail the insert if someone else did a rehash. */
        if (likely(rcu_access_pointer(tbl->future_tbl)))
                return 0;

        /* Schedule async rehash to retry allocation in process context. */
        if (err == -ENOMEM)
                schedule_work(&ht->run_work);

        return err;
}

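/* Search the bucket for @key. Returns the matching object,
 * ERR_PTR(-ENOENT) if nothing matches, or ERR_PTR(-EAGAIN) if the
 * chain is longer than RHT_ELASTICITY. For rhlist tables a match
 * instead links @obj into the matching entry's list and returns NULL.
 */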
static void *rhashtable_lookup_one(struct rhashtable *ht,
                                   struct rhash_lock_head __rcu **bkt,
                                   struct bucket_table *tbl, unsigned int hash,
                                   const void *key, struct rhash_head *obj)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = key,
        };
        struct rhash_head __rcu **pprev = NULL;
        struct rhash_head *head;
        int elasticity;

        elasticity = RHT_ELASTICITY;
        rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
                struct rhlist_head *list;
                struct rhlist_head *plist;

                elasticity--;
                if (!key ||
                    (ht->p.obj_cmpfn ?
                     ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
                     rhashtable_compare(&arg, rht_obj(ht, head)))) {
                        pprev = &head->next;
                        continue;
                }

                if (!ht->rhlist)
                        return rht_obj(ht, head);

                list = container_of(obj, struct rhlist_head, rhead);
                plist = container_of(head, struct rhlist_head, rhead);

                RCU_INIT_POINTER(list->next, plist);
                head = rht_dereference_bucket(head->next, tbl, hash);
                RCU_INIT_POINTER(list->rhead.next, head);
                if (pprev)
                        rcu_assign_pointer(*pprev, obj);
                else
                        /* Need to preserve the bit lock */
                        rht_assign_locked(bkt, obj);

                return NULL;
        }

        if (elasticity <= 0)
                return ERR_PTR(-EAGAIN);

        return ERR_PTR(-ENOENT);
}

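/* Insert @obj at the head of the bucket unless @data says a matching
 * object already exists or a newer table must be tried. Returns NULL
 * on success, the next table to retry on, or an ERR_PTR().
 */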
static struct bucket_table *rhashtable_insert_one(
        struct rhashtable *ht, struct rhash_lock_head __rcu **bkt,
        struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj,
        void *data)
{
        struct bucket_table *new_tbl;
        struct rhash_head *head;

        if (!IS_ERR_OR_NULL(data))
                return ERR_PTR(-EEXIST);

        if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
                return ERR_CAST(data);

        new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (new_tbl)
                return new_tbl;

        if (PTR_ERR(data) != -ENOENT)
                return ERR_CAST(data);

        if (unlikely(rht_grow_above_max(ht, tbl)))
                return ERR_PTR(-E2BIG);

        if (unlikely(rht_grow_above_100(ht, tbl)))
                return ERR_PTR(-EAGAIN);

        head = rht_ptr(bkt, tbl, hash);

        RCU_INIT_POINTER(obj->next, head);
        if (ht->rhlist) {
                struct rhlist_head *list;

                list = container_of(obj, struct rhlist_head, rhead);
                RCU_INIT_POINTER(list->next, NULL);
        }

        /* bkt is always the head of the list, so it holds
         * the lock, which we need to preserve.
         */
        rht_assign_locked(bkt, obj);

        atomic_inc(&ht->nelems);
        if (rht_grow_above_75(ht, tbl))
                schedule_work(&ht->run_work);

        return NULL;
}

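/* One insertion attempt under RCU. Starting from ht->tbl, try each
 * table in the future_tbl chain until the insert succeeds, a duplicate
 * is found, or an error other than "table moved" occurs.
 */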
static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
                                   struct rhash_head *obj)
{
        struct bucket_table *new_tbl;
        struct bucket_table *tbl;
        struct rhash_lock_head __rcu **bkt;
        unsigned int hash;
        void *data;

        new_tbl = rcu_dereference(ht->tbl);

        do {
                tbl = new_tbl;
                hash = rht_head_hashfn(ht, tbl, obj, ht->p);
                if (rcu_access_pointer(tbl->future_tbl))
                        /* Failure is OK */
                        bkt = rht_bucket_var(tbl, hash);
                else
                        bkt = rht_bucket_insert(ht, tbl, hash);
                if (bkt == NULL) {
                        new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
                        data = ERR_PTR(-EAGAIN);
                } else {
                        rht_lock(tbl, bkt);
                        data = rhashtable_lookup_one(ht, bkt, tbl,
                                                     hash, key, obj);
                        new_tbl = rhashtable_insert_one(ht, bkt, tbl,
                                                        hash, obj, data);
                        if (PTR_ERR(new_tbl) != -EEXIST)
                                data = ERR_CAST(new_tbl);

                        rht_unlock(tbl, bkt);
                }
        } while (!IS_ERR_OR_NULL(new_tbl));

        if (PTR_ERR(data) == -EAGAIN)
                data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
                               -EAGAIN);

        return data;
}

void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
                             struct rhash_head *obj)
{
        void *data;

        do {
                rcu_read_lock();
                data = rhashtable_try_insert(ht, key, obj);
                rcu_read_unlock();
        } while (PTR_ERR(data) == -EAGAIN);

        return data;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

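/* A minimal usage sketch of the table these slow paths implement
 * (hypothetical structure and variable names; it assumes only the
 * public API declared in <linux/rhashtable.h> and is not code from
 * this file):
 *
 *	struct test_obj {
 *		int key;
 *		struct rhash_head node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.key_len = sizeof(int),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.head_offset = offsetof(struct test_obj, node),
 *	};
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	obj = rhashtable_lookup_fast(&ht, &key, test_params);
 *	err = rhashtable_remove_fast(&ht, &obj->node, test_params);
 */
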
/**
 * rhashtable_walk_enter - Initialise an iterator
 * @ht: Table to walk over
 * @iter: Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may be called from any process context, including
 * non-preemptable context, but cannot be called from softirq or
 * hardirq context.
 *
 * You must call rhashtable_walk_exit after this function returns.
 */
void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
{
        iter->ht = ht;
        iter->p = NULL;
        iter->slot = 0;
        iter->skip = 0;
        iter->end_of_table = 0;

        spin_lock(&ht->lock);
        iter->walker.tbl =
                rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
        list_add(&iter->walker.list, &iter->walker.tbl->walkers);
        spin_unlock(&ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_enter);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter: Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_enter.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
        spin_lock(&iter->ht->lock);
        if (iter->walker.tbl)
                list_del(&iter->walker.list);
        spin_unlock(&iter->ht->lock);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start_check - Start a hash table walk
 * @iter: Hash table iterator
 *
 * Start a hash table walk at the current iterator position. Note that we take
 * the RCU lock in all cases including when we return an error. So you must
 * always call rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 *
 * rhashtable_walk_start is defined as an inline variant that returns
 * void. This is preferred in cases where the caller would ignore
 * resize events and always continue.
 */
int rhashtable_walk_start_check(struct rhashtable_iter *iter)
        __acquires(RCU)
{
        struct rhashtable *ht = iter->ht;
        bool rhlist = ht->rhlist;

        rcu_read_lock();

        spin_lock(&ht->lock);
        if (iter->walker.tbl)
                list_del(&iter->walker.list);
        spin_unlock(&ht->lock);

        if (iter->end_of_table)
                return 0;
        if (!iter->walker.tbl) {
                iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
                iter->slot = 0;
                iter->skip = 0;
                return -EAGAIN;
        }

        if (iter->p && !rhlist) {
                /*
                 * We need to validate that 'p' is still in the table, and
                 * if so, update 'skip'.
                 */
                struct rhash_head *p;
                int skip = 0;

                rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
                        skip++;
                        if (p == iter->p) {
                                iter->skip = skip;
                                goto found;
                        }
                }
                iter->p = NULL;
        } else if (iter->p && rhlist) {
                /* Need to validate that 'list' is still in the table, and
                 * if so, update 'skip' and 'p'.
                 */
                struct rhash_head *p;
                struct rhlist_head *list;
                int skip = 0;

                rht_for_each_rcu(p, iter->walker.tbl, iter->slot) {
                        for (list = container_of(p, struct rhlist_head, rhead);
                             list;
                             list = rcu_dereference(list->next)) {
                                skip++;
                                if (list == iter->list) {
                                        iter->p = p;
                                        iter->skip = skip;
                                        goto found;
                                }
                        }
                }
                iter->p = NULL;
        }
found:
        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);

/**
 * __rhashtable_walk_find_next - Find the next element in a table (or the first
 * one in case of a new walk).
 *
 * @iter: Hash table iterator
 *
 * Returns the found object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.
 */
static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
        struct bucket_table *tbl = iter->walker.tbl;
        struct rhlist_head *list = iter->list;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
        bool rhlist = ht->rhlist;

        if (!tbl)
                return NULL;

        for (; iter->slot < tbl->size; iter->slot++) {
                int skip = iter->skip;

                rht_for_each_rcu(p, tbl, iter->slot) {
                        if (rhlist) {
                                list = container_of(p, struct rhlist_head,
                                                    rhead);
                                do {
                                        if (!skip)
                                                goto next;
                                        skip--;
                                        list = rcu_dereference(list->next);
                                } while (list);

                                continue;
                        }
                        if (!skip)
                                break;
                        skip--;
                }

next:
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
                        iter->list = list;
                        return rht_obj(ht, rhlist ? &list->rhead : p);
                }

                iter->skip = 0;
        }

        iter->p = NULL;

        /* Ensure we see any new tables. */
        smp_rmb();

        iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (iter->walker.tbl) {
                iter->slot = 0;
                iter->skip = 0;
                return ERR_PTR(-EAGAIN);
        } else {
                iter->end_of_table = true;
        }

        return NULL;
}

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * Returns -EAGAIN if resize event occurred. Note that the iterator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * will rewind back to the beginning and you may continue to use it.
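 *
 * A minimal walk sketch (my_ht and struct my_obj are illustrative
 * names, not part of this API). On -EAGAIN the iterator has already
 * rewound, so the walk can simply continue:
 *
 *	struct rhashtable_iter iter;
 *	struct my_obj *obj;
 *
 *	rhashtable_walk_enter(&my_ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		[... use obj ...]
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);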
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	bool rhlist = ht->rhlist;

	if (p) {
		if (!rhlist || !(list = rcu_dereference(list->next))) {
			p = rcu_dereference(p->next);
			list = container_of(p, struct rhlist_head, rhead);
		}
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			iter->list = list;
			return rht_obj(ht, rhlist ? &list->rhead : p);
		}

		/* At the end of this slot, switch to the next one and then
		 * find the next entry from that point.
		 */
		iter->skip = 0;
		iter->slot++;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_peek - Return the next object but don't advance the iterator
 * @iter: Hash table iterator
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_peek(struct rhashtable_iter *iter)
{
	struct rhlist_head *list = iter->list;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;

	if (p)
		return rht_obj(ht, ht->rhlist ? &list->rhead : p);

	/* No object found in the current iter, find the next one in the
	 * table.
	 */

	if (iter->skip) {
		/* A nonzero skip value points to the next entry in the table
		 * beyond the last one that was found. Decrement skip so
		 * we find the current value. __rhashtable_walk_find_next
		 * will restore the original value of skip assuming that
		 * the table hasn't changed.
		 */
		iter->skip--;
	}

	return __rhashtable_walk_find_next(iter);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_peek);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk. Does not reset the iterator to the start of the
 * hash table.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker.tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
		/* This bucket table is being freed, don't re-link it. */
		iter->walker.tbl = NULL;
	else
		list_add(&iter->walker.list, &tbl->walkers);
	spin_unlock(&ht->lock);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	size_t retsize;

	if (params->nelem_hint)
		retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
			      (unsigned long)params->min_size);
	else
		retsize = max(HASH_DEFAULT_SIZE,
			      (unsigned long)params->min_size);

	return retsize;
}
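
/*
 * Worked example of the sizing above (assuming the stated inputs):
 * nelem_hint = 100 is scaled by 4/3 (targeting at most 75% occupancy)
 * to 133, which roundup_pow_of_two() turns into a 256-bucket table;
 * without a hint the result is simply max(HASH_DEFAULT_SIZE, min_size).
 */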

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
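 *
 * A minimal initialization sketch (my_ht is an illustrative name;
 * handling beyond the returned error code is omitted):
 *
 *	struct rhashtable my_ht;
 *	int err;
 *
 *	err = rhashtable_init(&my_ht, &params);
 *	if (err)
 *		return err;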
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	/* Cap total entries at 2^31 to avoid nelems overflow. */
	ht->max_elems = 1u << 31;

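	/* E.g. a requested max_size of 300 is rounded down to 256,
	 * which in turn caps max_elems at 2 * 256 = 512.
	 */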
	if (params->max_size) {
		ht->p.max_size = rounddown_pow_of_two(params->max_size);
		if (ht->p.max_size < ht->max_elems / 2)
			ht->max_elems = ht->p.max_size * 2;
	}

	ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);

	size = rounded_hashtable_size(&ht->p);

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

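		/* Keys that are a whole number of u32 words can use the
		 * faster jhash2(); ht->key_len is then a count of words
		 * rather than bytes.
		 */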
		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	/*
	 * This is API initialization and thus we need to guarantee the
	 * initial rhashtable allocation. Upon failure, retry with the
	 * smallest possible size with __GFP_NOFAIL semantics.
	 */
	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (unlikely(tbl == NULL)) {
		size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
		tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL);
	}

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhltable_init - initialize a new hash list table
 * @hlt: hash list table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash list table.
 *
 * See documentation for rhashtable_init.
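 *
 * An rhltable chains objects that share a key on a single bucket
 * entry via struct rhlist_head. A configuration sketch (illustrative
 * names, mirroring the rhashtable_init examples):
 *
 * struct test_obj {
 *	int			key;
 *	struct rhlist_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 * };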
 */
int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
{
	int err;

	err = rhashtable_init(&hlt->ht, params);
	hlt->ht.rhlist = true;
	return err;
}
EXPORT_SYMBOL_GPL(rhltable_init);

static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
				void (*free_fn)(void *ptr, void *arg),
				void *arg)
{
	struct rhlist_head *list;

	if (!ht->rhlist) {
		free_fn(rht_obj(ht, obj), arg);
		return;
	}

	list = container_of(obj, struct rhlist_head, rhead);
	do {
		obj = &list->rhead;
		list = rht_dereference(list->next, ht);
		free_fn(rht_obj(ht, obj), arg);
	} while (list);
}

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht: the hash table to destroy
 * @free_fn: callback to release resources of element
 * @arg: pointer passed to free_fn
 *
 * Stops any pending async resize. If defined, invokes free_fn for each
 * element to release its resources. Please note that RCU protected
 * readers may still be accessing the elements, so resources must be
 * released in a compatible manner. Then frees the bucket array.
 *
 * This function may sleep to wait for an async resize to complete. The
 * caller is responsible for ensuring that no further write operations
 * occur in parallel.
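 *
 * A typical teardown sketch, assuming each element was allocated with
 * kmalloc() (free_elem and my_ht are illustrative names):
 *
 * static void free_elem(void *ptr, void *arg)
 * {
 *	kfree(ptr);
 * }
 *
 * rhashtable_free_and_destroy(&my_ht, free_elem, NULL);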
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	struct bucket_table *tbl, *next_tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
restart:
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			cond_resched();
			for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				rhashtable_free_one(ht, pos, free_fn, arg);
		}
	}

	next_tbl = rht_dereference(tbl->future_tbl, ht);
	bucket_table_free(tbl);
	if (next_tbl) {
		tbl = next_tbl;
		goto restart;
	}
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

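/*
 * Nested bucket tables are walked like a fixed-depth radix tree: the
 * top level resolves tbl->nest bits of the hash, and each further
 * level is one page of pointers. As a sketch of the arithmetic, with
 * 4 KiB pages and 8-byte pointers, shift = PAGE_SHIFT - ilog2(8)
 * = 12 - 3 = 9, so each level below the top resolves 9 hash bits
 * (512 slots).
 */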
struct rhash_lock_head __rcu **__rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	unsigned int subhash = hash;
	union nested_table *ntbl;

	ntbl = nested_table_top(tbl);
	ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
	subhash >>= tbl->nest;

	while (ntbl && size > (1 << shift)) {
		index = subhash & ((1 << shift) - 1);
		ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
						  tbl, hash);
		size >>= shift;
		subhash >>= shift;
	}

	if (!ntbl)
		return NULL;

	return &ntbl[subhash].bucket;
}
EXPORT_SYMBOL_GPL(__rht_bucket_nested);

struct rhash_lock_head __rcu **rht_bucket_nested(
	const struct bucket_table *tbl, unsigned int hash)
{
	static struct rhash_lock_head __rcu *rhnull;

	if (!rhnull)
		INIT_RHT_NULLS_HEAD(rhnull);
	return __rht_bucket_nested(tbl, hash) ?: &rhnull;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested);

struct rhash_lock_head __rcu **rht_bucket_nested_insert(
	struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
	unsigned int index = hash & ((1 << tbl->nest) - 1);
	unsigned int size = tbl->size >> tbl->nest;
	union nested_table *ntbl;

	ntbl = nested_table_top(tbl);
	hash >>= tbl->nest;
	ntbl = nested_table_alloc(ht, &ntbl[index].table,
				  size <= (1 << shift));

	while (ntbl && size > (1 << shift)) {
		index = hash & ((1 << shift) - 1);
		size >>= shift;
		hash >>= shift;
		ntbl = nested_table_alloc(ht, &ntbl[index].table,
					  size <= (1 << shift));
	}

	if (!ntbl)
		return NULL;

	return &ntbl[hash].bucket;
}
EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);