/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
	RHT_LOCK_NESTED2,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(TBL, HASH) \
	BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#endif

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	u32 hash;

	hash = ht->p.hashfn(key, len, ht->p.hash_rnd);
	hash >>= HASH_RESERVED_SPACE;

	return rht_bucket_index(tbl, hash);
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than one lock per bucket */
	size = min_t(unsigned int, size, tbl->size);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @new_size: new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @new_size: new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
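
/* For illustration: with a table of 64 buckets, rht_grow_above_75()
 * requests expansion once nelems exceeds 64 / 4 * 3 = 48 entries, and
 * rht_shrink_below_30() requests shrinking once nelems drops below
 * 64 * 3 / 10 = 19 entries, provided the shift limits permit it.
 */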

static void hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return;

	new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same new-table bucket
	 * as the previous node p.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	/* If we have encountered an entry that maps to a different bucket
	 * in the new table, lock down that bucket as well, since we might
	 * cut off the end of its chain.
	 */
	new_bucket_lock2 = bucket_lock(new_tbl, new_hash2);
	if (new_bucket_lock != new_bucket_lock2)
		spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);

	/* Find the subsequent node which does hash to the same bucket
	 * as node p, or the nulls marker if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	rcu_assign_pointer(p->next, next);

	if (new_bucket_lock != new_bucket_lock2)
		spin_unlock_bh(new_bucket_lock2);
	spin_unlock_bh(new_bucket_lock);
}
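
/* Illustration of one unzip pass, as a sketch: suppose old bucket 1 of a
 * 4-bucket table holds the chain A -> B -> C -> D, where A and C rehash
 * to bucket 1 and B and D rehash to bucket 5 of the 8-bucket table. The
 * pass advances past the leading run (A), points the old bucket head at
 * the first entry of the other class (B) and relinks A directly to C.
 * Repeated passes, each separated by a grace period, fully separate the
 * two chains.
 */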

static void link_old_to_new(struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	spinlock_t *new_bucket_lock;

	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
	spin_unlock_bh(new_bucket_lock);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht: the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	spinlock_t *old_bucket_lock;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table has been picked
	 * up by all readers, so no new additions go into the old table while
	 * we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * the newly formed bucket chain (containing entries added to the
	 * future table) to that entry. Since all the entries which will end
	 * up in the new bucket appear in the same old bucket, this constructs
	 * an entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		old_bucket_lock = bucket_lock(old_tbl, old_hash);

		spin_lock_bh(old_bucket_lock);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(new_tbl, new_hash, he);
				break;
			}
		}
		spin_unlock_bh(old_bucket_lock);
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			struct rhash_head *head;

			old_bucket_lock = bucket_lock(old_tbl, old_hash);
			spin_lock_bh(old_bucket_lock);

			hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
			head = rht_dereference_bucket(old_tbl->buckets[old_hash],
						      old_tbl, old_hash);
			if (!rht_is_a_nulls(head))
				complete = false;

			spin_unlock_bh(old_bucket_lock);
		}
	}

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
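
/* Example usage, as a sketch: callers normally rely on the deferred
 * resize worker, but an explicit expansion must hold ht->mutex, as in
 * the self test below:
 *
 *	mutex_lock(&ht->mutex);
 *	rhashtable_expand(ht);
 *	mutex_unlock(&ht->mutex);
 */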

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in each old bucket to the end of the
	 * matching bucket in the new table. As entries are concurrently
	 * being added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 *
	 * As removals can occur concurrently on the old table, we need
	 * to lock down both matching buckets in the old table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_bucket_lock1 = bucket_lock(tbl, new_hash);
		old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
		new_bucket_lock = bucket_lock(new_tbl, new_hash);

		spin_lock_bh(old_bucket_lock1);
		spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		spin_unlock_bh(new_bucket_lock);
		spin_unlock_bh(old_bucket_lock2);
		spin_unlock_bh(old_bucket_lock1);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;

	ht = container_of(work, struct rhashtable, run_work.work);
	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

	mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_delayed_work(&ht->run_work, 0);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
							 tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * of the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	spinlock_t *lock;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = head_hashfn(ht, tbl, obj);
	lock = bucket_lock(tbl, hash);

	spin_lock_bh(lock);
	__rhashtable_insert(ht, obj, tbl, hash);
	spin_unlock_bh(lock);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
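
/* Example usage, as a sketch (struct test_obj as in the self test below,
 * with an embedded struct rhash_head node and a hypothetical key):
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->value = key;
 *	rhashtable_insert(ht, &obj->node);
 */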

/**
 * rhashtable_remove - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = head_hashfn(ht, tbl, obj);

	lock = bucket_lock(tbl, hash);
	spin_lock_bh(lock);

restart:
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		atomic_dec(&ht->nelems);

		spin_unlock_bh(lock);

		rhashtable_wakeup_worker(ht);

		rcu_read_unlock();

		return true;
	}

	if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
		spin_unlock_bh(lock);

		tbl = rht_dereference_rcu(ht->future_tbl, ht);
		hash = head_hashfn(ht, tbl, obj);

		lock = bucket_lock(tbl, hash);
		spin_lock_bh(lock);
		goto restart;
	}

	spin_unlock_bh(lock);
	rcu_read_unlock();

	return false;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
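
/* Example usage, as a sketch, following the self test below. Freeing
 * immediately after removal is only safe when no concurrent RCU readers
 * exist; otherwise the free must be deferred, e.g. via kfree_rcu():
 *
 *	obj = rhashtable_lookup(ht, &key);
 *	if (obj && rhashtable_remove(ht, &obj->node))
 *		kfree(obj);
 */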

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht: hash table
 * @key: pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
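
/* Example usage, as a sketch: the returned object is only guaranteed to
 * remain valid while the caller holds rcu_read_lock(), as in the self
 * test below:
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(ht, &key);
 *	if (obj)
 *		[... use obj ...]
 *	rcu_read_unlock();
 */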

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht: hash table
 * @key: the pointer to the key
 * @compare: compare function, must return true on match
 * @arg: argument passed on to compare function
 *
 * Traverses the bucket chain for the key's hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
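
/* Example usage, as a sketch, with a hypothetical caller-defined compare
 * function matching on an int member:
 *
 *	static bool my_cmp(void *ptr, void *arg)
 *	{
 *		return ((struct test_obj *)ptr)->value == *(int *)arg;
 *	}
 *
 *	obj = rhashtable_lookup_compare(ht, &key, my_cmp, &key);
 */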

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *new_tbl, *old_tbl;
	spinlock_t *new_bucket_lock, *old_bucket_lock;
	u32 new_hash, old_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	old_hash = head_hashfn(ht, old_tbl, obj);
	old_bucket_lock = bucket_lock(old_tbl, old_hash);
	spin_lock_bh(old_bucket_lock);

	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = head_hashfn(ht, new_tbl, obj);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);
	if (unlikely(old_tbl != new_tbl))
		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	if (rhashtable_lookup(ht, rht_obj(ht, obj) + ht->p.key_offset)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	if (unlikely(old_tbl != new_tbl))
		spin_unlock_bh(new_bucket_lock);
	spin_unlock_bh(old_bucket_lock);

	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
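
/* Example usage, as a sketch: insert obj unless an entry with the same
 * key already exists:
 *
 *	if (!rhashtable_lookup_insert(ht, &obj->node)) {
 *		[... duplicate entry, dispose of obj ...]
 *	}
 */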

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_DEFERRABLE_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhashtable_destroy - destroy hash table
 * @ht: the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	mutex_lock(&ht->mutex);

	cancel_delayed_work(&ht->run_work);
	bucket_table_free(rht_dereference(ht->tbl, ht));

	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

/**************************************************************************
 * Self Test
 **************************************************************************/

#ifdef CONFIG_TEST_RHASHTABLE

#define TEST_HT_SIZE	8
#define TEST_ENTRIES	2048
#define TEST_PTR	((void *) 0xdeadbeef)
#define TEST_NEXPANDS	4

struct test_obj {
	void *ptr;
	int value;
	struct rhash_head node;
};

static int __init test_rht_lookup(struct rhashtable *ht)
{
	unsigned int i;

	for (i = 0; i < TEST_ENTRIES * 2; i++) {
		struct test_obj *obj;
		bool expected = !(i % 2);
		u32 key = i;

		obj = rhashtable_lookup(ht, &key);

		if (expected && !obj) {
			pr_warn("Test failed: Could not find key %u\n", key);
			return -ENOENT;
		} else if (!expected && obj) {
			pr_warn("Test failed: Unexpected entry found for key %u\n",
				key);
			return -EEXIST;
		} else if (expected && obj) {
			if (obj->ptr != TEST_PTR || obj->value != i) {
				pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
					obj->ptr, TEST_PTR, obj->value, i);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void test_bucket_stats(struct rhashtable *ht, bool quiet)
{
	unsigned int cnt, rcu_cnt, i, total = 0;
	struct rhash_head *pos;
	struct test_obj *obj;
	struct bucket_table *tbl;

	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++) {
		rcu_cnt = cnt = 0;

		if (!quiet)
			pr_info(" [%#4x/%zu]", i, tbl->size);

		rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
			cnt++;
			total++;
			if (!quiet)
				pr_cont(" [%p],", obj);
		}

		rht_for_each_entry_rcu(obj, pos, tbl, i, node)
			rcu_cnt++;

		if (rcu_cnt != cnt)
			pr_warn("Test failed: Chain count mismatch %d != %d",
				cnt, rcu_cnt);

		if (!quiet)
			pr_cont("\n [%#x] first element: %p, chain length: %u\n",
				i, tbl->buckets[i], cnt);
	}

	pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n",
		total, atomic_read(&ht->nelems), TEST_ENTRIES);

	if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
		pr_warn("Test failed: Total count mismatch ^^^");
}

static int __init test_rhashtable(struct rhashtable *ht)
{
	struct bucket_table *tbl;
	struct test_obj *obj;
	struct rhash_head *pos, *next;
	int err;
	unsigned int i;

	/*
	 * Insertion Test:
	 * Insert TEST_ENTRIES into table with all keys even numbers
	 */
	pr_info(" Adding %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		struct test_obj *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj) {
			err = -ENOMEM;
			goto error;
		}

		obj->ptr = TEST_PTR;
		obj->value = i * 2;

		rhashtable_insert(ht, &obj->node);
	}

	rcu_read_lock();
	test_bucket_stats(ht, true);
	test_rht_lookup(ht);
	rcu_read_unlock();

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info(" Table expansion iteration %u...\n", i);
		mutex_lock(&ht->mutex);
		rhashtable_expand(ht);
		mutex_unlock(&ht->mutex);

		rcu_read_lock();
		pr_info(" Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info(" Table shrinkage iteration %u...\n", i);
		mutex_lock(&ht->mutex);
		rhashtable_shrink(ht);
		mutex_unlock(&ht->mutex);

		rcu_read_lock();
		pr_info(" Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	rcu_read_lock();
	test_bucket_stats(ht, true);
	rcu_read_unlock();

	pr_info(" Deleting %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		u32 key = i * 2;

		obj = rhashtable_lookup(ht, &key);
		BUG_ON(!obj);

		rhashtable_remove(ht, &obj->node);
		kfree(obj);
	}

	return 0;

error:
	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
			kfree(obj);

	return err;
}

static int __init test_rht_init(void)
{
	struct rhashtable ht;
	struct rhashtable_params params = {
		.nelem_hint = TEST_HT_SIZE,
		.head_offset = offsetof(struct test_obj, node),
		.key_offset = offsetof(struct test_obj, value),
		.key_len = sizeof(int),
		.hashfn = jhash,
		.nulls_base = (3U << RHT_BASE_SHIFT),
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};
	int err;

	pr_info("Running resizable hashtable tests...\n");

	err = rhashtable_init(&ht, &params);
	if (err < 0) {
		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
			err);
		return err;
	}

	err = test_rhashtable(&ht);

	rhashtable_destroy(&ht);

	return err;
}

subsys_initcall(test_rht_init);

#endif /* CONFIG_TEST_RHASHTABLE */