/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking: locking the bucket in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
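
/* A worked example of the scheme above, with illustrative numbers:
 * assume an expansion from a table of size 8 to one of size 16 with
 * the maximum of 16/2 = 8 locks (locks_mask == 7). Entries from old
 * bucket 3 are redistributed to new buckets 3 and 11, and both map to
 * the same lock since 3 & 7 == 11 & 7 == 3. Locking hash 3 in both
 * tables therefore covers old bucket 3 and every new bucket it can
 * unzip into.
 */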

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(TBL, HASH) \
	BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#endif

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
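
/* A sizing sketch for the allocation above (the numbers are purely
 * illustrative): on an 8-CPU machine with the default locks_mul of
 * 128, 8 * 128 = 1024 locks are requested, already a power of two.
 * For a table of 64 buckets the count is then capped at 64 >> 1 = 32
 * locks, i.e. one lock per two buckets.
 */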

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
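
/* An illustrative walk through the two watermarks above: with a table
 * of 64 buckets, the table grows once nelems exceeds 64 / 4 * 3 = 48
 * entries. After doubling to 128 buckets it shrinks again only when
 * nelems drops below 128 * 3 / 10 = 38. The gap between the 75% and
 * 30% watermarks provides hysteresis, so a load hovering around a
 * single threshold does not cause the table to resize back and forth.
 */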

static void lock_buckets(struct bucket_table *new_tbl,
			 struct bucket_table *old_tbl, unsigned int hash)
	__acquires(old_bucket_lock)
{
	spin_lock_bh(bucket_lock(old_tbl, hash));
	if (new_tbl != old_tbl)
		spin_lock_bh_nested(bucket_lock(new_tbl, hash),
				    RHT_LOCK_NESTED);
}

static void unlock_buckets(struct bucket_table *new_tbl,
			   struct bucket_table *old_tbl, unsigned int hash)
	__releases(old_bucket_lock)
{
	if (new_tbl != old_tbl)
		spin_unlock_bh(bucket_lock(new_tbl, hash));
	spin_unlock_bh(bucket_lock(old_tbl, hash));
}

/**
 * Unlink entries on a bucket which hash to a different bucket.
 *
 * Returns true if more work remains to be done on this bucket, i.e.
 * the old bucket chain is not yet empty.
 */
static bool hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return false;

	new_hash = head_hashfn(ht, new_tbl, p);
	ASSERT_BUCKET_LOCK(new_tbl, new_hash);

	/* Advance p one or more times until the node following it no
	 * longer hashes to the same new bucket, i.e. p ends up on the
	 * last node of the leading run.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		ASSERT_BUCKET_LOCK(new_tbl, new_hash2);

		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	/* Find the subsequent node which does hash to the same
	 * bucket as node p, or the nulls marker if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket.
	 */
	rcu_assign_pointer(p->next, next);

	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);

	return !rht_is_a_nulls(p);
}
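
/* A sketch of one unzip pass, with letters denoting the new bucket an
 * entry hashes to. Starting from the old chain
 *
 *	old[h]: A1 -> A2 -> B1 -> A3 -> B2
 *
 * the old bucket head is advanced past the leading run A1 -> A2, and
 * the run's tail A2 is relinked to the next A entry:
 *
 *	old[h]: B1 -> A3 -> B2		new[a]: A1 -> A2 -> A3 -> ...
 *
 * Each pass peels off one leading run (the next pass peels B1, and so
 * on) until the old bucket is empty. Readers may still see entries of
 * the other bucket interleaved in a chain, which is harmless since
 * they compare keys anyway.
 */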

static void link_old_to_new(struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees that the new table has been picked
	 * up, so no new additions go into the old table while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * the newly formed bucket chain (containing entries added to the
	 * future table) to that entry. Since all the entries which will end
	 * up in the new bucket appear in the same old bucket, this constructs
	 * an entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		lock_buckets(new_tbl, old_tbl, new_hash);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(new_tbl, new_hash, he);
				break;
			}
		}
		unlock_buckets(new_tbl, old_tbl, new_hash);
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			lock_buckets(new_tbl, old_tbl, old_hash);

			if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
						  old_hash))
				complete = false;

			unlock_buckets(new_tbl, old_tbl, old_hash);
		}
	}

	synchronize_rcu();

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		lock_buckets(new_tbl, tbl, new_hash);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		unlock_buckets(new_tbl, tbl, new_hash);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
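
/* The two-to-one mapping used above, concretely: when shrinking from
 * 8 to 4 buckets, new bucket n receives the chains of old buckets n
 * and n + 4, e.g. new bucket 1 takes old buckets 1 and 5. Note that
 * bucket_tail() is recomputed between the two links, so the second
 * chain is appended after the first rather than replacing it.
 */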

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	struct rhashtable_walker *walker;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);

	list_for_each_entry(walker, &ht->walkers, list)
		walker->resize = true;

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

unlock:
	mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_work(&ht->run_work);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
							 tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *old_tbl;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = head_hashfn(ht, tbl, obj);

	lock_buckets(tbl, old_tbl, hash);
	__rhashtable_insert(ht, obj, tbl, hash);
	unlock_buckets(tbl, old_tbl, hash);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
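
/* A minimal usage sketch, assuming the hypothetical struct test_obj
 * from the rhashtable_init() examples below. Callers embed the
 * rhash_head and pass a pointer to it rather than to the object:
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (obj) {
 *		obj->key = 42;
 *		rhashtable_insert(&ht, &obj->node);
 *	}
 */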

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl, *new_tbl, *old_tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	unsigned int hash, new_hash;
	bool ret = false;

	rcu_read_lock();
	tbl = old_tbl = rht_dereference_rcu(ht->tbl, ht);
	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = head_hashfn(ht, new_tbl, obj);

	lock_buckets(new_tbl, old_tbl, new_hash);
restart:
	hash = rht_bucket_index(tbl, new_hash);
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);

		ret = true;
		break;
	}

	/* The entry may be linked in either 'tbl', 'future_tbl', or both.
	 * 'future_tbl' only exists for a short period of time during
	 * resizing. Thus traversing both is fine and the added cost is
	 * very rare.
	 */
	if (tbl != new_tbl) {
		tbl = new_tbl;
		goto restart;
	}

	unlock_buckets(new_tbl, old_tbl, new_hash);

	if (ret) {
		atomic_dec(&ht->nelems);
		rhashtable_wakeup_worker(ht);
	}

	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
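
/* A lookup sketch with a caller-supplied compare function: the hashed
 * key only selects the bucket, so the callback may apply additional
 * match criteria. struct test_obj and its fields are hypothetical:
 *
 *	static bool my_compare(void *ptr, void *arg)
 *	{
 *		struct test_obj *obj = ptr;
 *		int *key = arg;
 *
 *		return obj->key == *key && obj->alive;
 *	}
 *
 *	obj = rhashtable_lookup_compare(&ht, &key, my_compare, &key);
 */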

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = rht_obj(ht, obj) + ht->p.key_offset,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
						&arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
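
/* An insertion sketch built on the atomic lookup-and-insert above;
 * struct test_obj is again the hypothetical example type. A false
 * return means an entry with the same key already exists and the
 * caller keeps ownership of obj:
 *
 *	obj->key = 42;
 *	if (!rhashtable_lookup_insert(&ht, &obj->node))
 *		kfree(obj);
 */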

/**
 * rhashtable_lookup_compare_insert - search and insert object to hash table
 *                                    with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg)
{
	struct bucket_table *new_tbl, *old_tbl;
	u32 new_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();
	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = head_hashfn(ht, new_tbl, obj);

	lock_buckets(new_tbl, old_tbl, new_hash);

	if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
				      compare, arg)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	unlock_buckets(new_tbl, old_tbl, new_hash);
	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	list_add(&iter->walker->list, &ht->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
	rcu_read_lock();

	if (iter->walker->resize) {
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	const struct bucket_table *tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	tbl = rht_dereference_rcu(ht->tbl, ht);

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	iter->p = NULL;

out:
	if (iter->walker->resize) {
		iter->p = NULL;
		iter->slot = 0;
		iter->skip = 0;
		iter->walker->resize = false;
		return ERR_PTR(-EAGAIN);
	}

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
	rcu_read_unlock();
	iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
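
/* A walk sketch tying the calls above together. The ERR_PTR return
 * from rhashtable_walk_next() signals a resize; the iterator has
 * already rewound, so the loop simply continues. The -EAGAIN return
 * of rhashtable_walk_start() may be ignored for the same reason.
 * struct test_obj is the hypothetical example type:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&ht, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;
 *		pr_info("key %d\n", obj->key);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */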

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));
	INIT_LIST_HEAD(&ht->walkers);

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);
	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
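
/* A sketch wiring up automatic resizing: the exported watermark
 * helpers rht_grow_above_75() and rht_shrink_below_30() can be passed
 * directly as the grow/shrink decisions, which arms the deferred
 * worker in rhashtable_init(). Note that in this version
 * rht_grow_above_75() only grows while max_shift is non-zero, so the
 * sketch caps the table at an arbitrary 2^16 buckets:
 *
 *	struct rhashtable_params params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset = offsetof(struct test_obj, key),
 *		.key_len = sizeof(int),
 *		.hashfn = jhash,
 *		.nulls_base = (1U << RHT_BASE_SHIFT),
 *		.max_shift = 16,
 *		.grow_decision = rht_grow_above_75,
 *		.shrink_decision = rht_shrink_below_30,
 *	};
 *	int err = rhashtable_init(&ht, &params);
 */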

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	if (ht->p.grow_decision || ht->p.shrink_decision)
		cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);