/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/kernel.h>
#include <linux/slab.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @depth: Number of bits being used in @word/@cleared
	 */
	unsigned long depth;

	/**
	 * @word: word holding free bits
	 */
	unsigned long word ____cacheline_aligned_in_smp;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;

	/**
	 * @swap_lock: Held while swapping word <-> cleared
	 */
	spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait_cnt: Number of frees remaining before we wake up.
	 */
	atomic_t wait_cnt;

	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/**
	 * @ws_active: Count of currently active ws waitqueues.
	 */
	atomic_t ws_active;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
	 */
	unsigned int min_shallow_depth;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node);

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	kfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

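/*
 * Example (an illustrative sketch, not part of this API; the depth of 128
 * is an arbitrary choice): a minimal init/use/free sequence. Passing a
 * negative shift lets a suitable word size be chosen automatically.
 *
 *	struct sbitmap sb;
 *
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE))
 *		return -ENOMEM;
 *	...
 *	sbitmap_free(&sb);
 */
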
/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 * @alloc_hint: Hint for where to start searching for a free bit.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @alloc_hint: Hint for where to start searching for a free bit.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth);

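/*
 * Worked example (illustrative only, assuming 64-bit words, i.e. a shift of
 * 6): a shallow_depth of 1 << (6 - 1) == 32 caps each word at 32 of its 64
 * bits, so the low-priority caller described above can never take more than
 * half of the bitmap:
 *
 *	int nr = sbitmap_get_shallow(&sb, hint, 1UL << (sb.shift - 1));
 *
 * where &sb and hint stand in for the caller's bitmap and allocation hint.
 */
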
/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))

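/*
 * For instance (illustrative only), with a shift of 6 (64-bit words), bit
 * number 70 decomposes as SB_NR_TO_INDEX() == 70 >> 6 == 1 (the second
 * word) and SB_NR_TO_BIT() == 70 & 63 == 6 (the seventh bit of that word).
 */
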
typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @start: Where to start the iteration.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   sb->map[index].depth - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}

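/*
 * Example (an illustrative sketch; the callback name and counter are made
 * up): counting the set bits with sbitmap_for_each_set(). Returning true
 * from the callback keeps the iteration going.
 *
 *	static bool count_bit(struct sbitmap *sb, unsigned int bitnr,
 *			      void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(&sb, count_bit, &count);
 */
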
static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special: rather than actually clearing the bit, it sets the
 * corresponding bit in the ->cleared mask instead. This is paired with the
 * caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}

static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
					    unsigned int bitnr)
{
	clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 *
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	free_percpu(sbq->alloc_hint);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
					    unsigned int *cpu,
					    unsigned int shallow_depth)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);

/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

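/*
 * Example (an illustrative sketch only; the depth of 128 is arbitrary and
 * error handling is abbreviated): a full allocate/free round trip on a
 * queue, feeding the CPU hint from the get back into the clear.
 *
 *	struct sbitmap_queue sbq;
 *	unsigned int cpu;
 *	int nr;
 *
 *	if (sbitmap_queue_init_node(&sbq, 128, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *
 *	nr = sbitmap_queue_get(&sbq, &cpu);
 *	if (nr >= 0) {
 *		... use bit nr as a tag ...
 *		sbitmap_queue_clear(&sbq, nr, cpu);
 *	}
 *	sbitmap_queue_free(&sbq);
 */
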
/* Advance a wait queue index, wrapping at SBQ_WAIT_QUEUES (a power of two). */
static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)						\
	struct sbq_wait name = {					\
		.sbq = NULL,						\
		.wait = {						\
			.private = current,				\
			.func = autoremove_wake_function,		\
			.entry = LIST_HEAD_INIT((name).wait.entry),	\
		}							\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait);

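/*
 * Example (an illustrative sketch; &sbq and wait_index are assumed caller
 * state, and the retry structure is loosely modeled on how block-layer tag
 * allocators wait for a free bit): prepare, retry the allocation, and sleep
 * until it succeeds. autoremove_wake_function() removes the entry on each
 * wakeup, so re-preparing every iteration is the expected pattern.
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws;
 *	unsigned int cpu;
 *	int nr;
 *
 *	ws = sbq_wait_ptr(&sbq, &wait_index);
 *	do {
 *		sbitmap_prepare_to_wait(&sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = sbitmap_queue_get(&sbq, &cpu);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(&sbq, ws, &wait);
 */
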
/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */