/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/kernel.h>
#include <linux/slab.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @depth: Number of bits being used in @word/@cleared
	 */
	unsigned long depth;

	/**
	 * @word: word holding free bits
	 */
	unsigned long word ____cacheline_aligned_in_smp;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;

	/**
	 * @swap_lock: Held while swapping word <-> cleared
	 */
	spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait_cnt: Number of frees remaining before we wake up.
	 */
	atomic_t wait_cnt;

	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/**
	 * @ws_active: count of currently active ws waitqueues
	 */
	atomic_t ws_active;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
	 */
	unsigned int min_shallow_depth;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node);
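
/*
 * Example: a minimal sketch of the plain sbitmap lifecycle, assuming a
 * caller that wants 128 bits and is happy with the default shift. Not part
 * of the API; error handling beyond the init check is elided.
 *
 *	struct sbitmap sb;
 *	int ret, nr;
 *
 *	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *	nr = sbitmap_get(&sb, 0, false);
 *	if (nr >= 0)
 *		sbitmap_clear_bit(&sb, nr);
 *	sbitmap_free(&sb);
 */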

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	kfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 * @alloc_hint: Hint for where to start searching for a free bit.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @alloc_hint: Hint for where to start searching for a free bit.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth);
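
/*
 * Sketch of the split described above, assuming 64-bit words (@sb->shift of
 * 6): the high-priority class allocates from the full depth while the
 * low-priority class is capped at half of each word, so it cannot starve
 * the high-priority class.
 *
 *	int hi = sbitmap_get(sb, hint, false);
 *	int lo = sbitmap_get_shallow(sb, hint, 1UL << (sb->shift - 1));
 */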

/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
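
/*
 * For example, with (sb)->shift of 6 (64-bit words), bit number 137 maps to
 * word index 137 >> 6 == 2 and bit 137 & 63 == 9 within that word.
 */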

typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @start: Where to start the iteration.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   sb->map[index].depth - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}
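
/*
 * Example: a minimal callback-based walk that counts the set bits,
 * illustrating the iteration API above. The callback returns true to keep
 * iterating.
 *
 *	static bool count_bit(struct sbitmap *sb, unsigned int bitnr,
 *			      void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(sb, count_bit, &count);
 */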

static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special, since it doesn't actually clear the bit, rather it
 * sets the corresponding bit in the ->cleared mask instead. Paired with
 * the caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}
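
/*
 * Illustrative free-path sketch: the freeing CPU only touches ->cleared,
 * leaving the allocation-hot ->word cacheline alone; the allocator later
 * folds ->cleared back into ->word (under @swap_lock) when a word looks
 * full, as described above.
 *
 *	sbitmap_deferred_clear_bit(sb, nr);
 */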

static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
					    unsigned int bitnr)
{
	clear_bit_unlock(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 *
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	free_percpu(sbq->alloc_hint);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}
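
/*
 * Example: a minimal allocate/use/free pairing for a non-blocking caller
 * that gives up when the map is exhausted; use_tag() is a stand-in for the
 * caller's own work. @cpu from the get side must be fed back to
 * sbitmap_queue_clear() so the per-cpu hint stays accurate.
 *
 *	unsigned int cpu;
 *	int nr;
 *
 *	nr = sbitmap_queue_get(sbq, &cpu);
 *	if (nr < 0)
 *		return -EBUSY;
 *	use_tag(nr);
 *	sbitmap_queue_clear(sbq, nr, cpu);
 */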

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
					    unsigned int *cpu,
					    unsigned int shallow_depth)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
	put_cpu();
	return nr;
}

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);
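
/*
 * Sketch of the required ordering, assuming a queue whose shallow users
 * never pass a depth below 8: declare the minimum right after init, before
 * any shallow allocation can happen.
 *
 *	sbitmap_queue_init_node(sbq, depth, -1, false, GFP_KERNEL, node);
 *	sbitmap_queue_min_shallow_depth(sbq, 8);
 *	...
 *	nr = sbitmap_queue_get_shallow(sbq, &cpu, 8);
 */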

/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)							\
	struct sbq_wait name = {						\
		.sbq = NULL,							\
		.wait = {							\
			.private	= current,				\
			.func		= autoremove_wake_function,		\
			.entry		= LIST_HEAD_INIT((name).wait.entry),	\
		}								\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait);
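
/*
 * Illustrative blocking-allocation loop built from these wrappers, loosely
 * modeled on how a tag allocator might sleep for a free bit. @wait_index is
 * assumed to be a per-user atomic_t (see sbq_wait_ptr()); signal handling
 * and retry policy are elided.
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, &wait_index);
 *	unsigned int cpu;
 *	int nr;
 *
 *	for (;;) {
 *		nr = sbitmap_queue_get(sbq, &cpu);
 *		if (nr >= 0)
 *			break;
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = sbitmap_queue_get(sbq, &cpu);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	}
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */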

/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */