// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask, val;
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&map->swap_lock, flags);

	if (!map->cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = map->word;
	} while (cmpxchg(&map->word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock_irqrestore(&map->swap_lock, flags);
	return ret;
}
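
/*
 * Illustrative example, using a single 8-bit word for readability: with
 * word == 0b00001111 and cleared == 0b00000101, the batch move above leaves
 * word == 0b00001010 and cleared == 0. Bits parked in ->cleared by
 * sbitmap_deferred_clear_bit() only become allocatable again after this move.
 */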

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
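
/*
 * Minimal usage sketch (illustrative only; the depth and allocation hint
 * below are arbitrary, and error handling is trimmed):
 *
 *	struct sbitmap sb;
 *	int nr;
 *
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE))
 *		return -ENOMEM;
 *	nr = sbitmap_get(&sb, 0, false);
 *	if (nr >= 0)
 *		sbitmap_clear_bit(&sb, nr);
 *	sbitmap_free(&sb);
 *
 * sbitmap_get() returns -1 when no bit is free; sbitmap_clear_bit() and
 * sbitmap_free() are the inline helpers from <linux/sbitmap.h>.
 */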

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}
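
/*
 * Example of the wrap behaviour (illustrative): with depth == 8, hint == 5,
 * wrap == true and bits 5-7 already set, find_next_zero_bit() returns 8, so
 * the search restarts once from bit 0 instead of failing immediately.
 */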

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	struct sbitmap_word *map = &sb->map[index];
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
					       round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(&sb->map[index]))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
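
/*
 * Illustrative output: for a hypothetical 16-bit map with bits 0-9 set and
 * nothing deferred in ->cleared, sbitmap_bitmap_show() emits
 *
 *	00000000: ff03
 *
 * i.e. a hex dump of the in-use bits, sixteen bytes per output line, with
 * the lowest-numbered bits in the first byte.
 */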

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
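
/*
 * Worked example for sbq_calc_wake_batch() (illustrative numbers): with
 * shift == 6 (64 bits per word), depth == 256 and min_shallow_depth == 8,
 * each word contributes at most 8 usable bits, so the effective depth is
 * (256 >> 6) * 8 + min(256 & 63, 8) == 32, and the batch becomes
 * clamp(32 / SBQ_WAIT_QUEUES, 1, SBQ_WAKE_BATCH) == clamp(4, 1, 8) == 4.
 */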

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
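
/*
 * Minimal usage sketch for the queued variant (illustrative only; depth,
 * shift and NUMA node are arbitrary, and error handling is trimmed):
 *
 *	struct sbitmap_queue sbq;
 *	int nr;
 *
 *	if (sbitmap_queue_init_node(&sbq, 256, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *	nr = __sbitmap_queue_get(&sbq);
 *	if (nr >= 0)
 *		sbitmap_queue_clear(&sbq, nr, raw_smp_processor_id());
 *	sbitmap_queue_free(&sbq);
 *
 * __sbitmap_queue_get() returns -1 when no bit is free; callers that need to
 * sleep until one frees up combine it with the sbq_wait helpers at the end of
 * this file. sbitmap_queue_free() is the inline helper from <linux/sbitmap.h>.
 */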
397
Omar Sandovala3275532018-05-09 17:16:31 -0700398static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
399 unsigned int depth)
Omar Sandoval88459642016-09-17 08:38:44 -0600400{
Omar Sandovala3275532018-05-09 17:16:31 -0700401 unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
Omar Sandoval6c0ca7a2017-01-18 11:55:22 -0800402 int i;
403
404 if (sbq->wake_batch != wake_batch) {
405 WRITE_ONCE(sbq->wake_batch, wake_batch);
406 /*
Ming Leie6fc4642018-05-24 11:00:39 -0600407 * Pairs with the memory barrier in sbitmap_queue_wake_up()
408 * to ensure that the batch size is updated before the wait
409 * counts.
Omar Sandoval6c0ca7a2017-01-18 11:55:22 -0800410 */
Andrea Parria0934fd2019-05-20 19:23:57 +0200411 smp_mb();
Omar Sandoval6c0ca7a2017-01-18 11:55:22 -0800412 for (i = 0; i < SBQ_WAIT_QUEUES; i++)
413 atomic_set(&sbq->ws[i].wait_cnt, 1);
414 }
Omar Sandovala3275532018-05-09 17:16:31 -0700415}
416
417void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
418{
419 sbitmap_queue_update_wake_batch(sbq, depth);
Omar Sandoval88459642016-09-17 08:38:44 -0600420 sbitmap_resize(&sbq->sb, depth);
421}
422EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
423
Omar Sandovalf4a644d2016-09-17 01:28:24 -0700424int __sbitmap_queue_get(struct sbitmap_queue *sbq)
Omar Sandoval40aabb62016-09-17 01:28:23 -0700425{
Omar Sandoval05fd0952016-09-17 01:28:26 -0700426 unsigned int hint, depth;
Omar Sandoval40aabb62016-09-17 01:28:23 -0700427 int nr;
428
429 hint = this_cpu_read(*sbq->alloc_hint);
Omar Sandoval05fd0952016-09-17 01:28:26 -0700430 depth = READ_ONCE(sbq->sb.depth);
431 if (unlikely(hint >= depth)) {
432 hint = depth ? prandom_u32() % depth : 0;
433 this_cpu_write(*sbq->alloc_hint, hint);
434 }
Omar Sandovalf4a644d2016-09-17 01:28:24 -0700435 nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
Omar Sandoval40aabb62016-09-17 01:28:23 -0700436
437 if (nr == -1) {
438 /* If the map is full, a hint won't do us much good. */
439 this_cpu_write(*sbq->alloc_hint, 0);
Omar Sandovalf4a644d2016-09-17 01:28:24 -0700440 } else if (nr == hint || unlikely(sbq->round_robin)) {
Omar Sandoval40aabb62016-09-17 01:28:23 -0700441 /* Only update the hint if we used it. */
442 hint = nr + 1;
Omar Sandoval05fd0952016-09-17 01:28:26 -0700443 if (hint >= depth - 1)
Omar Sandoval40aabb62016-09-17 01:28:23 -0700444 hint = 0;
445 this_cpu_write(*sbq->alloc_hint, hint);
446 }
447
448 return nr;
449}
450EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
451
Omar Sandovalc05e6672017-04-14 00:59:58 -0700452int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
453 unsigned int shallow_depth)
454{
455 unsigned int hint, depth;
456 int nr;
457
Omar Sandoval61445b562018-05-09 17:29:24 -0700458 WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);
459
Omar Sandovalc05e6672017-04-14 00:59:58 -0700460 hint = this_cpu_read(*sbq->alloc_hint);
461 depth = READ_ONCE(sbq->sb.depth);
462 if (unlikely(hint >= depth)) {
463 hint = depth ? prandom_u32() % depth : 0;
464 this_cpu_write(*sbq->alloc_hint, hint);
465 }
466 nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);
467
468 if (nr == -1) {
469 /* If the map is full, a hint won't do us much good. */
470 this_cpu_write(*sbq->alloc_hint, 0);
471 } else if (nr == hint || unlikely(sbq->round_robin)) {
472 /* Only update the hint if we used it. */
473 hint = nr + 1;
474 if (hint >= depth - 1)
475 hint = 0;
476 this_cpu_write(*sbq->alloc_hint, hint);
477 }
478
479 return nr;
480}
481EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
482
Omar Sandovala3275532018-05-09 17:16:31 -0700483void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
484 unsigned int min_shallow_depth)
485{
486 sbq->min_shallow_depth = min_shallow_depth;
487 sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
488}
489EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
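
/*
 * Illustrative pairing (the value 4 is arbitrary): a caller that allocates
 * with __sbitmap_queue_get_shallow(&sbq, 4) must first announce that limit
 * via sbitmap_queue_min_shallow_depth(&sbq, 4) so the wake batch stays small
 * enough; __sbitmap_queue_get_shallow() warns if the limit was not announced.
 */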

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance (such as the request
	 * in blk-mq) by this bit to avoid a race with re-allocation. Its
	 * pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
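
/*
 * Sketch of the blocking allocation loop these wait helpers support, modelled
 * on callers such as blk-mq (illustrative only; "sbq" and the caller-owned
 * atomic_t cursor "wait_index" are assumed to exist, and signal handling is
 * omitted):
 *
 *	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, &wait_index);
 *	DEFINE_SBQ_WAIT(wait);
 *	int nr;
 *
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *		sbitmap_finish_wait(sbq, ws, &wait);
 *		ws = sbq_wait_ptr(sbq, &wait_index);
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */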