/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>
/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	unsigned long __maybe_unused flags;
	bool ret = false;

	/* Silence bogus lockdep warning */
#if defined(CONFIG_LOCKDEP)
	local_irq_save(flags);
#endif
	spin_lock(&sb->map[index].swap_lock);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	do {
		mask = sb->map[index].cleared;
	} while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock(&sb->map[index].swap_lock);
#if defined(CONFIG_LOCKDEP)
	local_irq_restore(flags);
#endif
	return ret;
}
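
/*
 * Worked example for the batch move above, with illustrative values: if
 * map[index].word is 0b1111 and map[index].cleared is 0b0101, the cleared
 * mask is atomically swapped to 0 and the word becomes
 * 0b1111 & ~0b0101 = 0b1010, i.e. the two lazily freed bits become
 * allocatable again after a single pair of cmpxchg() loops.
 */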

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
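
/*
 * Illustrative usage sketch for sbitmap_init_node() above, not taken from a
 * real caller: a user that wants a 128-bit map and lets sbitmap pick the
 * word granularity might do
 *
 *	struct sbitmap sb;
 *
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE))
 *		return -ENOMEM;
 *
 * With depth == 128 and shift < 0, the loop above settles on shift == 5
 * (4U << 5 == 128 is not greater than 128), i.e. 32 bits per word and
 * map_nr == DIV_ROUND_UP(128, 32) == 4.
 */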

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(sb, i);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
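
/*
 * Worked example for the word-depth split used by both sbitmap_resize()
 * above and sbitmap_init_node(), with illustrative numbers: for
 * bits_per_word == 64 and depth == 100, map_nr == DIV_ROUND_UP(100, 64)
 * == 2, the first word gets min(100, 64) == 64 bits and the second gets
 * the remaining 36.
 */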

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(sb, index))
			break;
	} while (1);

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
					       round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
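
/*
 * Illustrative pairing of sbitmap_get() above with a later free, assuming
 * the sbitmap_clear_bit() helper from <linux/sbitmap.h>:
 *
 *	int nr = sbitmap_get(&sb, hint, false);
 *
 *	if (nr >= 0) {
 *		use_resource(nr);		// hypothetical consumer
 *		sbitmap_clear_bit(&sb, nr);
 *	}
 *
 * A negative return means every word was scanned, including a retry after
 * batch-moving any deferred clears, and no free bit was found.
 */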

int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(sb, index))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long mask = word->word & ~word->cleared;
		unsigned long ret;

		ret = find_first_zero_bit(&mask, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
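
/*
 * Worked example of the calculation above with illustrative values,
 * assuming SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8: for shift == 6
 * (64 bits per word), depth == 256 and min_shallow_depth == 8,
 * shallow_depth == min(64, 8) == 8, the usable depth becomes
 * (256 >> 6) * 8 + min(256 & 63, 8) == 32, and wake_batch ==
 * clamp(32 / 8, 1, 8) == 4.
 */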

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
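
/*
 * Illustrative initialization sketch for sbitmap_queue_init_node() above,
 * not taken from a real caller: a tag allocator with 64 tags, random
 * per-cpu hints rather than strict round-robin allocation, and an
 * auto-selected word granularity might do
 *
 *	struct sbitmap_queue sbq;
 *
 *	if (sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *
 * Each of the SBQ_WAIT_QUEUES wait queues then starts with wait_cnt equal
 * to the computed wake_batch.
 */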

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb__before_atomic();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
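
/*
 * Illustrative allocate/free round trip over the queue API above, with a
 * hypothetical consumer: the bit is freed lazily via the per-word cleared
 * mask, and waiters are woken in wake_batch-sized groups.
 *
 *	int nr = __sbitmap_queue_get(&sbq);
 *
 *	if (nr >= 0) {
 *		use_tag(nr);			// hypothetical consumer
 *		sbitmap_queue_clear(&sbq, nr, raw_smp_processor_id());
 *	}
 */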

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->accounted) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->accounted = 1;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->accounted) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->accounted = 0;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
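
/*
 * Illustrative blocking-allocation loop built on the two helpers above,
 * loosely modeled on how blk-mq waits for a tag. It assumes the
 * sbq_wait_ptr() helper from <linux/sbitmap.h>, a caller-owned atomic_t
 * wait_index, and a 'wait' sbq_wait entry initialized per the header's
 * conventions; the retry policy is the caller's, not this file's:
 *
 *	struct sbq_wait_state *ws = sbq_wait_ptr(&sbq, &wait_index);
 *	int nr;
 *
 *	do {
 *		sbitmap_prepare_to_wait(&sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(&sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(&sbq, ws, &wait);
 */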