/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
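
/*
 * Example usage (an illustrative sketch, not part of this file; the depth
 * and flags are arbitrary):
 *
 *	struct sbitmap sb;
 *	int ret;
 *
 *	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *
 * Passing shift < 0 lets sbitmap pick the word granularity itself. Free
 * with sbitmap_free(&sb) when done.
 */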

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
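
/*
 * Note that sbitmap_resize() does not reallocate sb->map; it only adjusts
 * the logical depth, so the new depth must not exceed the depth the sbitmap
 * was initialized with. An illustrative sketch:
 *
 *	sbitmap_init_node(&sb, 256, -1, GFP_KERNEL, NUMA_NO_NODE);
 *	sbitmap_resize(&sb, 64);	(use only 64 of the 256 bits)
 *	sbitmap_resize(&sb, 256);	(back to the full depth)
 */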

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth,
					SB_NR_TO_BIT(sb, alloc_hint),
					!round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
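
/*
 * An illustrative allocation sketch (error handling elided):
 *
 *	int nr = sbitmap_get(&sb, 0, false);
 *
 *	if (nr < 0)
 *		(the map was full)
 *	else
 *		(bit nr now belongs to the caller, to be released later
 *		 with sbitmap_clear_bit(&sb, nr))
 *
 * With round_robin == false, __sbitmap_get_word() is allowed to wrap within
 * a word, favoring reuse near the hint; with round_robin == true, bits are
 * handed out in increasing order within each word.
 */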

int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
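
/*
 * sbitmap_get_shallow() is like sbitmap_get(), but caps how many bits of
 * each word may be used. An illustrative sketch with shallow_depth == 2:
 *
 *	int nr = sbitmap_get_shallow(&sb, hint, 2);
 *
 * Only the first 2 bits of each word are considered, so at most
 * 2 * sb.map_nr bits can be allocated this way across the whole map.
 */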

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long ret;

		ret = find_first_zero_bit(&word->word, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		weight += bitmap_weight(&word->word, word->depth);
	}
	return weight;
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
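
/*
 * The output of sbitmap_bitmap_show() is a classic hex dump: an eight-digit
 * byte offset, then up to 16 bytes per line in two-byte groups. For example
 * (illustrative), a 24-bit map with the low 10 bits set would render as:
 *
 *	00000000: ff03 00
 */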

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
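
/*
 * Worked example (using SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8 from
 * <linux/sbitmap.h>): with depth == 128, shift == 6 (64 bits per word), and
 * min_shallow_depth == UINT_MAX, shallow_depth == min(64, UINT_MAX) == 64,
 * the adjusted depth is (128 >> 6) * 64 + min(0, 64) == 128, and
 * wake_batch == clamp(128 / 8, 1, 8) == 8. With min_shallow_depth == 8
 * instead, the adjusted depth is 2 * 8 == 16 and wake_batch == 2.
 */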

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
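
/*
 * Example usage (an illustrative sketch; the parameters are arbitrary):
 *
 *	struct sbitmap_queue sbq;
 *	int ret;
 *
 *	ret = sbitmap_queue_init_node(&sbq, 128, -1, false, GFP_KERNEL,
 *				      NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *
 * Tear down with sbitmap_queue_free(&sbq) when done.
 */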

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbq_wake_up() to ensure that
		 * the batch size is updated before the wait counts.
		 */
		smp_mb__before_atomic();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
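
/*
 * A typical allocate/free pairing (illustrative sketch; blocking when the
 * map is full is the caller's job, e.g. by waiting on one of sbq->ws):
 *
 *	int nr = __sbitmap_queue_get(&sbq);
 *
 *	if (nr >= 0) {
 *		(use tag nr ...)
 *		sbitmap_queue_clear(&sbq, nr, smp_processor_id());
 *	}
 */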

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
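
/*
 * Users that only ever allocate with a bounded shallow depth should declare
 * it up front so the wake batch matches the effectively usable depth. An
 * illustrative sketch, assuming every allocation uses a shallow depth of at
 * least 16:
 *
 *	sbitmap_queue_min_shallow_depth(&sbq, 16);
 *	...
 *	nr = __sbitmap_queue_get_shallow(&sbq, 16);
 */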

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static void sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit()/waitqueue_active() in the waker and
	 * test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active(). This is __after_atomic
	 * because we just did clear_bit_unlock() in the caller.
	 */
	smp_mb__after_atomic();

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		wake_batch = READ_ONCE(sbq->wake_batch);
		/*
		 * Pairs with the memory barrier in
		 * sbitmap_queue_update_wake_batch() to ensure that we see the
		 * batch size update before the wait count is reset.
		 */
		smp_mb__before_atomic();
		/*
		 * If there are concurrent callers to sbq_wake_up(), the last
		 * one to decrement the wait count below zero will bump it back
		 * up. If there is a concurrent resize, the count reset will
		 * either cause the cmpxchg to fail or overwrite after the
		 * cmpxchg.
		 */
		atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wait_cnt + wake_batch);
		sbq_index_atomic_inc(&sbq->wake_index);
		wake_up_nr(&ws->wait, wake_batch);
	}
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	sbitmap_clear_bit_unlock(&sbq->sb, nr);
	sbq_wake_up(sbq);
	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbq_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);