/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/random.h>
#include <linux/sbitmap.h>

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
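
/*
 * Example (an illustrative sketch, not part of this file): a caller that
 * needs 128 tags and has no NUMA preference might do:
 *
 *	struct sbitmap sb;
 *	int ret;
 *
 *	ret = sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 *
 * Worked numbers for the sizing above: with shift < 0 and depth == 100 on a
 * 64-bit machine, the cacheline-spreading loop shrinks shift to 4 (16 bits
 * per word), so map_nr == DIV_ROUND_UP(100, 16) == 7, i.e. six words of
 * depth 16 and a tail word of depth 4.
 */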

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
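
/*
 * Find and set the first zero bit in one word, searching from @hint. If
 * @wrap is true and the search started at a non-zero hint, a failed scan is
 * retried once from bit 0 so that the whole word is covered. Returns the
 * allocated bit number, or -1 if no free bit was found.
 */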
static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
			      bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(&word->word, word->depth, hint);
		if (unlikely(nr >= word->depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit(nr, &word->word))
			break;

		hint = nr + 1;
		if (hint >= word->depth - 1)
			hint = 0;
	}

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
		nr = __sbitmap_get_word(&sb->map[index],
					SB_NR_TO_BIT(sb, alloc_hint),
					!round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
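
/*
 * Illustration (not part of this file): with sb->shift == 6 (64-bit words),
 * bit number 70 decomposes into SB_NR_TO_INDEX() == 70 >> 6 == 1 and
 * SB_NR_TO_BIT() == 70 & 63 == 6, i.e. bit 6 of the second word;
 * sbitmap_get() reverses the mapping with nr += index << sb->shift.
 */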

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long ret;

		ret = find_first_zero_bit(&word->word, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		weight += bitmap_weight(&word->word, word->depth);
	}
	return weight;
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

static unsigned int sbq_calc_wake_batch(unsigned int depth)
{
	unsigned int wake_batch;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap is
	 * enough to wake up all of the queues.
	 */
	wake_batch = SBQ_WAKE_BATCH;
	if (wake_batch > depth / SBQ_WAIT_QUEUES)
		wake_batch = max(1U, depth / SBQ_WAIT_QUEUES);

	return wake_batch;
}
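
/*
 * Worked example (assuming SBQ_WAKE_BATCH == 8 and SBQ_WAIT_QUEUES == 8 from
 * <linux/sbitmap.h>): depth 256 keeps the full batch of 8, since
 * 256 / 8 == 32 >= 8; depth 16 shrinks the batch to 16 / 8 == 2; any depth
 * below 8 clamps to the minimum batch of 1.
 */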

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->wake_batch = sbq_calc_wake_batch(depth);
	atomic_set(&sbq->wake_index, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
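
/*
 * Example (an illustrative sketch, not part of this file): a 64-tag queue
 * with an automatic shift and randomized per-cpu hints:
 *
 *	struct sbitmap_queue sbq;
 *	int ret;
 *
 *	ret = sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
 *				      NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 */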

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbq->wake_batch = sbq_calc_wake_batch(depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
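
/*
 * Pick the wait queue to wake next: scan the SBQ_WAIT_QUEUES queues starting
 * at wake_index, return the first one with waiters, and pull wake_index up
 * to it so that successive wakeups rotate across the queues. Returns NULL
 * if no queue has waiters.
 */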
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}
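
/*
 * Called after clearing a bit: charge the free to the next wait queue's
 * batch counter, and once a full batch of frees has accumulated, replenish
 * the counter, advance wake_index, and wake that queue.
 */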
static void sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	int wait_cnt;

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit()/waitqueue_active() in the waker and
	 * test_and_set_bit()/prepare_to_wait()/finish_wait() in the waiter. See
	 * the comment on waitqueue_active(). This is __after_atomic because we
	 * just did clear_bit() in the caller.
	 */
	smp_mb__after_atomic();

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&ws->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(sbq->wake_batch, &ws->wait_cnt);
		sbq_index_atomic_inc(&sbq->wake_index);
		wake_up(&ws->wait);
	}
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	sbitmap_clear_bit(&sbq->sb, nr);
	sbq_wake_up(sbq);
	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
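
/*
 * Example (an illustrative sketch, not part of this file): the usual
 * allocate/free pairing around a tag looks like:
 *
 *	int nr = __sbitmap_queue_get(&sbq);
 *
 *	if (nr < 0)
 *		return -EBUSY;
 *
 * and later, once the tag is no longer in use:
 *
 *	sbitmap_queue_clear(&sbq, nr, smp_processor_id());
 *
 * Passing the freeing cpu lets a non-round-robin queue cache nr as that
 * cpu's next allocation hint, as done above.
 */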

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbq_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);