// SPDX-License-Identifier: GPL-2.0-only
/*
 * idr-test.c: Test the IDR API
 * Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
 */
#include <linux/bitmap.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>

#include "test.h"

#define DUMMY_PTR	((void *)0x10)

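/*
 * Callback for idr_for_each(): check that the stored item's index matches
 * the id it is filed under, then free it.
 */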
int item_idr_free(int id, void *p, void *data)
{
	struct item *item = p;
	assert(item->index == id);
	free(p);

	return 0;
}

void item_idr_remove(struct idr *idr, int id)
{
	struct item *item = idr_find(idr, id);
	assert(item->index == id);
	idr_remove(idr, id);
	free(item);
}

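/*
 * idr_alloc_cyclic() remembers where the last allocation ended and wraps
 * from the end of the range back to the start.  Allocate across the
 * 0x4000 boundary to check that the wrapped ids come out where expected.
 */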
void idr_alloc_test(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
	idr_remove(&idr, 0x3ffd);
	idr_remove(&idr, 0);

	for (i = 0x3ffe; i < 0x4003; i++) {
		int id;
		struct item *item;

		if (i < 0x4000)
			item = item_create(i, 0);
		else
			item = item_create(i - 0x3fff, 0);

		id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

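/* Store an odd pointer value, then swap it in place with idr_replace(). */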
void idr_replace_test(void)
{
	DEFINE_IDR(idr);

	idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
	idr_replace(&idr, &idr, 10);

	idr_destroy(&idr);
}

/*
 * Unlike the radix tree, you can put a NULL pointer -- with care -- into
 * the IDR.  Some interfaces, like idr_find(), do not distinguish between
 * "present, value is NULL" and "not present", but that's exactly what some
 * users want.
 */
void idr_null_test(void)
{
	int i;
	DEFINE_IDR(idr);

	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 0);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 0; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
	}

	assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
	assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
	assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
	assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
	idr_remove(&idr, 5);
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
	idr_remove(&idr, 5);

	for (i = 0; i < 9; i++) {
		idr_remove(&idr, i);
		assert(!idr_is_empty(&idr));
	}
	idr_remove(&idr, 8);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 9);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
	assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
	assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
	}

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));
}

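/*
 * idr_preload() lets callers take the allocation out of the critical
 * section.  A minimal sketch of the usual caller pattern (the spinlock is
 * hypothetical; real users supply their own):
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&idr, ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *
 * Here we just check that GFP_NOWAIT allocations succeed out of the
 * preloaded pool.
 */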
void idr_nowait_test(void)
{
	unsigned int i;
	DEFINE_IDR(idr);

	idr_preload(GFP_KERNEL);

	for (i = 0; i < 3; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
	}

	idr_preload_end();

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

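/*
 * Allocate a scattered set of ids and check that idr_get_next() walks
 * them in ascending order, for several values of the IDR base.
 */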
void idr_get_next_test(int base)
{
	unsigned long i;
	int nextid;
	int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);

	for (i = 0; indices[i]; i++) {
		struct item *item = item_create(indices[i], 0);
		assert(idr_alloc(&idr, item, indices[i], indices[i + 1],
				 GFP_KERNEL) == indices[i]);
	}

	for (i = 0, nextid = 0; indices[i]; i++) {
		idr_get_next(&idr, &nextid);
		assert(nextid == indices[i]);
		nextid++;
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

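/*
 * IDs above INT_MAX can be stored via idr_alloc_u32(), but the int-based
 * lookup interfaces cannot return them: idr_get_next() must find nothing
 * for such ids.  The printk markers below bracket the warnings the core
 * code emits while we poke at those ids.
 */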
int idr_u32_cb(int id, void *ptr, void *data)
{
	BUG_ON(id < 0);
	BUG_ON(ptr != DUMMY_PTR);
	return 0;
}

void idr_u32_test1(struct idr *idr, u32 handle)
{
	static bool warned = false;
	u32 id = handle;
	int sid = 0;
	void *ptr;

	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL));
	BUG_ON(id != handle);
	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC);
	BUG_ON(id != handle);
	if (!warned && id > INT_MAX)
		printk("vvv Ignore these warnings\n");
	ptr = idr_get_next(idr, &sid);
	if (id > INT_MAX) {
		BUG_ON(ptr != NULL);
		BUG_ON(sid != 0);
	} else {
		BUG_ON(ptr != DUMMY_PTR);
		BUG_ON(sid != id);
	}
	idr_for_each(idr, idr_u32_cb, NULL);
	if (!warned && id > INT_MAX) {
		printk("^^^ Warnings over\n");
		warned = true;
	}
	BUG_ON(idr_remove(idr, id) != DUMMY_PTR);
	BUG_ON(!idr_is_empty(idr));
}

void idr_u32_test(int base)
{
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);
	idr_u32_test1(&idr, 10);
	idr_u32_test1(&idr, 0x7fffffff);
	idr_u32_test1(&idr, 0x80000000);
	idr_u32_test1(&idr, 0x80000001);
	idr_u32_test1(&idr, 0xffe00000);
	idr_u32_test1(&idr, 0xffffffff);
}

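/*
 * Store pointers at every alignment within a string and make sure that
 * lookup, iteration, replace and remove all see exactly the pointer that
 * was stored, including NULL.
 */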
static void idr_align_test(struct idr *idr)
{
	char name[] = "Motorola 68000";
	int i, id;
	void *entry;

	for (i = 0; i < 9; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 1; i < 10; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 1);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 2; i < 11; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 2);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 3; i < 12; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 3);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
		BUG_ON(idr_alloc(idr, &name[i + 1], 0, 0, GFP_KERNEL) != 1);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 1);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 0);
		BUG_ON(!idr_is_empty(idr));
	}

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 0);
		idr_for_each_entry(idr, entry, id);
		idr_replace(idr, &name[i], 0);
		idr_for_each_entry(idr, entry, id);
		BUG_ON(idr_find(idr, 0) != &name[i]);
		idr_remove(idr, 0);
	}

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 1);
		idr_remove(idr, 1);
		idr_for_each_entry(idr, entry, id);
		idr_replace(idr, &name[i + 1], 0);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 0);
	}
}

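/*
 * Top-level IDR checks: bulk allocation and removal, failure when a range
 * is exhausted, wrap-around near INT_MAX for both idr_alloc() and
 * idr_alloc_cyclic(), then each of the more focused tests above.
 */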
void idr_checks(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	for (i = 0; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
	}

	assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 5000; i++)
		item_idr_remove(&idr, i);

	idr_remove(&idr, 3);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_remove(&idr, 3);
	idr_remove(&idr, 0);

	assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
	idr_remove(&idr, 1);
	for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
		assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
	idr_remove(&idr, 1 << 30);
	idr_destroy(&idr);

	for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
	}
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_set_cursor(&idr, INT_MAX - 3UL);
	for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
		struct item *item;
		unsigned int id;
		if (i <= INT_MAX)
			item = item_create(i, 0);
		else
			item = item_create(i - INT_MAX - 1, 0);

		id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	idr_replace_test();
	idr_alloc_test();
	idr_null_test();
	idr_nowait_test();
	idr_get_next_test(0);
	idr_get_next_test(1);
	idr_get_next_test(4);
	idr_u32_test(4);
	idr_u32_test(1);
	idr_u32_test(0);
	idr_align_test(&idr);
}

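/*
 * Stub out the module boilerplate so that lib/test_ida.c can be included
 * and run directly in userspace.
 */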
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
#define MODULE_LICENSE(x)
#define dump_stack()	assert(0)
void ida_dump(struct ida *);

#include "../../../lib/test_ida.c"

/*
 * Check that we get the correct error when we run out of memory doing
 * allocations.  In userspace, GFP_NOWAIT will always fail an allocation.
 * The first test is for not having a bitmap available, and the second test
 * is for not being able to allocate a level of the radix tree.
 */
void ida_check_nomem(void)
{
	DEFINE_IDA(ida);
	int id;

	id = ida_alloc_min(&ida, 256, GFP_NOWAIT);
	IDA_BUG_ON(&ida, id != -ENOMEM);
	id = ida_alloc_min(&ida, 1UL << 30, GFP_NOWAIT);
	IDA_BUG_ON(&ida, id != -ENOMEM);
	IDA_BUG_ON(&ida, !ida_is_empty(&ida));
}

/*
 * Check handling of conversions between exceptional entries and full bitmaps.
 */
void ida_check_conv_user(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 1000000; i++) {
		int id = ida_alloc(&ida, GFP_NOWAIT);
		if (id == -ENOMEM) {
			IDA_BUG_ON(&ida, ((i % IDA_BITMAP_BITS) !=
					  BITS_PER_XA_VALUE) &&
					 ((i % IDA_BITMAP_BITS) != 0));
			id = ida_alloc(&ida, GFP_KERNEL);
		} else {
			IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) ==
					 BITS_PER_XA_VALUE);
		}
		IDA_BUG_ON(&ida, id != i);
	}
	ida_destroy(&ida);
}

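/*
 * Hammer the IDA with random allocations and frees for ten seconds,
 * mirroring the state in a plain bitmap and checking the two agree.
 */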
void ida_check_random(void)
{
	DEFINE_IDA(ida);
	DECLARE_BITMAP(bitmap, 2048);
	unsigned int i;
	time_t s = time(NULL);

repeat:
	memset(bitmap, 0, sizeof(bitmap));
	for (i = 0; i < 100000; i++) {
		int r = rand();
		int bit = r & 2047;
		if (test_bit(bit, bitmap)) {
			__clear_bit(bit, bitmap);
			ida_free(&ida, bit);
		} else {
			__set_bit(bit, bitmap);
			IDA_BUG_ON(&ida, ida_alloc_min(&ida, bit, GFP_KERNEL)
					!= bit);
		}
	}
	ida_destroy(&ida);
	if (time(NULL) < s + 10)
		goto repeat;
}

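/* Check the ida_simple_get()/ida_simple_remove() convenience wrappers. */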
void ida_simple_get_remove_test(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 10000; i++) {
		assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
	}
	assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 10000; i++) {
		ida_simple_remove(&ida, i);
	}
	assert(ida_is_empty(&ida));

	ida_destroy(&ida);
}

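/*
 * IDA tests that only make sense in userspace.  radix_tree_cpu_dead()
 * drains per-cpu preload state on either side of the run.
 */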
void user_ida_checks(void)
{
	radix_tree_cpu_dead(1);

	ida_check_nomem();
	ida_check_conv_user();
	ida_check_random();
	ida_simple_get_remove_test();

	radix_tree_cpu_dead(1);
}

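/* pthread entry point: each thread must register itself with RCU. */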
static void *ida_random_fn(void *arg)
{
	rcu_register_thread();
	ida_check_random();
	rcu_unregister_thread();
	return NULL;
}

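/* Run the random test concurrently from twenty threads. */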
void ida_thread_tests(void)
{
	pthread_t threads[20];
	int i;

	for (i = 0; i < ARRAY_SIZE(threads); i++)
		if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
			perror("creating ida thread");
			exit(1);
		}

	while (i--)
		pthread_join(threads[i], NULL);
}

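/* ida_checks() and ida_exit() are provided by the included test_ida.c. */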
void ida_tests(void)
{
	user_ida_checks();
	ida_checks();
	ida_exit();
	ida_thread_tests();
}

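/* Weak so that a combined test binary can provide its own main(). */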
int __weak main(void)
{
	radix_tree_init();
	idr_checks();
	ida_tests();
	radix_tree_cpu_dead(1);
	rcu_barrier();
	if (nr_allocated)
		printf("nr_allocated = %d\n", nr_allocated);
	return 0;
}