// SPDX-License-Identifier: GPL-2.0+
/*
 * test_xarray.c: Test the XArray API
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/xarray.h>
#include <linux/module.h>

static unsigned int tests_run;
static unsigned int tests_passed;

#ifndef XA_DEBUG
# ifdef __KERNEL__
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif

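/*
 * xa_mk_value() can only encode integers up to LONG_MAX (the low bit
 * tags the entry as a value), so fold larger indices down.
 */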
static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}

static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_store(xa, index, xa_mk_index(index), gfp);
}

static void xa_insert_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
				GFP_KERNEL) != 0);
}

static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	u32 id;

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
				gfp) != 0);
	XA_BUG_ON(xa, id != index);
}

static void xa_erase_index(struct xarray *xa, unsigned long index)
{
	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}

/*
 * If anyone needs this, please move it to xarray.c. We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}

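/* xa_err() turns an error entry into an errno and anything else into 0. */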
static noinline void check_xa_err(struct xarray *xa)
{
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}

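/*
 * When a node is deleted (e.g. because the tree shrank), its slots are
 * filled with retry entries; a lockless walker must notice and restart.
 */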
static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);
	rcu_read_unlock();

	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	rcu_read_lock();
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_index(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}

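/* Absent indices load as NULL; present ones read back what was stored. */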
static noinline void check_xa_load(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j < i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
	}

	for (i = 0; i < 1024; i++) {
		for (j = 0; j < 1024; j++) {
			void *entry = xa_load(xa, j);
			if (j >= i)
				XA_BUG_ON(xa, xa_to_value(entry) != j);
			else
				XA_BUG_ON(xa, entry);
		}
		xa_erase_index(xa, i);
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

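/* Exercise setting, clearing and inheritance of the search marks. */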
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_2);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_index(index),
				GFP_KERNEL);
		for (i = base; i < next; i++) {
			XA_STATE(xas, xa, i);
			unsigned int seen = 0;
			void *entry;

			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2));

			/* We should see two elements in the array */
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 2);

			/* One of which is marked */
			xas_set(&xas, 0);
			seen = 0;
			rcu_read_lock();
			xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
				seen++;
			rcu_read_unlock();
			XA_BUG_ON(xa, seen != 1);
		}
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

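/* xas_init_marks() should reset an entry's marks to their defaults. */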
static noinline void check_xa_mark_2(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long index;
	unsigned int count = 0;
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_set_mark(xa, 0, XA_MARK_0);
	xas_lock(&xas);
	xas_load(&xas);
	xas_init_marks(&xas);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_get_mark(xa, 0, XA_MARK_0));

	for (index = 3500; index < 4500; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		xa_set_mark(xa, index, XA_MARK_0);
	}

	xas_reset(&xas);
	rcu_read_lock();
	xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
		count++;
	rcu_read_unlock();
	XA_BUG_ON(xa, count != 1000);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_init_marks(&xas);
		XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
		XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
	}
	xas_unlock(&xas);

	xa_destroy(xa);
}

static noinline void check_xa_mark(struct xarray *xa)
{
	unsigned long index;

	for (index = 0; index < 16384; index += 4)
		check_xa_mark_1(xa, index);

	check_xa_mark_2(xa);
}

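/* Erasing entries should shrink the tree and recycle interior nodes. */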
static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (order = 0; order < max_order; order++) {
		unsigned long max = (1UL << order) - 1;
		xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
		XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		rcu_read_lock();
		node = xa_head(xa);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
				NULL);
		rcu_read_lock();
		XA_BUG_ON(xa, xa_head(xa) == node);
		rcu_read_unlock();
		XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
		xa_erase_index(xa, ULONG_MAX);
		XA_BUG_ON(xa, xa->xa_head != node);
		xa_erase_index(xa, 0);
	}
}

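/* xa_insert() fails on an occupied slot and must not disturb neighbours. */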
static noinline void check_insert(struct xarray *xa)
{
	unsigned long i;

	for (i = 0; i < 1024; i++) {
		xa_insert_index(xa, i);
		XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
		xa_erase_index(xa, i);
	}

	for (i = 10; i < BITS_PER_LONG; i++) {
		xa_insert_index(xa, 1UL << i);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
		xa_erase_index(xa, 1UL << i);

		xa_insert_index(xa, (1UL << i) - 1);
		XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
		XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
		xa_erase_index(xa, (1UL << i) - 1);
	}

	xa_insert_index(xa, ~0UL);
	XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
	XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
	xa_erase_index(xa, ~0UL);

	XA_BUG_ON(xa, !xa_empty(xa));
}

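/*
 * Sketch of the xa_cmpxchg() calling convention: the previous entry is
 * always returned, and the exchange happened iff it compared equal to
 * @old.  The checks below cover both the matched and mismatched cases.
 */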
static noinline void check_cmpxchg(struct xarray *xa)
{
	void *FIVE = xa_mk_value(5);
	void *SIX = xa_mk_value(6);
	void *LOTS = xa_mk_value(12345678);

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
	xa_erase_index(xa, 12345678);
	xa_erase_index(xa, 5);
	XA_BUG_ON(xa, !xa_empty(xa));
}

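/*
 * A reserved slot reads back as NULL but still occupies memory, so a
 * later store at that index needs no further allocation and may pass
 * GFP_NOWAIT or even 0.
 */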
static noinline void check_reserve(struct xarray *xa)
{
	void *entry;
	unsigned long index;
	int count;

	/* An array with a reserved entry is not empty */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_load(xa, 12345678));
	xa_release(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Releasing a used entry does nothing */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* cmpxchg sees a reserved entry as ZERO */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY,
				xa_mk_value(12345678), GFP_NOWAIT) != NULL);
	xa_release(xa, 12345678);
	xa_erase_index(xa, 12345678);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* xa_insert treats it as busy */
	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
			-EBUSY);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Can iterate through a reserved entry */
	xa_store_index(xa, 5, GFP_KERNEL);
	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
	xa_store_index(xa, 7, GFP_KERNEL);

	count = 0;
	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, index != 5 && index != 7);
		count++;
	}
	XA_BUG_ON(xa, count != 2);

	/* If we free a reserved entry, we should be able to allocate it */
	if (xa->xa_flags & XA_FLAGS_ALLOC) {
		u32 id;

		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(8),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 8);

		xa_release(xa, 6);
		XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_value(6),
					XA_LIMIT(5, 10), GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != 6);
	}

	xa_destroy(xa);
}

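/* Storing NULL through the advanced API erases, even mid-iteration. */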
static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_index(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		xas_lock(&xas);
		xas_store(&xas, NULL);

		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_index(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}

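/*
 * A multi-index entry occupies an aligned power-of-two range of
 * indices; loading any index in the range returns the same entry.
 */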
#ifdef CONFIG_XARRAY_MULTI
static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	unsigned long min = index & ~((1UL << order) - 1);
	unsigned long max = min + (1UL << order);

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
	xas_unlock(&xas);
	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);

	xa_erase_index(xa, min);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, index);
	xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);

	xas_lock(&xas);
	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != index);
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	xas_unlock(&xas);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	int n = 0;

	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 1);
	xas_set(&xas, index + 1);
	xas_for_each(&xas, entry, ULONG_MAX) {
		XA_BUG_ON(xa, entry != xa_mk_index(index));
		n++;
	}
	XA_BUG_ON(xa, n != 2);
	xas_unlock(&xas);

	xa_destroy(xa);
}
#endif

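/*
 * Overlapping stores of different orders must leave the array coherent
 * at every power-of-two boundary.
 */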
static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_index(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}

	for (i = 0; i < 20; i++) {
		check_multi_store_1(xa, 200, i);
		check_multi_store_1(xa, 0, i);
		check_multi_store_1(xa, (1UL << i) + 1, i);
	}
	check_multi_store_2(xa, 4095, 9);

	for (i = 1; i < 20; i++) {
		check_multi_store_3(xa, 0, i);
		check_multi_store_3(xa, 1UL << i, i);
	}
#endif
}

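/*
 * Allocating XArrays hand out IDs starting from their base: 0 for
 * DEFINE_XARRAY_ALLOC, 1 for DEFINE_XARRAY_ALLOC1.
 */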
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
	int i;
	u32 id;

	XA_BUG_ON(xa, !xa_empty(xa));
	/* An empty array should assign %base to the first alloc */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(xa, base);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Allocating and then erasing a lot should not lose base */
	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
		xa_alloc_index(xa, i, GFP_KERNEL);
	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
		xa_erase_index(xa, i);
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* Destroying the array should do the same as erasing */
	xa_destroy(xa);

	/* And it should assign %base again */
	xa_alloc_index(xa, base, GFP_KERNEL);

	/* The next assigned ID should be base+1 */
	xa_alloc_index(xa, base + 1, GFP_KERNEL);
	xa_erase_index(xa, base + 1);

	/* Storing a value should mark it used */
	xa_store_index(xa, base + 1, GFP_KERNEL);
	xa_alloc_index(xa, base + 2, GFP_KERNEL);

	/* If we then erase base, it should be free */
	xa_erase_index(xa, base);
	xa_alloc_index(xa, base, GFP_KERNEL);

	xa_erase_index(xa, base + 1);
	xa_erase_index(xa, base + 2);

	for (i = 1; i < 5000; i++) {
		xa_alloc_index(xa, base + i, GFP_KERNEL);
	}

	xa_destroy(xa);

	/* Check that we fail properly at the limit of allocation */
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xfffffffeU);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0xffffffffU);
	id = 3;
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, id != 3);
	xa_destroy(xa);

	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
				GFP_KERNEL) != -EBUSY);
	xa_erase_index(xa, 3);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
{
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* Allocate and free a NULL and check xa_empty() behaves */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Ditto, but check destroy instead of erase */
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = base; i < base + 10; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}

	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 5);

	xa_for_each(xa, index, entry) {
		xa_erase_index(xa, index);
	}

	for (i = base; i < base + 9; i++) {
		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
		XA_BUG_ON(xa, xa_empty(xa));
	}
	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
	XA_BUG_ON(xa, xa_empty(xa));
	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
	XA_BUG_ON(xa, !xa_empty(xa));

	xa_destroy(xa);
}

static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}

static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);

static noinline void check_xa_alloc(void)
{
	check_xa_alloc_1(&xa0, 0);
	check_xa_alloc_1(&xa1, 1);
	check_xa_alloc_2(&xa0, 0);
	check_xa_alloc_2(&xa1, 1);
	check_xa_alloc_3(&xa0, 0);
	check_xa_alloc_3(&xa1, 1);
}

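/*
 * xas_for_each_conflict() should visit every entry overlapping the
 * range about to be stored, and nothing else.
 */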
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}

static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}

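/*
 * xa_find() on a multi-index entry may report any index inside the
 * entry, not necessarily the first.
 */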
static noinline void check_multi_find(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long index;

	xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	XA_BUG_ON(xa, index != 12);
	index = 13;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	XA_BUG_ON(xa, (index < 12) || (index >= 16));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(16));
	XA_BUG_ON(xa, index != 16);

	xa_erase_index(xa, 12);
	xa_erase_index(xa, 16);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}

static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
						XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
						XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index;

	xa_for_each(xa, index, entry) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		xa_store_index(xa, i, GFP_KERNEL);
		j = 0;
		xa_for_each(xa, index, entry) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}

static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			rcu_read_lock();
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
			rcu_read_unlock();
		}
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}

static noinline void check_find(struct xarray *xa)
{
	check_find_1(xa);
	check_find_2(xa);
	check_find_3(xa);
	check_multi_find(xa);
	check_multi_find_2(xa);
}

/* See find_swap_entry() in mm/shmem.c */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	return entry ? xas.xa_index : -1;
}

static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}

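/*
 * xas_next()/xas_prev() move one index at a time, staying within a node
 * where possible instead of restarting the walk from the top.
 */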
static noinline void check_move_tiny(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);

	XA_BUG_ON(xa, !xa_empty(xa));
	rcu_read_lock();
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	rcu_read_unlock();
	xa_store_index(xa, 0, GFP_KERNEL);
	rcu_read_lock();
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	xas_set(&xas, 0);
	XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0));
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	rcu_read_unlock();
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	check_move_tiny(xa);

	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}

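/*
 * xas_create_range() preallocates the nodes covering a range so the
 * stores that follow cannot fail with ENOMEM.
 */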
static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}

static noinline void check_create_range_1(struct xarray *xa,
		unsigned long index, unsigned order)
{
	unsigned long i;

	xa_store_many_order(xa, index, order);
	for (i = index; i < index + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_2(struct xarray *xa, unsigned order)
{
	unsigned long i;
	unsigned long nr = 1UL << order;

	for (i = 0; i < nr * nr; i += nr)
		xa_store_many_order(xa, i, order);
	for (i = 0; i < nr * nr; i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}

static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);
	}

	check_create_range_3();
}

static noinline void __check_store_range(struct xarray *xa, unsigned long first,
		unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);

	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);

	xa_store_range(xa, first, last, NULL, GFP_KERNEL);
#endif

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_store_range(struct xarray *xa)
{
	unsigned long i, j;

	for (i = 0; i < 128; i++) {
		for (j = i; j < 128; j++) {
			__check_store_range(xa, i, j);
			__check_store_range(xa, 128 + i, 128 + j);
			__check_store_range(xa, 4095 + i, 4095 + j);
			__check_store_range(xa, 4096 + i, 4096 + j);
			__check_store_range(xa, 123456 + i, 123456 + j);
			__check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
		}
	}
}

static void check_align_1(struct xarray *xa, char *name)
{
	int i;
	unsigned int id;
	unsigned long index;
	void *entry;

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
					GFP_KERNEL) != 0);
		XA_BUG_ON(xa, id != i);
	}
	xa_for_each(xa, index, entry)
		XA_BUG_ON(xa, xa_is_err(entry));
	xa_destroy(xa);
}

/*
 * We should always be able to store without allocating memory after
 * reserving a slot.
 */
static void check_align_2(struct xarray *xa, char *name)
{
	int i;

	XA_BUG_ON(xa, !xa_empty(xa));

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, GFP_KERNEL) != NULL);
		xa_erase(xa, 0);
	}

	for (i = 0; i < 8; i++) {
		XA_BUG_ON(xa, xa_reserve(xa, 0, GFP_KERNEL) != 0);
		XA_BUG_ON(xa, xa_store(xa, 0, name + i, 0) != NULL);
		xa_erase(xa, 0);
	}

	XA_BUG_ON(xa, !xa_empty(xa));
}

static noinline void check_align(struct xarray *xa)
{
	char name[] = "Motorola 68000";

	check_align_1(xa, name);
	check_align_1(xa, name + 1);
	check_align_1(xa, name + 2);
	check_align_1(xa, name + 3);
	check_align_2(xa, name);
}

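/*
 * Model the page cache's shadow-entry accounting (cf. mm/workingset.c):
 * a node whose entries are all values goes on a private list, and the
 * update callback must keep that list coherent as entries change.
 */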
static LIST_HEAD(shadow_nodes);

static void test_update_node(struct xa_node *node)
{
	if (node->count && node->count == node->nr_values) {
		if (list_empty(&node->private_list))
			list_add(&shadow_nodes, &node->private_list);
	} else {
		if (!list_empty(&node->private_list))
			list_del_init(&node->private_list);
	}
}

static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_STATE(xas, node->array, 0);
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		xas.xa_node = xa_parent_locked(node->array, node);
		xas.xa_offset = node->offset;
		xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
		xas_set_update(&xas, test_update_node);
		xas_store(&xas, NULL);
	}
	xa_unlock(xa);
}

static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}

/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}

static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}

static DEFINE_XARRAY(array);

static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_insert(&array);
	check_cmpxchg(&array);
	check_reserve(&array);
	check_reserve(&xa0);
	check_multi_store(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);
	check_align(&xa0);

	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}

static void xarray_exit(void)
{
}

module_init(xarray_checks);
module_exit(xarray_exit);
MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
MODULE_LICENSE("GPL");