// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

/**
 * idr_alloc_u32() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @nextid: Pointer to an ID.
 * @max: The maximum ID to allocate (inclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @max.
 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 * is exclusive.  The new ID is assigned to @nextid before the pointer
 * is inserted into the IDR, so if @nextid points into the object pointed
 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.  If an error occurred,
 * @nextid is unchanged.
 */
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
			unsigned long max, gfp_t gfp)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int base = idr->idr_base;
	unsigned int id = *nextid;

	if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
		idr->idr_rt.xa_flags |= IDR_RT_MARKER;

	id = (id < base) ? 0 : id - base;
	radix_tree_iter_init(&iter, id);
	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	*nextid = iter.index + base;
	/* there is a memory barrier inside radix_tree_iter_replace() */
	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

	return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);
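
/*
 * Illustrative sketch, not part of the kernel source: storing the ID
 * inside the object itself, as the comment above recommends, so that a
 * concurrent RCU lookup never observes an uninitialised ID.  The names
 * foo, foo_idr, foo_lock and foo_install are hypothetical.
 */
struct foo {
	u32 id;				/* @nextid points here */
};

static DEFINE_IDR(foo_idr);
static DEFINE_SPINLOCK(foo_lock);

static int foo_install(struct foo *f)
{
	int err;

	f->id = 0;			/* start the search at ID 0 */
	spin_lock(&foo_lock);		/* writers exclude each other */
	err = idr_alloc_u32(&foo_idr, f, &f->id, UINT_MAX, GFP_NOWAIT);
	spin_unlock(&foo_lock);
	return err;			/* on success, f->id is valid */
}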

/**
 * idr_alloc() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = start;
	int ret;

	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;

	ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
	if (ret)
		return ret;

	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
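
/*
 * Illustrative sketch, not part of the kernel source: the common
 * preload-then-allocate pattern, which lets a caller hold a spinlock
 * across idr_alloc() while still benefiting from a sleeping GFP_KERNEL
 * preallocation.  my_idr, my_lock and my_object_get_id are hypothetical.
 */
static int my_object_get_id(struct idr *my_idr, spinlock_t *my_lock,
				void *my_object)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; done outside the lock */
	spin_lock(my_lock);
	/* range [1, INT_MAX]: an @end of 0 means one larger than INT_MAX */
	id = idr_alloc(my_idr, my_object, 1, 0, GFP_NOWAIT);
	spin_unlock(my_lock);
	idr_preload_end();

	return id;			/* >= 1 on success, else -errno */
}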

/**
 * idr_alloc_cyclic() - Allocate an ID cyclically.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 * The search for an unused ID will start at the last ID allocated and will
 * wrap around to @start if no free IDs are found before reaching @end.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = idr->idr_next;
	int err, max = end > 0 ? end - 1 : INT_MAX;

	if ((int)id < start)
		id = start;

	err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	if ((err == -ENOSPC) && (id > start)) {
		id = start;
		err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	}
	if (err)
		return err;

	idr->idr_next = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
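
/*
 * Illustrative sketch, not part of the kernel source: cyclic allocation
 * delays the reuse of recently freed IDs, which is why it is commonly
 * chosen for handles exposed to userspace.  The names session_idr,
 * session_lock and session_new_handle are hypothetical.
 */
static int session_new_handle(struct idr *session_idr,
				spinlock_t *session_lock, void *session)
{
	int id;

	spin_lock(session_lock);
	/* resumes after the previous allocation, wrapping back to 100 */
	id = idr_alloc_cyclic(session_idr, session, 100, 200, GFP_ATOMIC);
	spin_unlock(session_lock);
	return id;			/* 100..199 on success */
}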

/**
 * idr_remove() - Remove an ID from the IDR.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Removes this ID from the IDR.  If the ID was not previously in the IDR,
 * this function returns %NULL.
 *
 * Since this function modifies the IDR, the caller should provide their
 * own locking to ensure that concurrent modification of the same IDR is
 * not possible.
 *
 * Return: The pointer formerly associated with this ID.
 */
void *idr_remove(struct idr *idr, unsigned long id)
{
	return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
}
EXPORT_SYMBOL_GPL(idr_remove);

/**
 * idr_find() - Return pointer for given ID.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Looks up the pointer associated with this ID.  A %NULL pointer may
 * indicate that @id is not allocated or that the %NULL pointer was
 * associated with this ID.
 *
 * This function can be called under rcu_read_lock(), provided that the
 * leaf pointers' lifetimes are correctly managed.
 *
 * Return: The pointer associated with this ID.
 */
void *idr_find(const struct idr *idr, unsigned long id)
{
	return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
}
EXPORT_SYMBOL_GPL(idr_find);
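
/*
 * Illustrative sketch, not part of the kernel source: a lockless lookup
 * under rcu_read_lock() paired with a locked removal, following the
 * locking rules stated in the comments above.  Hypothetical names.
 */
static void *foo_lookup(struct idr *idr, unsigned long id)
{
	void *p;

	rcu_read_lock();
	p = idr_find(idr, id);		/* NULL if @id is unallocated */
	/* a real caller would take a reference before unlocking */
	rcu_read_unlock();
	return p;
}

static void *foo_del(struct idr *idr, spinlock_t *lock, unsigned long id)
{
	void *p;

	spin_lock(lock);		/* modifications need exclusion */
	p = idr_remove(idr, id);	/* NULL if @id was not present */
	spin_unlock(lock);
	return p;
}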

/**
 * idr_for_each() - Iterate through all stored pointers.
 * @idr: IDR handle.
 * @fn: Function to be called for each pointer.
 * @data: Data passed to callback function.
 *
 * The callback function will be called for each entry in @idr, passing
 * the ID, the entry and @data.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU.  Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
		int (*fn)(int id, void *p, void *data), void *data)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	int base = idr->idr_base;

	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
		int ret;
		unsigned long id = iter.index + base;

		if (WARN_ON_ONCE(id > INT_MAX))
			break;
		ret = fn(id, rcu_dereference_raw(*slot), data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(idr_for_each);
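
/*
 * Illustrative sketch, not part of the kernel source: a callback that
 * terminates the walk early by returning a non-zero value, which
 * idr_for_each() then propagates to its caller.  Hypothetical names.
 */
static int foo_show_one(int id, void *p, void *data)
{
	int limit = *(int *)data;

	if (id > limit)
		return -E2BIG;		/* stops the iteration */
	pr_info("id %d -> %p\n", id, p);
	return 0;
}

static int foo_show_all(struct idr *idr, int limit)
{
	return idr_for_each(idr, foo_show_one, &limit);
}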

/**
 * idr_get_next_ul() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * @nextid must be incremented by the user.
 */
void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	void *entry = NULL;
	unsigned long base = idr->idr_base;
	unsigned long id = *nextid;

	id = (id < base) ? 0 : id - base;
	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
		entry = rcu_dereference_raw(*slot);
		if (!entry)
			continue;
		if (!xa_is_internal(entry))
			break;
		if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
			break;
		slot = radix_tree_iter_retry(&iter);
	}
	if (!slot)
		return NULL;

	*nextid = iter.index + base;
	return entry;
}
EXPORT_SYMBOL(idr_get_next_ul);
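
/*
 * Illustrative sketch, not part of the kernel source: iterating by hand
 * with idr_get_next_ul().  Note the explicit increment of @id; the
 * function only moves @id forward to the entry it found.
 */
static unsigned long foo_count(struct idr *idr)
{
	unsigned long id = 0, n = 0;

	while (idr_get_next_ul(idr, &id) != NULL) {
		n++;
		id++;		/* advance past the entry just returned */
	}
	return n;
}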

/**
 * idr_get_next() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * @nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
	unsigned long id = *nextid;
	void *entry = idr_get_next_ul(idr, &id);

	if (WARN_ON_ONCE(id > INT_MAX))
		return NULL;
	*nextid = id;
	return entry;
}
EXPORT_SYMBOL(idr_get_next);

/**
 * idr_replace() - Replace pointer for given ID.
 * @idr: IDR handle.
 * @ptr: New pointer to associate with the ID.
 * @id: ID to change.
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Return: the old value on success.  %-ENOENT indicates that @id was not
 * found.  %-EINVAL indicates that @ptr was not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct radix_tree_node *node;
	void __rcu **slot = NULL;
	void *entry;

	id -= idr->idr_base;

	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr);

	return entry;
}
EXPORT_SYMBOL(idr_replace);
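
/*
 * Illustrative sketch, not part of the kernel source: swapping the
 * pointer behind a live ID and checking for the ERR_PTR() return.
 * Hypothetical names; kfree() stands in for the owner's teardown.
 */
static int foo_swap(struct idr *idr, unsigned long id, void *new)
{
	void *old;

	old = idr_replace(idr, new, id);
	if (IS_ERR(old))
		return PTR_ERR(old);	/* -ENOENT: @id not allocated */
	kfree(old);			/* old object now belongs to us */
	return 0;
}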

/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer.  As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()).  To allocate a new ID, call
 * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
 * To free an ID, call ida_free().
 *
 * ida_destroy() can be used to dispose of an IDA without needing to
 * free the individual IDs in it.  You can use ida_is_empty() to find
 * out whether the IDA has any IDs currently allocated.
 *
 * The IDA handles its own locking.  It is safe to call any of the IDA
 * functions without synchronisation in your code.
 *
 * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
 * limitation, it should be quite straightforward to raise the maximum.
 */
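
/*
 * Illustrative sketch, not part of the kernel source: the IDA API in
 * miniature.  No external locking is required, and ida_alloc_range()
 * may sleep when called with GFP_KERNEL.  example_ida, example_get_minor
 * and example_put_minor are hypothetical names.
 */
static DEFINE_IDA(example_ida);

static int example_get_minor(void)
{
	return ida_alloc_range(&example_ida, 0, 255, GFP_KERNEL);
}

static void example_put_minor(int minor)
{
	ida_free(&example_ida, minor);
}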

/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the XArray to store bitmaps in
 * each entry.  The XA_FREE_MARK is only cleared when all bits in the bitmap
 * have been set.
 *
 * I considered telling the XArray that each slot is an order-10 node
 * and indexing by bit number, but the XArray can't allow a single multi-index
 * entry in the head, which would significantly increase memory consumption
 * for the IDA.  So instead we divide the index by the number of bits in the
 * leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we store the bits
 * as a value entry.  Value entries never have the XA_FREE_MARK cleared
 * because we can always convert them into a bitmap entry.
 *
 * It would be possible to optimise further; once we've run out of a
 * single 128-byte bitmap, we currently switch to a 576-byte node, put
 * the 128-byte bitmap in the first entry and then start allocating extra
 * 128-byte entries.  We could instead use the 512 bytes of the node's
 * data as a bitmap before moving to that scheme.  I do not believe this
 * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
 * users of the IDA and almost none of them use more than 1024 entries.
 * Those that do, use more than the 8192 IDs that the 512 bytes would
 * provide.
 *
 * The IDA always uses a lock to alloc/free.  If we add a 'test_bit'
 * equivalent, it will still need locking.  Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */
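
/*
 * Illustrative sketch, not part of the kernel source: the index/bit
 * split described above.  IDA_BITMAP_BITS is 1024 (a 128-byte bitmap
 * per leaf), so ID 2500 lives at XArray index 2, bit 452.
 */
static void ida_locate(unsigned int id, unsigned long *index,
			unsigned int *bit)
{
	*index = id / IDA_BITMAP_BITS;	/* which bitmap leaf */
	*bit = id % IDA_BITMAP_BITS;	/* which bit within the leaf */
}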

/**
 * ida_alloc_range() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and @max, inclusive.  The allocated ID will
 * not exceed %INT_MAX, even if @max is larger.
 *
 * Context: Any context.  It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
			gfp_t gfp)
{
	XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
	unsigned bit = min % IDA_BITMAP_BITS;
	unsigned long flags;
	struct ida_bitmap *bitmap, *alloc = NULL;

	if ((int)min < 0)
		return -ENOSPC;

	if ((int)max < 0)
		max = INT_MAX;

retry:
	xas_lock_irqsave(&xas, flags);
next:
	bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
	if (xas.xa_index > min / IDA_BITMAP_BITS)
		bit = 0;
	if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
		goto nospc;

	if (xa_is_value(bitmap)) {
		unsigned long tmp = xa_to_value(bitmap);

		if (bit < BITS_PER_XA_VALUE) {
			bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
			if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
				goto nospc;
			if (bit < BITS_PER_XA_VALUE) {
				tmp |= 1UL << bit;
				xas_store(&xas, xa_mk_value(tmp));
				goto out;
			}
		}
		bitmap = alloc;
		if (!bitmap)
			bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
		if (!bitmap)
			goto alloc;
		bitmap->bitmap[0] = tmp;
		xas_store(&xas, bitmap);
		if (xas_error(&xas)) {
			bitmap->bitmap[0] = 0;
			goto out;
		}
	}

	if (bitmap) {
		bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
		if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
			goto nospc;
		if (bit == IDA_BITMAP_BITS)
			goto next;

		__set_bit(bit, bitmap->bitmap);
		if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} else {
		if (bit < BITS_PER_XA_VALUE) {
			bitmap = xa_mk_value(1UL << bit);
		} else {
			bitmap = alloc;
			if (!bitmap)
				bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
			if (!bitmap)
				goto alloc;
			__set_bit(bit, bitmap->bitmap);
		}
		xas_store(&xas, bitmap);
	}
out:
	xas_unlock_irqrestore(&xas, flags);
	if (xas_nomem(&xas, gfp)) {
		xas.xa_index = min / IDA_BITMAP_BITS;
		bit = min % IDA_BITMAP_BITS;
		goto retry;
	}
	if (bitmap != alloc)
		kfree(alloc);
	if (xas_error(&xas))
		return xas_error(&xas);
	return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
	xas_unlock_irqrestore(&xas, flags);
	alloc = kzalloc(sizeof(*bitmap), gfp);
	if (!alloc)
		return -ENOMEM;
	xas_set(&xas, min / IDA_BITMAP_BITS);
	bit = min % IDA_BITMAP_BITS;
	goto retry;
nospc:
	xas_unlock_irqrestore(&xas, flags);
	kfree(alloc);
	return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
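
/*
 * Illustrative sketch, not part of the kernel source: the ida_alloc(),
 * ida_alloc_min() and ida_alloc_max() helpers declared in <linux/idr.h>
 * are thin wrappers around ida_alloc_range(); the shims below show the
 * idea, not the actual definitions.
 */
static inline int my_ida_alloc(struct ida *ida, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, ~0u, gfp);	/* any free ID */
}

static inline int my_ida_alloc_max(struct ida *ida, unsigned int max,
					gfp_t gfp)
{
	return ida_alloc_range(ida, 0, max, gfp);
}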

/**
 * ida_free() - Release an allocated ID.
 * @ida: IDA handle.
 * @id: Previously allocated ID.
 *
 * Context: Any context.  It is safe to call this function without
 * locking in your code.
 */
void ida_free(struct ida *ida, unsigned int id)
{
	XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
	unsigned bit = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;
	unsigned long flags;

	BUG_ON((int)id < 0);

	xas_lock_irqsave(&xas, flags);
	bitmap = xas_load(&xas);

	if (xa_is_value(bitmap)) {
		unsigned long v = xa_to_value(bitmap);
		if (bit >= BITS_PER_XA_VALUE)
			goto err;
		if (!(v & (1UL << bit)))
			goto err;
		v &= ~(1UL << bit);
		if (!v)
			goto delete;
		xas_store(&xas, xa_mk_value(v));
	} else {
		if (!test_bit(bit, bitmap->bitmap))
			goto err;
		__clear_bit(bit, bitmap->bitmap);
		xas_set_mark(&xas, XA_FREE_MARK);
		if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
			kfree(bitmap);
delete:
			xas_store(&xas, NULL);
		}
	}
	xas_unlock_irqrestore(&xas, flags);
	return;
 err:
	xas_unlock_irqrestore(&xas, flags);
	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_free);

/**
 * ida_destroy() - Free all IDs.
 * @ida: IDA handle.
 *
 * Calling this function frees all IDs and releases all resources used
 * by an IDA.  When this call returns, the IDA is empty and can be reused
 * or freed.  If the IDA is already empty, there is no need to call this
 * function.
 *
 * Context: Any context.  It is safe to call this function without
 * locking in your code.
 */
void ida_destroy(struct ida *ida)
{
	XA_STATE(xas, &ida->xa, 0);
	struct ida_bitmap *bitmap;
	unsigned long flags;

	xas_lock_irqsave(&xas, flags);
	xas_for_each(&xas, bitmap, ULONG_MAX) {
		if (!xa_is_value(bitmap))
			kfree(bitmap);
		xas_store(&xas, NULL);
	}
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(ida_destroy);
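
/*
 * Illustrative sketch, not part of the kernel source: tearing down an
 * IDA in one call on module exit instead of freeing each ID separately.
 * example_ida is the hypothetical IDA from the sketch further above.
 */
static void example_exit(void)
{
	ida_destroy(&example_ida);	/* frees all IDs and bitmaps */
	WARN_ON(!ida_is_empty(&example_ida));
}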

#ifndef __KERNEL__
extern void xa_dump_index(unsigned long index, unsigned int shift);
#define IDA_CHUNK_SHIFT		ilog2(IDA_BITMAP_BITS)

static void ida_dump_entry(void *entry, unsigned long index)
{
	unsigned long i;

	if (!entry)
		return;

	if (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);
		unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
			XA_CHUNK_SHIFT;

		xa_dump_index(index * IDA_BITMAP_BITS, shift);
		xa_dump_node(node);
		for (i = 0; i < XA_CHUNK_SIZE; i++)
			ida_dump_entry(node->slots[i],
					index | (i << node->shift));
	} else if (xa_is_value(entry)) {
		xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
		pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
	} else {
		struct ida_bitmap *bitmap = entry;

		xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
		pr_cont("bitmap: %p data", bitmap);
		for (i = 0; i < IDA_BITMAP_LONGS; i++)
			pr_cont(" %lx", bitmap->bitmap[i]);
		pr_cont("\n");
	}
}

static void ida_dump(struct ida *ida)
{
	struct xarray *xa = &ida->xa;
	pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
				xa->xa_flags >> ROOT_TAG_SHIFT);
	ida_dump_entry(xa->xa_head, 0);
}
#endif