#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);

/**
 * idr_alloc_u32() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @nextid: Pointer to an ID.
 * @max: The maximum ID to allocate (inclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @max.
 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 * is exclusive.  The new ID is assigned to @nextid before the pointer
 * is inserted into the IDR, so if @nextid points into the object pointed
 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.  If an error occurred,
 * @nextid is unchanged.
 */
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
			unsigned long max, gfp_t gfp)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int base = idr->idr_base;
	unsigned int id = *nextid;

	if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
		idr->idr_rt.xa_flags |= IDR_RT_MARKER;

	id = (id < base) ? 0 : id - base;
	radix_tree_iter_init(&iter, id);
	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	*nextid = iter.index + base;
	/* there is a memory barrier inside radix_tree_iter_replace() */
	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

	return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);
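
/*
 * A minimal usage sketch (not part of this file; foo_idr, foo_mutex and
 * struct foo are hypothetical): the new ID is written into the object
 * before the pointer is published, so a concurrent RCU lookup that finds
 * the object also sees a valid ->id.
 *
 *	static DEFINE_IDR(foo_idr);
 *	static DEFINE_MUTEX(foo_mutex);
 *
 *	int foo_register(struct foo *foo)
 *	{
 *		int err;
 *
 *		foo->id = 0;
 *		mutex_lock(&foo_mutex);
 *		err = idr_alloc_u32(&foo_idr, foo, &foo->id, UINT_MAX, GFP_KERNEL);
 *		mutex_unlock(&foo_mutex);
 *		return err;
 *	}
 */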

/**
 * idr_alloc() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = start;
	int ret;

	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;

	ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
	if (ret)
		return ret;

	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
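
/*
 * A common locking pattern for idr_alloc() (sketch; foo_idr and foo_lock
 * are hypothetical): preallocate with GFP_KERNEL outside the spinlock,
 * then allocate with GFP_NOWAIT while holding it.
 *
 *	int id;
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&foo_lock);
 *	id = idr_alloc(&foo_idr, foo, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&foo_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 */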

/**
 * idr_alloc_cyclic() - Allocate an ID cyclically.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end.  If
 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 * callers to use @start + N as @end as long as N is within integer range.
 * The search for an unused ID will start at the last ID allocated and will
 * wrap around to @start if no free IDs are found before reaching @end.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible.  Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = idr->idr_next;
	int err, max = end > 0 ? end - 1 : INT_MAX;

	if ((int)id < start)
		id = start;

	err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	if ((err == -ENOSPC) && (id > start)) {
		id = start;
		err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	}
	if (err)
		return err;

	idr->idr_next = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
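
/*
 * An illustrative sequence (hypothetical foo_idr, starting from empty):
 * because the search begins after the last allocated ID, freed IDs are
 * not reused until the allocator wraps, which helps callers detect stale
 * handles.  id1 is 0, id2 is 1, and id3 is 2 rather than 0.
 *
 *	id1 = idr_alloc_cyclic(&foo_idr, a, 0, 0, GFP_KERNEL);
 *	id2 = idr_alloc_cyclic(&foo_idr, b, 0, 0, GFP_KERNEL);
 *	idr_remove(&foo_idr, id1);
 *	id3 = idr_alloc_cyclic(&foo_idr, c, 0, 0, GFP_KERNEL);
 */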

/**
 * idr_remove() - Remove an ID from the IDR.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Removes this ID from the IDR.  If the ID was not previously in the IDR,
 * this function returns %NULL.
 *
 * Since this function modifies the IDR, the caller should provide their
 * own locking to ensure that concurrent modification of the same IDR is
 * not possible.
 *
 * Return: The pointer formerly associated with this ID.
 */
void *idr_remove(struct idr *idr, unsigned long id)
{
	return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
}
EXPORT_SYMBOL_GPL(idr_remove);
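
/*
 * A removal sketch (hypothetical foo_idr, foo_mutex and an RCU-freed
 * struct foo with an embedded rcu_head): the return value is the pointer
 * that was stored, which the caller frees once readers can no longer
 * see it.
 *
 *	mutex_lock(&foo_mutex);
 *	foo = idr_remove(&foo_idr, foo->id);
 *	mutex_unlock(&foo_mutex);
 *	kfree_rcu(foo, rcu);
 */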

/**
 * idr_find() - Return pointer for given ID.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Looks up the pointer associated with this ID.  A %NULL pointer may
 * indicate that @id is not allocated or that the %NULL pointer was
 * associated with this ID.
 *
 * This function can be called under rcu_read_lock(), provided that the
 * lifetimes of the leaf pointers are correctly managed.
 *
 * Return: The pointer associated with this ID.
 */
void *idr_find(const struct idr *idr, unsigned long id)
{
	return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
}
EXPORT_SYMBOL_GPL(idr_find);
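
/*
 * A lookup sketch (hypothetical foo_idr): readers need only the RCU read
 * lock, provided entries are freed after an RCU grace period.
 *
 *	rcu_read_lock();
 *	foo = idr_find(&foo_idr, id);
 *	if (foo)
 *		... use foo, without sleeping ...
 *	rcu_read_unlock();
 */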

/**
 * idr_for_each() - Iterate through all stored pointers.
 * @idr: IDR handle.
 * @fn: Function to be called for each pointer.
 * @data: Data passed to callback function.
 *
 * The callback function will be called for each entry in @idr, passing
 * the ID, the entry and @data.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU.  Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
		int (*fn)(int id, void *p, void *data), void *data)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	int base = idr->idr_base;

	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
		int ret;
		unsigned long id = iter.index + base;

		if (WARN_ON_ONCE(id > INT_MAX))
			break;
		ret = fn(id, rcu_dereference_raw(*slot), data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(idr_for_each);
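
/*
 * An iteration sketch (hypothetical foo_idr and foo_print): returning
 * %0 continues the walk; any other value stops it and is passed back.
 *
 *	static int foo_print(int id, void *p, void *data)
 *	{
 *		pr_info("id %d -> %p\n", id, p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&foo_idr, foo_print, NULL);
 */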

/**
 * idr_get_next() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * @nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned long base = idr->idr_base;
	unsigned long id = *nextid;

	id = (id < base) ? 0 : id - base;
	slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
	if (!slot)
		return NULL;
	id = iter.index + base;

	if (WARN_ON_ONCE(id > INT_MAX))
		return NULL;

	*nextid = id;
	return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next);
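
/*
 * A loop sketch (hypothetical foo_idr): note that the caller increments
 * the ID between iterations, as required above.
 *
 *	int id = 0;
 *	void *entry;
 *
 *	while ((entry = idr_get_next(&foo_idr, &id)) != NULL) {
 *		... process entry ...
 *		id++;
 *	}
 */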

/**
 * idr_get_next_ul() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
 * to the ID of the found value.  To use in a loop, the value pointed to by
 * @nextid must be incremented by the user.
 */
void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned long base = idr->idr_base;
	unsigned long id = *nextid;

	id = (id < base) ? 0 : id - base;
	slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
	if (!slot)
		return NULL;

	*nextid = iter.index + base;
	return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next_ul);

/**
 * idr_replace() - Replace pointer for given ID.
 * @idr: IDR handle.
 * @ptr: New pointer to associate with the ID.
 * @id: ID to change.
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Return: the old value on success.  %-ENOENT indicates that @id was not
 * found.  %-EINVAL indicates that @ptr was not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct radix_tree_node *node;
	void __rcu **slot = NULL;
	void *entry;

	id -= idr->idr_base;

	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL);

	return entry;
}
EXPORT_SYMBOL(idr_replace);
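
/*
 * A replacement sketch (hypothetical foo_idr and RCU-freed objects with an
 * embedded rcu_head): errors come back as ERR_PTR() values, so test with
 * IS_ERR() before using the old pointer.
 *
 *	old = idr_replace(&foo_idr, new_foo, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	kfree_rcu(old, rcu);
 */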

/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer.  As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()).  To allocate a new ID, call
 * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
 * To free an ID, call ida_free().
 *
 * ida_destroy() can be used to dispose of an IDA without needing to
 * free the individual IDs in it.  You can use ida_is_empty() to find
 * out whether the IDA has any IDs currently allocated.
 *
 * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
 * limitation, it should be quite straightforward to raise the maximum.
 */
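
/*
 * A minimal IDA sketch (hypothetical foo_ida): allocate the lowest free
 * ID, use it, then return it to the pool.
 *
 *	static DEFINE_IDA(foo_ida);
 *
 *	int nr = ida_alloc(&foo_ida, GFP_KERNEL);
 *	if (nr < 0)
 *		return nr;
 *	...
 *	ida_free(&foo_ida, nr);
 */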

/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the IDR & radix tree to store
 * bitmaps in each entry.  The IDR_FREE tag means there is at least one bit
 * free, unlike the IDR where it means at least one entry is free.
 *
 * I considered telling the radix tree that each slot is an order-10 node
 * and storing the bit numbers in the radix tree, but the radix tree can't
 * allow a single multiorder entry at index 0, which would significantly
 * increase memory consumption for the IDA.  So instead we divide the index
 * by the number of bits in the leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we store the bits
 * directly in the entry.
 *
 * We allow the radix tree 'exceptional' count to get out of date.  Nothing
 * in the IDA nor the radix tree code checks it.  If it becomes important
 * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
 * calls to radix_tree_iter_replace() which will correct the exceptional
 * count.
 *
 * The IDA always requires a lock to alloc/free.  If we add a 'test_bit'
 * equivalent, it will still need locking.  Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */
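
/*
 * A worked example of the index split described above, assuming the usual
 * 128-byte (1024-bit) leaf bitmaps: ID 3000 lives at radix tree index
 * 3000 / 1024 = 2, at bit 3000 % 1024 = 952 within that leaf.
 */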

#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1)

static int ida_get_new_above(struct ida *ida, int start)
{
	struct radix_tree_root *root = &ida->ida_rt;
	void __rcu **slot;
	struct radix_tree_iter iter;
	struct ida_bitmap *bitmap;
	unsigned long index;
	unsigned bit;
	int new;

	index = start / IDA_BITMAP_BITS;
	bit = start % IDA_BITMAP_BITS;

	slot = radix_tree_iter_init(&iter, index);
	for (;;) {
		if (slot)
			slot = radix_tree_next_slot(slot, &iter,
						RADIX_TREE_ITER_TAGGED);
		if (!slot) {
			slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
			if (IS_ERR(slot)) {
				if (slot == ERR_PTR(-ENOMEM))
					return -EAGAIN;
				return PTR_ERR(slot);
			}
		}
		if (iter.index > index)
			bit = 0;
		new = iter.index * IDA_BITMAP_BITS;
		bitmap = rcu_dereference_raw(*slot);
		if (xa_is_value(bitmap)) {
			unsigned long tmp = xa_to_value(bitmap);
			int vbit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE,
							bit);
			if (vbit < BITS_PER_XA_VALUE) {
				tmp |= 1UL << vbit;
				rcu_assign_pointer(*slot, xa_mk_value(tmp));
				return new + vbit;
			}
			bitmap = this_cpu_xchg(ida_bitmap, NULL);
			if (!bitmap)
				return -EAGAIN;
			bitmap->bitmap[0] = tmp;
			rcu_assign_pointer(*slot, bitmap);
		}

		if (bitmap) {
			bit = find_next_zero_bit(bitmap->bitmap,
							IDA_BITMAP_BITS, bit);
			new += bit;
			if (new < 0)
				return -ENOSPC;
			if (bit == IDA_BITMAP_BITS)
				continue;

			__set_bit(bit, bitmap->bitmap);
			if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
				radix_tree_iter_tag_clear(root, &iter,
						IDR_FREE);
		} else {
			new += bit;
			if (new < 0)
				return -ENOSPC;
			if (bit < BITS_PER_XA_VALUE) {
				bitmap = xa_mk_value(1UL << bit);
			} else {
				bitmap = this_cpu_xchg(ida_bitmap, NULL);
				if (!bitmap)
					return -EAGAIN;
				__set_bit(bit, bitmap->bitmap);
			}
			radix_tree_iter_replace(root, &iter, slot, bitmap);
		}

		return new;
	}
}

static void ida_remove(struct ida *ida, int id)
{
	unsigned long index = id / IDA_BITMAP_BITS;
	unsigned offset = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;
	unsigned long *btmp;
	struct radix_tree_iter iter;
	void __rcu **slot;

	slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
	if (!slot)
		goto err;

	bitmap = rcu_dereference_raw(*slot);
	if (xa_is_value(bitmap)) {
		btmp = (unsigned long *)slot;
		offset += 1; /* Intimate knowledge of the value encoding */
		if (offset >= BITS_PER_LONG)
			goto err;
	} else {
		btmp = bitmap->bitmap;
	}
	if (!test_bit(offset, btmp))
		goto err;

	__clear_bit(offset, btmp);
	radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
	if (xa_is_value(bitmap)) {
		if (xa_to_value(rcu_dereference_raw(*slot)) == 0)
			radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	} else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
		kfree(bitmap);
		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	}
	return;
err:
	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}

/**
 * ida_destroy() - Free all IDs.
 * @ida: IDA handle.
 *
 * Calling this function frees all IDs and releases all resources used
 * by an IDA.  When this call returns, the IDA is empty and can be reused
 * or freed.  If the IDA is already empty, there is no need to call this
 * function.
 *
 * Context: Any context.
 */
void ida_destroy(struct ida *ida)
{
	unsigned long flags;
	struct radix_tree_iter iter;
	void __rcu **slot;

	xa_lock_irqsave(&ida->ida_rt, flags);
	radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
		struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
		if (!xa_is_value(bitmap))
			kfree(bitmap);
		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
	}
	xa_unlock_irqrestore(&ida->ida_rt, flags);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_alloc_range() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and @max, inclusive.  The allocated ID will
 * not exceed %INT_MAX, even if @max is larger.
 *
 * Context: Any context.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
			gfp_t gfp)
{
	int id = 0;
	unsigned long flags;

	if ((int)min < 0)
		return -ENOSPC;

	if ((int)max < 0)
		max = INT_MAX;

again:
	xa_lock_irqsave(&ida->ida_rt, flags);
	id = ida_get_new_above(ida, min);
	if (id > (int)max) {
		ida_remove(ida, id);
		id = -ENOSPC;
	}
	xa_unlock_irqrestore(&ida->ida_rt, flags);

	if (unlikely(id == -EAGAIN)) {
		if (!ida_pre_get(ida, gfp))
			return -ENOMEM;
		goto again;
	}

	return id;
}
EXPORT_SYMBOL(ida_alloc_range);
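
/*
 * A range sketch (hypothetical foo_ida): allocating a minor number in
 * [0, 255] and releasing it when the device goes away.
 *
 *	int minor = ida_alloc_range(&foo_ida, 0, 255, GFP_KERNEL);
 *	if (minor < 0)
 *		return minor;
 *	...
 *	ida_free(&foo_ida, minor);
 */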

/**
 * ida_free() - Release an allocated ID.
 * @ida: IDA handle.
 * @id: Previously allocated ID.
 *
 * Context: Any context.
 */
void ida_free(struct ida *ida, unsigned int id)
{
	unsigned long flags;

	BUG_ON((int)id < 0);
	xa_lock_irqsave(&ida->ida_rt, flags);
	ida_remove(ida, id);
	xa_unlock_irqrestore(&ida->ida_rt, flags);
}
EXPORT_SYMBOL(ida_free);