// SPDX-License-Identifier: GPL-2.0+
/*
 * XArray implementation
 * Copyright (c) 2017-2018 Microsoft Corporation
 * Copyright (c) 2018-2020 Oracle
 * Author: Matthew Wilcox <willy@infradead.org>
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/*
 * Coding conventions in this file:
 *
 * @xa is used to refer to the entire xarray.
 * @xas is the 'xarray operation state'. It may be either a pointer to
 * an xa_state, or an xa_state stored on the stack. This is an unfortunate
 * ambiguity.
 * @index is the index of the entry being operated on
 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
 * @node refers to an xa_node; usually the primary one being operated on by
 * this function.
 * @offset is the index into the slots array inside an xa_node.
 * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
	return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_unlock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_unlock_bh(xas);
	else
		xas_unlock(xas);
}

static inline bool xa_track_free(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}

static inline bool xa_zero_busy(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
		unsigned int offset, xa_mark_t mark)
{
	return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
	bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set. Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
	return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}

static void xas_advance(struct xa_state *xas)
{
	xas->xa_offset++;
	xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}

/*
 * Starts a walk. If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to. If we're in an
 * error state, return NULL. If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node. Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}

static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
	}

	xas->xa_offset = offset;
	return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index. However, it will do nothing and return %NULL if
 * @xas is in an error state. xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context. The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
		if (node->shift == 0)
			break;
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);

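/*
 * Example (editorial sketch, not part of this file; the example_* helper
 * is hypothetical): a typical advanced-API lookup. The caller owns the
 * locking, so a read is an on-stack XA_STATE plus xas_load() under the
 * RCU read lock, with xas_retry() restarting the walk if we raced with
 * a node being freed. This is essentially how xa_load(), later in this
 * file, is implemented.
 */
static inline void *example_xas_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);	/* operation state on the stack */
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
	} while (xas_retry(&xas, entry));	/* skip XA_RETRY_ENTRYs */
	rcu_read_unlock();

	return entry;
}
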
/* Move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

#define XA_RCU_FREE	((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * This function is now internal-only.
 */
static void xas_destroy(struct xa_state *xas)
{
	struct xa_node *next, *node = xas->xa_alloc;

	while (node) {
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
		next = rcu_dereference_raw(node->parent);
		radix_tree_node_rcu_free(&node->rcu_head);
		xas->xa_alloc = node = next;
	}
}

/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue. The caller
 * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc(). More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	if (!xas->xa_alloc)
		return false;
	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);

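/*
 * Example (editorial sketch, not part of this file; the example_* helper
 * is hypothetical): the retry loop described above. The store runs under
 * the lock and allocates with GFP_NOWAIT internally; if that fails,
 * xas_nomem() allocates one node with the caller's gfp flags and the
 * loop retries, guaranteeing forward progress.
 */
static inline int example_store_retry(struct xarray *xa, unsigned long index,
		void *item)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);		/* may flag -ENOMEM in @xas */
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));	/* allocate and retry */

	return xas_error(&xas);
}
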
/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	if (gfpflags_allow_blocking(gfp)) {
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
	if (xas->xa_update)
		xas->xa_update(node);
	else
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		xas->xa_alloc = NULL;
	} else {
		gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;

		if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
			gfp |= __GFP_ACCOUNT;

		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}

#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
	return (xas->xa_sibs + 1UL) << xas->xa_shift;
}
#endif

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas. Because we cannot store a
 * multi-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask = xas_size(xas) - 1;
		max |= mask;
		if (mask == max)
			max++;
	}
#endif

	return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
	if (!xa_is_node(entry))
		return 0;
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		if (!xa_is_node(entry) && node->shift)
			break;
		if (xa_is_zero(entry) && xa_zero_busy(xa))
			entry = NULL;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node. This will fail if @xas->xa_node has
 * a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	if (!node->parent)
		xas_shrink(xas);
}

/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree. We must now free it and all
 * of its subnodes. There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (node->shift && xa_is_node(entry)) {
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 * @allow_root: %true if we can store the entry in the root directly
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store(). It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL. If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas, bool allow_root)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		if (!entry && xa_zero_busy(xa))
			entry = XA_ZERO_ENTRY;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		if (!shift && !allow_root)
			shift = XA_CHUNK_SHIFT;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}

/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas. Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range. This is for the benefit of users that have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
	unsigned long index = xas->xa_index;
	unsigned char shift = xas->xa_shift;
	unsigned char sibs = xas->xa_sibs;

	xas->xa_index |= ((sibs + 1UL) << shift) - 1;
	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
		xas->xa_offset |= sibs;
	xas->xa_shift = 0;
	xas->xa_sibs = 0;

	for (;;) {
		xas_create(xas, true);
		if (xas_error(xas))
			goto restore;
		if (xas->xa_index <= (index | XA_CHUNK_MASK))
			goto success;
		xas->xa_index -= XA_CHUNK_SIZE;

		for (;;) {
			struct xa_node *node = xas->xa_node;
			if (node->shift >= shift)
				break;
			xas->xa_node = xa_parent_locked(xas->xa, node);
			xas->xa_offset = node->offset - 1;
			if (node->offset != 0)
				break;
		}
	}

restore:
	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
	xas->xa_index = index;
	return;
success:
	xas->xa_index = index;
	if (xas->xa_node)
		xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);

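/*
 * Example (editorial sketch, not part of this file; the example_* helper
 * is hypothetical): pre-creating every slot in an aligned 2^order range,
 * in the style of the page-cache callers. XA_STATE_ORDER() describes the
 * multi-index range; xas_create_range() then leaves @xas positioned at
 * the start of it, ready for single-index stores.
 */
static inline int example_create_range(struct xarray *xa, unsigned long index,
		unsigned int order)
{
	XA_STATE_ORDER(xas, xa, index, order);

	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}
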
static void update_node(struct xa_state *xas, struct xa_node *node,
		int count, int values)
{
	if (!node || (!count && !values))
		return;

	node->count += count;
	node->nr_values += values;
	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
	xas_update(xas, node);
	if (count < 0)
		xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range). This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;
	int values = 0;
	void *first, *next;
	bool value = xa_is_value(entry);

	if (entry) {
		bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
		first = xas_create(xas, allow_root);
	} else {
		first = xas_load(xas);
	}

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early. rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next) && (!node || node->shift))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);

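/*
 * Example (editorial sketch, not part of this file; the example_* helper
 * is hypothetical): a conditional store built from xas_load() and
 * xas_store(), in the spirit of the xa_cmpxchg() implementation mentioned
 * above. Unlike the real thing, this sketch omits the XA_FREE_MARK
 * bookkeeping needed by allocating arrays.
 */
static inline void *example_cmpxchg(struct xarray *xa, unsigned long index,
		void *old, void *new)
{
	XA_STATE(xas, xa, index);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_load(&xas);
		if (curr == old)
			xas_store(&xas, new);	/* only replace on match */
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return curr;
}
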
/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
	if (xas_invalid(xas))
		return false;
	if (!xas->xa_node)
		return xa_marked(xas->xa, mark);
	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries. Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (node_set_mark(node, offset, mark))
			return;
		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (!xa_marked(xas->xa, mark))
		xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries. Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (!node_clear_mark(node, offset, mark))
			return;
		if (node_any_mark(node, mark))
			return;

		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (xa_marked(xas->xa, mark))
		xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);

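/*
 * Example (editorial sketch, not part of this file; the example_* helper
 * is hypothetical): tagging an entry. The mark helpers require @xas to
 * have been walked to an entry, so the load and the mark update happen
 * under the same hold of the lock.
 */
static inline void example_set_mark(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);

	xas_lock(&xas);
	if (xas_load(&xas))			/* only mark present entries */
		xas_set_mark(&xas, XA_MARK_0);
	xas_unlock(&xas);
}
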
/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas. If we're tracking
 * free entries with a mark, we need to set it on all entries. All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
	xa_mark_t mark = 0;

	for (;;) {
		if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
			xas_set_mark(xas, mark);
		else
			xas_clear_mark(xas, mark);
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}
EXPORT_SYMBOL_GPL(xas_init_marks);

#ifdef CONFIG_XARRAY_MULTI
static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
{
	unsigned int marks = 0;
	xa_mark_t mark = XA_MARK_0;

	for (;;) {
		if (node_get_mark(node, offset, mark))
			marks |= 1 << (__force unsigned int)mark;
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}

	return marks;
}

static void node_set_marks(struct xa_node *node, unsigned int offset,
			struct xa_node *child, unsigned int marks)
{
	xa_mark_t mark = XA_MARK_0;

	for (;;) {
		if (marks & (1 << (__force unsigned int)mark)) {
			node_set_mark(node, offset, mark);
			if (child)
				node_mark_all(child, mark);
		}
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}

/**
 * xas_split_alloc() - Allocate memory for splitting an entry.
 * @xas: XArray operation state.
 * @entry: New entry which will be stored in the array.
 * @order: New entry order.
 * @gfp: Memory allocation flags.
 *
 * This function should be called before calling xas_split().
 * If necessary, it will allocate new nodes (and fill them with @entry)
 * to prepare for the upcoming split of an entry of @order size into
 * entries of the order stored in the @xas.
 *
 * Context: May sleep if @gfp flags permit.
 */
void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
		gfp_t gfp)
{
	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	unsigned int mask = xas->xa_sibs;

	/* XXX: no support for splitting really large entries yet */
	if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
		goto nomem;
	if (xas->xa_shift + XA_CHUNK_SHIFT > order)
		return;

	do {
		unsigned int i;
		void *sibling = NULL;
		struct xa_node *node;

		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		if (!node)
			goto nomem;
		node->array = xas->xa;
		for (i = 0; i < XA_CHUNK_SIZE; i++) {
			if ((i & mask) == 0) {
				RCU_INIT_POINTER(node->slots[i], entry);
				sibling = xa_mk_sibling(i);
			} else {
				RCU_INIT_POINTER(node->slots[i], sibling);
			}
		}
		RCU_INIT_POINTER(node->parent, xas->xa_alloc);
		xas->xa_alloc = node;
	} while (sibs-- > 0);

	return;
nomem:
	xas_destroy(xas);
	xas_set_err(xas, -ENOMEM);
}
EXPORT_SYMBOL_GPL(xas_split_alloc);

/**
 * xas_split() - Split a multi-index entry into smaller entries.
 * @xas: XArray operation state.
 * @entry: New entry to store in the array.
 * @order: New entry order.
 *
 * The value in the entry is copied to all the replacement entries.
 *
 * Context: Any context. The caller should hold the xa_lock.
 */
void xas_split(struct xa_state *xas, void *entry, unsigned int order)
{
	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	unsigned int offset, marks;
	struct xa_node *node;
	void *curr = xas_load(xas);
	int values = 0;

	node = xas->xa_node;
	if (xas_top(node))
		return;

	marks = node_get_marks(node, xas->xa_offset);

	offset = xas->xa_offset + sibs;
	do {
		if (xas->xa_shift < node->shift) {
			struct xa_node *child = xas->xa_alloc;

			xas->xa_alloc = rcu_dereference_raw(child->parent);
			child->shift = node->shift - XA_CHUNK_SHIFT;
			child->offset = offset;
			child->count = XA_CHUNK_SIZE;
			child->nr_values = xa_is_value(entry) ?
					XA_CHUNK_SIZE : 0;
			RCU_INIT_POINTER(child->parent, node);
			node_set_marks(node, offset, child, marks);
			rcu_assign_pointer(node->slots[offset],
					xa_mk_node(child));
			if (xa_is_value(curr))
				values--;
			xas_update(xas, child);
		} else {
			unsigned int canon = offset - xas->xa_sibs;

			node_set_marks(node, canon, NULL, marks);
			rcu_assign_pointer(node->slots[canon], entry);
			while (offset > canon)
				rcu_assign_pointer(node->slots[offset--],
						xa_mk_sibling(canon));
			values += (xa_is_value(entry) - xa_is_value(curr)) *
					(xas->xa_sibs + 1);
		}
	} while (offset-- > xas->xa_offset);

	node->nr_values += values;
	xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_split);
#endif

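/*
 * Example (editorial sketch, not part of this file and assuming
 * CONFIG_XARRAY_MULTI; the example_* helper is hypothetical): the
 * two-step split protocol above. Memory is allocated outside the lock
 * with xas_split_alloc(), then the entry of @old_order is split into
 * entries of @new_order under the lock.
 */
static inline int example_split(struct xarray *xa, unsigned long index,
		unsigned int old_order, unsigned int new_order, void *entry)
{
	XA_STATE_ORDER(xas, xa, index, new_order);

	xas_split_alloc(&xas, entry, old_order, GFP_KERNEL);
	if (xas_error(&xas))
		return xas_error(&xas);

	xas_lock(&xas);
	xas_split(&xas, entry, old_order);
	xas_unlock(&xas);

	return 0;
}
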
/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry. Those users should call this function before they drop
 * the lock. It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock. If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration. If a user needs
 * to pause a reverse iteration, we will need a xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (xas_invalid(xas))
		return;

	xas->xa_node = XAS_RESTART;
	if (node) {
		unsigned long offset = xas->xa_offset;
		while (++offset < XA_CHUNK_SIZE) {
			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
				break;
		}
		xas->xa_index += (offset - xas->xa_offset) << node->shift;
		if (xas->xa_index == 0)
			xas->xa_node = XAS_BOUNDS;
	} else {
		xas->xa_index++;
	}
}
EXPORT_SYMBOL_GPL(xas_pause);

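/*
 * Example (editorial sketch, not part of this file; the example_* helper
 * is hypothetical): dropping the lock mid-iteration. xas_pause() makes
 * the state safe to resume after @process sleeps; the next loop iteration
 * re-walks from the saved index.
 */
static inline void example_walk_and_sleep(struct xarray *xa,
		void (*process)(void *entry))
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_pause(&xas);
		xas_unlock(&xas);
		process(entry);			/* may sleep */
		xas_lock(&xas);
	}
	xas_unlock(&xas);
}
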
/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index--;
	if (!xas->xa_node)
		return set_bounds(xas);
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset--;

	while (xas->xa_offset == 255) {
		xas->xa_offset = xas->xa_node->offset - 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_prev);

/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index++;
	if (!xas->xa_node)
		return set_bounds(xas);
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset++;

	while (xas->xa_offset == XA_CHUNK_SIZE) {
		xas->xa_offset = xas->xa_node->offset + 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_next);

/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index. If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array. This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
	void *entry;

	if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
		return NULL;
	if (xas->xa_index > max)
		return set_bounds(xas);

	if (!xas->xa_node) {
		xas->xa_index = 1;
		return set_bounds(xas);
	} else if (xas->xa_node == XAS_RESTART) {
		entry = xas_load(xas);
		if (entry || xas_not_node(xas->xa_node))
			return entry;
	} else if (!xas->xa_node->shift &&
			xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
	}

	xas_advance(xas);

	while (xas->xa_node && (xas->xa_index <= max)) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (xa_is_node(entry)) {
			xas->xa_node = xa_to_node(entry);
			xas->xa_offset = 0;
			continue;
		}
		if (entry && !xa_is_sibling(entry))
			return entry;

		xas_advance(xas);
	}

	if (!xas->xa_node)
		xas->xa_node = XAS_BOUNDS;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);

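/*
 * Example (editorial sketch, not part of this file; the example_* helper
 * is hypothetical): the xas_for_each() iterator is built on xas_find(),
 * so a lockless walk pairs it with the RCU read lock and xas_retry().
 */
static inline unsigned int example_count_entries(struct xarray *xa,
		unsigned long max)
{
	XA_STATE(xas, xa, 0);
	unsigned int count = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, max) {
		if (xas_retry(&xas, entry))	/* raced with a free */
			continue;
		count++;
	}
	rcu_read_unlock();

	return count;
}
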
/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index. If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array. This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
	bool advance = true;
	unsigned int offset;
	void *entry;

	if (xas_error(xas))
		return NULL;
	if (xas->xa_index > max)
		goto max;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		goto out;
	} else if (xas_top(xas->xa_node)) {
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
		if (xas->xa_index > max_index(entry))
			goto out;
		if (!xa_is_node(entry)) {
			if (xa_marked(xas->xa, mark))
				return entry;
			xas->xa_index = 1;
			goto out;
		}
		xas->xa_node = xa_to_node(entry);
		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
	}

	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			advance = false;
			continue;
		}

		if (!advance) {
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
				xas->xa_offset = xa_to_sibling(entry);
				xas_move_index(xas, xas->xa_offset);
			}
		}

		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
			advance = false;
			xas_move_index(xas, offset);
			/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
			xas->xa_offset = offset;
			if (offset == XA_CHUNK_SIZE)
				continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
			continue;
		if (!xa_is_node(entry))
			return entry;
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}

out:
	if (xas->xa_index > max)
		goto max;
	return set_bounds(xas);
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);

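/*
 * Example (editorial sketch, not part of this file; the example_* helper
 * is hypothetical): visiting only marked entries with
 * xas_for_each_marked(), which is built on xas_find_marked().
 */
static inline unsigned int example_count_marked(struct xarray *xa,
		unsigned long max, xa_mark_t mark)
{
	XA_STATE(xas, xa, 0);
	unsigned int count = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each_marked(&xas, entry, max, mark) {
		if (xas_retry(&xas, entry))
			continue;
		count++;
	}
	rcu_read_unlock();

	return count;
}
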
/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context. Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
	void *curr;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node)
		return NULL;

	if (xas_top(xas->xa_node)) {
		curr = xas_start(xas);
		if (!curr)
			return NULL;
		while (xa_is_node(curr)) {
			struct xa_node *node = xa_to_node(curr);
			curr = xas_descend(xas, node);
		}
		if (curr)
			return curr;
	}

	if (xas->xa_node->shift > xas->xa_shift)
		return NULL;

	for (;;) {
		if (xas->xa_node->shift == xas->xa_shift) {
			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
				break;
		} else if (xas->xa_offset == XA_CHUNK_MASK) {
			xas->xa_offset = xas->xa_node->offset;
			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			continue;
		}
		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
		if (xa_is_sibling(curr))
			continue;
		while (xa_is_node(curr)) {
			xas->xa_node = xa_to_node(curr);
			xas->xa_offset = 0;
			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
		}
		if (curr)
			return curr;
	}
	xas->xa_offset -= xas->xa_sibs;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);

| 1444 | /** |
Matthew Wilcox | ad3d6c7 | 2017-11-07 14:57:46 -0500 | [diff] [blame] | 1445 | * xa_load() - Load an entry from an XArray. |
| 1446 | * @xa: XArray. |
| 1447 | * @index: index into array. |
| 1448 | * |
| 1449 | * Context: Any context. Takes and releases the RCU lock. |
| 1450 | * Return: The entry at @index in @xa. |
| 1451 | */ |
| 1452 | void *xa_load(struct xarray *xa, unsigned long index) |
| 1453 | { |
| 1454 | XA_STATE(xas, xa, index); |
| 1455 | void *entry; |
| 1456 | |
| 1457 | rcu_read_lock(); |
| 1458 | do { |
| 1459 | entry = xas_load(&xas); |
Matthew Wilcox | 9f14d4f | 2018-10-01 14:54:59 -0400 | [diff] [blame] | 1460 | if (xa_is_zero(entry)) |
| 1461 | entry = NULL; |
Matthew Wilcox | ad3d6c7 | 2017-11-07 14:57:46 -0500 | [diff] [blame] | 1462 | } while (xas_retry(&xas, entry)); |
| 1463 | rcu_read_unlock(); |
| 1464 | |
| 1465 | return entry; |
| 1466 | } |
| 1467 | EXPORT_SYMBOL(xa_load); |
| 1468 | |
Matthew Wilcox | 58d6ea3 | 2017-11-10 15:15:08 -0500 | [diff] [blame] | 1469 | static void *xas_result(struct xa_state *xas, void *curr) |
| 1470 | { |
Matthew Wilcox | 9f14d4f | 2018-10-01 14:54:59 -0400 | [diff] [blame] | 1471 | if (xa_is_zero(curr)) |
| 1472 | return NULL; |
Matthew Wilcox | 58d6ea3 | 2017-11-10 15:15:08 -0500 | [diff] [blame] | 1473 | if (xas_error(xas)) |
| 1474 | curr = xas->xa_node; |
| 1475 | return curr; |
| 1476 | } |
| 1477 | |
| 1478 | /** |
| 1479 | * __xa_erase() - Erase this entry from the XArray while locked. |
| 1480 | * @xa: XArray. |
| 1481 | * @index: Index into array. |
| 1482 | * |
Matthew Wilcox | 809ab93 | 2019-01-26 00:52:26 -0500 | [diff] [blame] | 1483 | * After this function returns, loading from @index will return %NULL. |
| 1484 | * If the index is part of a multi-index entry, all indices will be erased |
| 1485 | * and none of the entries will be part of a multi-index entry. |
Matthew Wilcox | 58d6ea3 | 2017-11-10 15:15:08 -0500 | [diff] [blame] | 1486 | * |
Matthew Wilcox | 809ab93 | 2019-01-26 00:52:26 -0500 | [diff] [blame] | 1487 | * Context: Any context. Expects xa_lock to be held on entry. |
| 1488 | * Return: The entry which used to be at this index. |
Matthew Wilcox | 58d6ea3 | 2017-11-10 15:15:08 -0500 | [diff] [blame] | 1489 | */ |
| 1490 | void *__xa_erase(struct xarray *xa, unsigned long index) |
| 1491 | { |
| 1492 | XA_STATE(xas, xa, index); |
| 1493 | return xas_result(&xas, xas_store(&xas, NULL)); |
| 1494 | } |
Matthew Wilcox | 9ee5a3b | 2018-11-01 22:52:06 -0400 | [diff] [blame] | 1495 | EXPORT_SYMBOL(__xa_erase); |
Matthew Wilcox | 58d6ea3 | 2017-11-10 15:15:08 -0500 | [diff] [blame] | 1496 | |
| 1497 | /** |
Matthew Wilcox | 9c16bb8 | 2018-11-05 15:48:49 -0500 | [diff] [blame] | 1498 | * xa_erase() - Erase this entry from the XArray. |
| 1499 | * @xa: XArray. |
| 1500 | * @index: Index of entry. |
| 1501 | * |
Matthew Wilcox | 809ab93 | 2019-01-26 00:52:26 -0500 | [diff] [blame] | 1502 | * After this function returns, loading from @index will return %NULL. |
| 1503 | * If the index is part of a multi-index entry, all indices will be erased |
| 1504 | * and none of the entries will be part of a multi-index entry. |
Matthew Wilcox | 9c16bb8 | 2018-11-05 15:48:49 -0500 | [diff] [blame] | 1505 | * |
| 1506 | * Context: Any context. Takes and releases the xa_lock. |
| 1507 | * Return: The entry which used to be at this index. |
| 1508 | */ |
| 1509 | void *xa_erase(struct xarray *xa, unsigned long index) |
| 1510 | { |
| 1511 | void *entry; |
| 1512 | |
| 1513 | xa_lock(xa); |
| 1514 | entry = __xa_erase(xa, index); |
| 1515 | xa_unlock(xa); |
| 1516 | |
| 1517 | return entry; |
| 1518 | } |
| 1519 | EXPORT_SYMBOL(xa_erase); |
| 1520 | |
| 1521 | /** |
Matthew Wilcox | 58d6ea3 | 2017-11-10 15:15:08 -0500 | [diff] [blame] | 1522 | * __xa_store() - Store this entry in the XArray. |
| 1523 | * @xa: XArray. |
| 1524 | * @index: Index into array. |
| 1525 | * @entry: New entry. |
| 1526 | * @gfp: Memory allocation flags. |
| 1527 | * |
| 1528 | * You must already be holding the xa_lock when calling this function. |
| 1529 | * It will drop the lock if needed to allocate memory, and then reacquire |
| 1530 | * it afterwards. |
| 1531 | * |
| 1532 | * Context: Any context. Expects xa_lock to be held on entry. May |
| 1533 | * release and reacquire xa_lock if @gfp flags permit. |
| 1534 | * Return: The old entry at this index or xa_err() if an error happened. |
| 1535 | */ |
| 1536 | void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) |
| 1537 | { |
| 1538 | XA_STATE(xas, xa, index); |
| 1539 | void *curr; |
| 1540 | |
Matthew Wilcox | 76b4e52 | 2018-12-28 23:20:44 -0500 | [diff] [blame] | 1541 | if (WARN_ON_ONCE(xa_is_advanced(entry))) |
Matthew Wilcox | 58d6ea3 | 2017-11-10 15:15:08 -0500 | [diff] [blame] | 1542 | return XA_ERROR(-EINVAL); |
Matthew Wilcox | d9c4804 | 2018-11-05 16:15:56 -0500 | [diff] [blame] | 1543 | if (xa_track_free(xa) && !entry) |
| 1544 | entry = XA_ZERO_ENTRY; |
Matthew Wilcox | 58d6ea3 | 2017-11-10 15:15:08 -0500 | [diff] [blame] | 1545 | |
| 1546 | do { |
| 1547 | curr = xas_store(&xas, entry); |
Matthew Wilcox | d9c4804 | 2018-11-05 16:15:56 -0500 | [diff] [blame] | 1548 | if (xa_track_free(xa)) |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1549 | xas_clear_mark(&xas, XA_FREE_MARK); |
Matthew Wilcox | 58d6ea3 | 2017-11-10 15:15:08 -0500 | [diff] [blame] | 1550 | } while (__xas_nomem(&xas, gfp)); |
| 1551 | |
| 1552 | return xas_result(&xas, curr); |
| 1553 | } |
| 1554 | EXPORT_SYMBOL(__xa_store); |
| 1555 | |
Matthew Wilcox | 9b89a03 | 2017-11-10 09:34:31 -0500 | [diff] [blame] | 1556 | /** |
Matthew Wilcox | 611f318 | 2018-11-05 15:56:17 -0500 | [diff] [blame] | 1557 | * xa_store() - Store this entry in the XArray. |
| 1558 | * @xa: XArray. |
| 1559 | * @index: Index into array. |
| 1560 | * @entry: New entry. |
| 1561 | * @gfp: Memory allocation flags. |
| 1562 | * |
| 1563 | * After this function returns, loads from this index will return @entry. |
Matthew Wilcox (Oracle) | 8fc7564 | 2020-10-15 20:05:16 -0700 | [diff] [blame] | 1564 | * Storing into an existing multi-index entry updates the entry of every index. |
Matthew Wilcox | 611f318 | 2018-11-05 15:56:17 -0500 | [diff] [blame] | 1565 | * The marks associated with @index are unaffected unless @entry is %NULL. |
| 1566 | * |
| 1567 | * Context: Any context. Takes and releases the xa_lock. |
| 1568 | * May sleep if the @gfp flags permit. |
| 1569 | * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry |
| 1570 | * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation |
| 1571 | * failed. |
| 1572 | */ |
| 1573 | void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) |
| 1574 | { |
| 1575 | void *curr; |
| 1576 | |
| 1577 | xa_lock(xa); |
| 1578 | curr = __xa_store(xa, index, entry, gfp); |
| 1579 | xa_unlock(xa); |
| 1580 | |
| 1581 | return curr; |
| 1582 | } |
| 1583 | EXPORT_SYMBOL(xa_store); |
| 1584 | |
| 1585 | /** |
Matthew Wilcox | 41aec91 | 2017-11-10 15:34:55 -0500 | [diff] [blame] | 1586 | * __xa_cmpxchg() - Store this entry in the XArray. |
| 1587 | * @xa: XArray. |
| 1588 | * @index: Index into array. |
| 1589 | * @old: Old value to test against. |
| 1590 | * @entry: New entry. |
| 1591 | * @gfp: Memory allocation flags. |
| 1592 | * |
| 1593 | * You must already be holding the xa_lock when calling this function. |
| 1594 | * It will drop the lock if needed to allocate memory, and then reacquire |
| 1595 | * it afterwards. |
| 1596 | * |
| 1597 | * Context: Any context. Expects xa_lock to be held on entry. May |
| 1598 | * release and reacquire xa_lock if @gfp flags permit. |
| 1599 | * Return: The old entry at this index or xa_err() if an error happened. |
| 1600 | */ |
| 1601 | void *__xa_cmpxchg(struct xarray *xa, unsigned long index, |
| 1602 | void *old, void *entry, gfp_t gfp) |
| 1603 | { |
| 1604 | XA_STATE(xas, xa, index); |
| 1605 | void *curr; |
| 1606 | |
Matthew Wilcox | 76b4e52 | 2018-12-28 23:20:44 -0500 | [diff] [blame] | 1607 | if (WARN_ON_ONCE(xa_is_advanced(entry))) |
Matthew Wilcox | 41aec91 | 2017-11-10 15:34:55 -0500 | [diff] [blame] | 1608 | return XA_ERROR(-EINVAL); |
| 1609 | |
| 1610 | do { |
| 1611 | curr = xas_load(&xas); |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1612 | if (curr == old) { |
Matthew Wilcox | 41aec91 | 2017-11-10 15:34:55 -0500 | [diff] [blame] | 1613 | xas_store(&xas, entry); |
Matthew Wilcox | b38f6c5 | 2019-02-20 11:30:49 -0500 | [diff] [blame] | 1614 | if (xa_track_free(xa) && entry && !curr) |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1615 | xas_clear_mark(&xas, XA_FREE_MARK); |
| 1616 | } |
Matthew Wilcox | 41aec91 | 2017-11-10 15:34:55 -0500 | [diff] [blame] | 1617 | } while (__xas_nomem(&xas, gfp)); |
| 1618 | |
| 1619 | return xas_result(&xas, curr); |
| 1620 | } |
| 1621 | EXPORT_SYMBOL(__xa_cmpxchg); |
| 1622 | |
| 1623 | /** |
Matthew Wilcox | b0606fe | 2019-01-02 13:57:03 -0500 | [diff] [blame] | 1624 | * __xa_insert() - Store this entry in the XArray if no entry is present. |
| 1625 | * @xa: XArray. |
| 1626 | * @index: Index into array. |
| 1627 | * @entry: New entry. |
| 1628 | * @gfp: Memory allocation flags. |
| 1629 | * |
| 1630 | * Inserting a NULL entry will store a reserved entry (like xa_reserve()) |
| 1631 | * if no entry is present. Inserting will fail if a reserved entry is |
| 1632 | * present, even though loading from this index will return NULL. |
| 1633 | * |
| 1634 | * Context: Any context. Expects xa_lock to be held on entry. May |
| 1635 | * release and reacquire xa_lock if @gfp flags permit. |
Matthew Wilcox | fd9dc93 | 2019-02-06 13:07:11 -0500 | [diff] [blame] | 1636 | * Return: 0 if the store succeeded. -EBUSY if another entry was present. |
Matthew Wilcox | b0606fe | 2019-01-02 13:57:03 -0500 | [diff] [blame] | 1637 | * -ENOMEM if memory could not be allocated. |
| 1638 | */ |
| 1639 | int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) |
| 1640 | { |
| 1641 | XA_STATE(xas, xa, index); |
| 1642 | void *curr; |
| 1643 | |
| 1644 | if (WARN_ON_ONCE(xa_is_advanced(entry))) |
| 1645 | return -EINVAL; |
| 1646 | if (!entry) |
| 1647 | entry = XA_ZERO_ENTRY; |
| 1648 | |
| 1649 | do { |
| 1650 | curr = xas_load(&xas); |
| 1651 | if (!curr) { |
| 1652 | xas_store(&xas, entry); |
| 1653 | if (xa_track_free(xa)) |
| 1654 | xas_clear_mark(&xas, XA_FREE_MARK); |
| 1655 | } else { |
Matthew Wilcox | fd9dc93 | 2019-02-06 13:07:11 -0500 | [diff] [blame] | 1656 | xas_set_err(&xas, -EBUSY); |
Matthew Wilcox | b0606fe | 2019-01-02 13:57:03 -0500 | [diff] [blame] | 1657 | } |
| 1658 | } while (__xas_nomem(&xas, gfp)); |
| 1659 | |
| 1660 | return xas_error(&xas); |
| 1661 | } |
| 1662 | EXPORT_SYMBOL(__xa_insert); |
| 1663 | |
Matthew Wilcox | 0e9446c | 2018-08-15 14:13:29 -0400 | [diff] [blame] | 1664 | #ifdef CONFIG_XARRAY_MULTI |
| 1665 | static void xas_set_range(struct xa_state *xas, unsigned long first, |
| 1666 | unsigned long last) |
| 1667 | { |
| 1668 | unsigned int shift = 0; |
| 1669 | unsigned long sibs = last - first; |
| 1670 | unsigned int offset = XA_CHUNK_MASK; |
| 1671 | |
| 1672 | xas_set(xas, first); |
| 1673 | |
| 1674 | while ((first & XA_CHUNK_MASK) == 0) { |
| 1675 | if (sibs < XA_CHUNK_MASK) |
| 1676 | break; |
| 1677 | if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK)) |
| 1678 | break; |
| 1679 | shift += XA_CHUNK_SHIFT; |
| 1680 | if (offset == XA_CHUNK_MASK) |
| 1681 | offset = sibs & XA_CHUNK_MASK; |
| 1682 | sibs >>= XA_CHUNK_SHIFT; |
| 1683 | first >>= XA_CHUNK_SHIFT; |
| 1684 | } |
| 1685 | |
| 1686 | offset = first & XA_CHUNK_MASK; |
| 1687 | if (offset + sibs > XA_CHUNK_MASK) |
| 1688 | sibs = XA_CHUNK_MASK - offset; |
| 1689 | if ((((first + sibs + 1) << shift) - 1) > last) |
| 1690 | sibs -= 1; |
| 1691 | |
| 1692 | xas->xa_shift = shift; |
| 1693 | xas->xa_sibs = sibs; |
| 1694 | } |
| 1695 | |
| 1696 | /** |
| 1697 | * xa_store_range() - Store this entry at a range of indices in the XArray. |
| 1698 | * @xa: XArray. |
| 1699 | * @first: First index to affect. |
| 1700 | * @last: Last index to affect. |
| 1701 | * @entry: New entry. |
| 1702 | * @gfp: Memory allocation flags. |
| 1703 | * |
| 1704 | * After this function returns, loads from any index between @first and @last, |
| 1705 | * inclusive will return @entry. |
Matthew Wilcox (Oracle) | 8fc7564 | 2020-10-15 20:05:16 -0700 | [diff] [blame] | 1706 | * Storing into an existing multi-index entry updates the entry of every index. |
Matthew Wilcox | 0e9446c | 2018-08-15 14:13:29 -0400 | [diff] [blame] | 1707 | * The marks associated with @index are unaffected unless @entry is %NULL. |
| 1708 | * |
| 1709 | * Context: Process context. Takes and releases the xa_lock. May sleep |
| 1710 | * if the @gfp flags permit. |
| 1711 | * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in |
| 1712 | * an XArray, or xa_err(-ENOMEM) if memory allocation failed. |
| 1713 | */ |
| 1714 | void *xa_store_range(struct xarray *xa, unsigned long first, |
| 1715 | unsigned long last, void *entry, gfp_t gfp) |
| 1716 | { |
| 1717 | XA_STATE(xas, xa, 0); |
| 1718 | |
| 1719 | if (WARN_ON_ONCE(xa_is_internal(entry))) |
| 1720 | return XA_ERROR(-EINVAL); |
| 1721 | if (last < first) |
| 1722 | return XA_ERROR(-EINVAL); |
| 1723 | |
| 1724 | do { |
| 1725 | xas_lock(&xas); |
| 1726 | if (entry) { |
Matthew Wilcox | 44a4a66 | 2018-11-05 10:53:09 -0500 | [diff] [blame] | 1727 | unsigned int order = BITS_PER_LONG; |
| 1728 | if (last + 1) |
| 1729 | order = __ffs(last + 1); |
Matthew Wilcox | 0e9446c | 2018-08-15 14:13:29 -0400 | [diff] [blame] | 1730 | xas_set_order(&xas, last, order); |
Matthew Wilcox | 76b4e52 | 2018-12-28 23:20:44 -0500 | [diff] [blame] | 1731 | xas_create(&xas, true); |
Matthew Wilcox | 0e9446c | 2018-08-15 14:13:29 -0400 | [diff] [blame] | 1732 | if (xas_error(&xas)) |
| 1733 | goto unlock; |
| 1734 | } |
| 1735 | do { |
| 1736 | xas_set_range(&xas, first, last); |
| 1737 | xas_store(&xas, entry); |
| 1738 | if (xas_error(&xas)) |
| 1739 | goto unlock; |
| 1740 | first += xas_size(&xas); |
| 1741 | } while (first <= last); |
| 1742 | unlock: |
| 1743 | xas_unlock(&xas); |
| 1744 | } while (xas_nomem(&xas, gfp)); |
| 1745 | |
| 1746 | return xas_result(&xas, NULL); |
| 1747 | } |
| 1748 | EXPORT_SYMBOL(xa_store_range); |
Matthew Wilcox (Oracle) | 57417ce | 2020-10-15 20:05:13 -0700 | [diff] [blame] | 1749 | |
| 1750 | /** |
| 1751 | * xa_get_order() - Get the order of an entry. |
| 1752 | * @xa: XArray. |
| 1753 | * @index: Index of the entry. |
| 1754 | * |
| 1755 | * Return: A number between 0 and 63 indicating the order of the entry. |
| 1756 | */ |
| 1757 | int xa_get_order(struct xarray *xa, unsigned long index) |
| 1758 | { |
| 1759 | XA_STATE(xas, xa, index); |
| 1760 | void *entry; |
| 1761 | int order = 0; |
| 1762 | |
| 1763 | rcu_read_lock(); |
| 1764 | entry = xas_load(&xas); |
| 1765 | |
| 1766 | if (!entry) |
| 1767 | goto unlock; |
| 1768 | |
| 1769 | if (!xas.xa_node) |
| 1770 | goto unlock; |
| 1771 | |
| 1772 | for (;;) { |
| 1773 | unsigned int slot = xas.xa_offset + (1 << order); |
| 1774 | |
| 1775 | if (slot >= XA_CHUNK_SIZE) |
| 1776 | break; |
| 1777 | if (!xa_is_sibling(xas.xa_node->slots[slot])) |
| 1778 | break; |
| 1779 | order++; |
| 1780 | } |
| 1781 | |
| 1782 | order += xas.xa_node->shift; |
| 1783 | unlock: |
| 1784 | rcu_read_unlock(); |
| 1785 | |
| 1786 | return order; |
| 1787 | } |
| 1788 | EXPORT_SYMBOL(xa_get_order); |
Matthew Wilcox | 0e9446c | 2018-08-15 14:13:29 -0400 | [diff] [blame] | 1789 | #endif /* CONFIG_XARRAY_MULTI */ |
| 1790 | |
Matthew Wilcox | 9f14d4f | 2018-10-01 14:54:59 -0400 | [diff] [blame] | 1791 | /** |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1792 | * __xa_alloc() - Find somewhere to store this entry in the XArray. |
| 1793 | * @xa: XArray. |
| 1794 | * @id: Pointer to ID. |
Matthew Wilcox | a3e4d3f | 2018-12-31 10:41:01 -0500 | [diff] [blame] | 1795 | * @limit: Range for allocated ID. |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1796 | * @entry: New entry. |
| 1797 | * @gfp: Memory allocation flags. |
| 1798 | * |
Matthew Wilcox | a3e4d3f | 2018-12-31 10:41:01 -0500 | [diff] [blame] | 1799 | * Finds an empty entry in @xa between @limit.min and @limit.max, |
| 1800 | * stores the index into the @id pointer, then stores the entry at |
| 1801 | * that index. A concurrent lookup will not see an uninitialised @id. |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1802 | * |
| 1803 | * Context: Any context. Expects xa_lock to be held on entry. May |
| 1804 | * release and reacquire xa_lock if @gfp flags permit. |
Matthew Wilcox | a3e4d3f | 2018-12-31 10:41:01 -0500 | [diff] [blame] | 1805 | * Return: 0 on success, -ENOMEM if memory could not be allocated or |
| 1806 | * -EBUSY if there are no free entries in @limit. |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1807 | */ |
Matthew Wilcox | a3e4d3f | 2018-12-31 10:41:01 -0500 | [diff] [blame] | 1808 | int __xa_alloc(struct xarray *xa, u32 *id, void *entry, |
| 1809 | struct xa_limit limit, gfp_t gfp) |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1810 | { |
| 1811 | XA_STATE(xas, xa, 0); |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1812 | |
Matthew Wilcox | 76b4e52 | 2018-12-28 23:20:44 -0500 | [diff] [blame] | 1813 | if (WARN_ON_ONCE(xa_is_advanced(entry))) |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1814 | return -EINVAL; |
| 1815 | if (WARN_ON_ONCE(!xa_track_free(xa))) |
| 1816 | return -EINVAL; |
| 1817 | |
| 1818 | if (!entry) |
| 1819 | entry = XA_ZERO_ENTRY; |
| 1820 | |
| 1821 | do { |
Matthew Wilcox | a3e4d3f | 2018-12-31 10:41:01 -0500 | [diff] [blame] | 1822 | xas.xa_index = limit.min; |
| 1823 | xas_find_marked(&xas, limit.max, XA_FREE_MARK); |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1824 | if (xas.xa_node == XAS_RESTART) |
Matthew Wilcox | a3e4d3f | 2018-12-31 10:41:01 -0500 | [diff] [blame] | 1825 | xas_set_err(&xas, -EBUSY); |
| 1826 | else |
| 1827 | *id = xas.xa_index; |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1828 | xas_store(&xas, entry); |
| 1829 | xas_clear_mark(&xas, XA_FREE_MARK); |
| 1830 | } while (__xas_nomem(&xas, gfp)); |
| 1831 | |
Matthew Wilcox | a3e4d3f | 2018-12-31 10:41:01 -0500 | [diff] [blame] | 1832 | return xas_error(&xas); |
Matthew Wilcox | 371c752 | 2018-07-04 10:50:12 -0400 | [diff] [blame] | 1833 | } |
| 1834 | EXPORT_SYMBOL(__xa_alloc); |
| 1835 | |
| 1836 | /** |
Matthew Wilcox | 2fa044e | 2018-11-06 14:13:35 -0500 | [diff] [blame] | 1837 | * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray. |
| 1838 | * @xa: XArray. |
| 1839 | * @id: Pointer to ID. |
| 1840 | * @entry: New entry. |
| 1841 | * @limit: Range of allocated ID. |
| 1842 | * @next: Pointer to next ID to allocate. |
| 1843 | * @gfp: Memory allocation flags. |
| 1844 | * |
| 1845 | * Finds an empty entry in @xa between @limit.min and @limit.max, |
| 1846 | * stores the index into the @id pointer, then stores the entry at |
| 1847 | * that index. A concurrent lookup will not see an uninitialised @id. |
| 1848 | * The search for an empty entry will start at @next and will wrap |
| 1849 | * around if necessary. |
| 1850 | * |
| 1851 | * Context: Any context. Expects xa_lock to be held on entry. May |
| 1852 | * release and reacquire xa_lock if @gfp flags permit. |
| 1853 | * Return: 0 if the allocation succeeded without wrapping. 1 if the |
| 1854 | * allocation succeeded after wrapping, -ENOMEM if memory could not be |
| 1855 | * allocated or -EBUSY if there are no free entries in @limit. |
| 1856 | */ |
| 1857 | int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, |
| 1858 | struct xa_limit limit, u32 *next, gfp_t gfp) |
| 1859 | { |
| 1860 | u32 min = limit.min; |
| 1861 | int ret; |
| 1862 | |
| 1863 | limit.min = max(min, *next); |
| 1864 | ret = __xa_alloc(xa, id, entry, limit, gfp); |
| 1865 | if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) { |
| 1866 | xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED; |
| 1867 | ret = 1; |
| 1868 | } |
| 1869 | |
| 1870 | if (ret < 0 && limit.min > min) { |
| 1871 | limit.min = min; |
| 1872 | ret = __xa_alloc(xa, id, entry, limit, gfp); |
| 1873 | if (ret == 0) |
| 1874 | ret = 1; |
| 1875 | } |
| 1876 | |
| 1877 | if (ret >= 0) { |
| 1878 | *next = *id + 1; |
| 1879 | if (*next == 0) |
| 1880 | xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED; |
| 1881 | } |
| 1882 | return ret; |
| 1883 | } |
| 1884 | EXPORT_SYMBOL(__xa_alloc_cyclic); |
| 1885 | |
| 1886 | /** |
Matthew Wilcox | 9b89a03 | 2017-11-10 09:34:31 -0500 | [diff] [blame] | 1887 | * __xa_set_mark() - Set this mark on this entry while locked. |
| 1888 | * @xa: XArray. |
| 1889 | * @index: Index of entry. |
| 1890 | * @mark: Mark number. |
| 1891 | * |
Matthew Wilcox | 804dfaf | 2018-11-05 16:37:15 -0500 | [diff] [blame] | 1892 | * Attempting to set a mark on a %NULL entry does not succeed. |
Matthew Wilcox | 9b89a03 | 2017-11-10 09:34:31 -0500 | [diff] [blame] | 1893 | * |
| 1894 | * Context: Any context. Expects xa_lock to be held on entry. |
| 1895 | */ |
| 1896 | void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) |
| 1897 | { |
| 1898 | XA_STATE(xas, xa, index); |
| 1899 | void *entry = xas_load(&xas); |
| 1900 | |
| 1901 | if (entry) |
| 1902 | xas_set_mark(&xas, mark); |
| 1903 | } |
Matthew Wilcox | 9ee5a3b | 2018-11-01 22:52:06 -0400 | [diff] [blame] | 1904 | EXPORT_SYMBOL(__xa_set_mark); |
Matthew Wilcox | 9b89a03 | 2017-11-10 09:34:31 -0500 | [diff] [blame] | 1905 | |
| 1906 | /** |
| 1907 | * __xa_clear_mark() - Clear this mark on this entry while locked. |
| 1908 | * @xa: XArray. |
| 1909 | * @index: Index of entry. |
| 1910 | * @mark: Mark number. |
| 1911 | * |
| 1912 | * Context: Any context. Expects xa_lock to be held on entry. |
| 1913 | */ |
| 1914 | void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) |
| 1915 | { |
| 1916 | XA_STATE(xas, xa, index); |
| 1917 | void *entry = xas_load(&xas); |
| 1918 | |
| 1919 | if (entry) |
| 1920 | xas_clear_mark(&xas, mark); |
| 1921 | } |
Matthew Wilcox | 9ee5a3b | 2018-11-01 22:52:06 -0400 | [diff] [blame] | 1922 | EXPORT_SYMBOL(__xa_clear_mark); |
Matthew Wilcox | 9b89a03 | 2017-11-10 09:34:31 -0500 | [diff] [blame] | 1923 | |
| 1924 | /** |
| 1925 | * xa_get_mark() - Inquire whether this mark is set on this entry. |
| 1926 | * @xa: XArray. |
| 1927 | * @index: Index of entry. |
| 1928 | * @mark: Mark number. |
| 1929 | * |
| 1930 | * This function uses the RCU read lock, so the result may be out of date |
| 1931 | * by the time it returns. If you need the result to be stable, use a lock. |
| 1932 | * |
| 1933 | * Context: Any context. Takes and releases the RCU lock. |
| 1934 | * Return: True if the entry at @index has this mark set, false if it doesn't. |
| 1935 | */ |
| 1936 | bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) |
| 1937 | { |
| 1938 | XA_STATE(xas, xa, index); |
| 1939 | void *entry; |
| 1940 | |
| 1941 | rcu_read_lock(); |
| 1942 | entry = xas_start(&xas); |
| 1943 | while (xas_get_mark(&xas, mark)) { |
| 1944 | if (!xa_is_node(entry)) |
| 1945 | goto found; |
| 1946 | entry = xas_descend(&xas, xa_to_node(entry)); |
| 1947 | } |
| 1948 | rcu_read_unlock(); |
| 1949 | return false; |
| 1950 | found: |
| 1951 | rcu_read_unlock(); |
| 1952 | return true; |
| 1953 | } |
| 1954 | EXPORT_SYMBOL(xa_get_mark); |
| 1955 | |
| 1956 | /** |
| 1957 | * xa_set_mark() - Set this mark on this entry. |
| 1958 | * @xa: XArray. |
| 1959 | * @index: Index of entry. |
| 1960 | * @mark: Mark number. |
| 1961 | * |
Matthew Wilcox | 804dfaf | 2018-11-05 16:37:15 -0500 | [diff] [blame] | 1962 | * Attempting to set a mark on a %NULL entry does not succeed. |
Matthew Wilcox | 9b89a03 | 2017-11-10 09:34:31 -0500 | [diff] [blame] | 1963 | * |
| 1964 | * Context: Process context. Takes and releases the xa_lock. |
| 1965 | */ |
| 1966 | void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) |
| 1967 | { |
| 1968 | xa_lock(xa); |
| 1969 | __xa_set_mark(xa, index, mark); |
| 1970 | xa_unlock(xa); |
| 1971 | } |
| 1972 | EXPORT_SYMBOL(xa_set_mark); |
| 1973 | |
| 1974 | /** |
| 1975 | * xa_clear_mark() - Clear this mark on this entry. |
| 1976 | * @xa: XArray. |
| 1977 | * @index: Index of entry. |
| 1978 | * @mark: Mark number. |
| 1979 | * |
| 1980 | * Clearing a mark always succeeds. |
| 1981 | * |
| 1982 | * Context: Process context. Takes and releases the xa_lock. |
| 1983 | */ |
| 1984 | void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) |
| 1985 | { |
| 1986 | xa_lock(xa); |
| 1987 | __xa_clear_mark(xa, index, mark); |
| 1988 | xa_unlock(xa); |
| 1989 | } |
| 1990 | EXPORT_SYMBOL(xa_clear_mark); |
| 1991 | |
Matthew Wilcox | b803b42 | 2017-11-14 08:30:11 -0500 | [diff] [blame] | 1992 | /** |
| 1993 | * xa_find() - Search the XArray for an entry. |
| 1994 | * @xa: XArray. |
| 1995 | * @indexp: Pointer to an index. |
| 1996 | * @max: Maximum index to search to. |
| 1997 | * @filter: Selection criterion. |
| 1998 | * |
| 1999 | * Finds the entry in @xa which matches the @filter, and has the lowest |
| 2000 | * index that is at least @indexp and no more than @max. |
| 2001 | * If an entry is found, @indexp is updated to be the index of the entry. |
| 2002 | * This function is protected by the RCU read lock, so it may not find |
| 2003 | * entries which are being simultaneously added. It will not return an |
| 2004 | * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). |
| 2005 | * |
| 2006 | * Context: Any context. Takes and releases the RCU lock. |
| 2007 | * Return: The entry, if found, otherwise %NULL. |
| 2008 | */ |
| 2009 | void *xa_find(struct xarray *xa, unsigned long *indexp, |
| 2010 | unsigned long max, xa_mark_t filter) |
| 2011 | { |
| 2012 | XA_STATE(xas, xa, *indexp); |
| 2013 | void *entry; |
| 2014 | |
| 2015 | rcu_read_lock(); |
| 2016 | do { |
| 2017 | if ((__force unsigned int)filter < XA_MAX_MARKS) |
| 2018 | entry = xas_find_marked(&xas, max, filter); |
| 2019 | else |
| 2020 | entry = xas_find(&xas, max); |
| 2021 | } while (xas_retry(&xas, entry)); |
| 2022 | rcu_read_unlock(); |
| 2023 | |
| 2024 | if (entry) |
| 2025 | *indexp = xas.xa_index; |
| 2026 | return entry; |
| 2027 | } |
| 2028 | EXPORT_SYMBOL(xa_find); |
| 2029 | |
Matthew Wilcox (Oracle) | 19c30f4 | 2020-01-17 22:00:41 -0500 | [diff] [blame] | 2030 | static bool xas_sibling(struct xa_state *xas) |
| 2031 | { |
| 2032 | struct xa_node *node = xas->xa_node; |
| 2033 | unsigned long mask; |
| 2034 | |
Matthew Wilcox (Oracle) | d8e93e3 | 2020-02-27 07:37:40 -0500 | [diff] [blame] | 2035 | if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node) |
Matthew Wilcox (Oracle) | 19c30f4 | 2020-01-17 22:00:41 -0500 | [diff] [blame] | 2036 | return false; |
| 2037 | mask = (XA_CHUNK_SIZE << node->shift) - 1; |
Matthew Wilcox (Oracle) | bd40b17 | 2020-01-31 05:07:55 -0500 | [diff] [blame] | 2038 | return (xas->xa_index & mask) > |
| 2039 | ((unsigned long)xas->xa_offset << node->shift); |
Matthew Wilcox (Oracle) | 19c30f4 | 2020-01-17 22:00:41 -0500 | [diff] [blame] | 2040 | } |
| 2041 | |
Matthew Wilcox | b803b42 | 2017-11-14 08:30:11 -0500 | [diff] [blame] | 2042 | /** |
| 2043 | * xa_find_after() - Search the XArray for a present entry. |
| 2044 | * @xa: XArray. |
| 2045 | * @indexp: Pointer to an index. |
| 2046 | * @max: Maximum index to search to. |
| 2047 | * @filter: Selection criterion. |
| 2048 | * |
| 2049 | * Finds the entry in @xa which matches the @filter and has the lowest |
| 2050 | * index that is above @indexp and no more than @max. |
| 2051 | * If an entry is found, @indexp is updated to be the index of the entry. |
| 2052 | * This function is protected by the RCU read lock, so it may miss entries |
| 2053 | * which are being simultaneously added. It will not return an |
| 2054 | * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). |
| 2055 | * |
| 2056 | * Context: Any context. Takes and releases the RCU lock. |
| 2057 | * Return: The pointer, if found, otherwise %NULL. |
| 2058 | */ |
| 2059 | void *xa_find_after(struct xarray *xa, unsigned long *indexp, |
| 2060 | unsigned long max, xa_mark_t filter) |
| 2061 | { |
| 2062 | XA_STATE(xas, xa, *indexp + 1); |
| 2063 | void *entry; |
| 2064 | |
Matthew Wilcox (Oracle) | 430f24f | 2020-01-17 17:45:12 -0500 | [diff] [blame] | 2065 | if (xas.xa_index == 0) |
| 2066 | return NULL; |
| 2067 | |
Matthew Wilcox | b803b42 | 2017-11-14 08:30:11 -0500 | [diff] [blame] | 2068 | rcu_read_lock(); |
| 2069 | for (;;) { |
| 2070 | if ((__force unsigned int)filter < XA_MAX_MARKS) |
| 2071 | entry = xas_find_marked(&xas, max, filter); |
| 2072 | else |
| 2073 | entry = xas_find(&xas, max); |
Matthew Wilcox (Oracle) | c44aa5e | 2020-01-17 22:13:21 -0500 | [diff] [blame] | 2074 | |
| 2075 | if (xas_invalid(&xas)) |
Matthew Wilcox | 8229706e | 2018-11-01 16:55:19 -0400 | [diff] [blame] | 2076 | break; |
Matthew Wilcox (Oracle) | 19c30f4 | 2020-01-17 22:00:41 -0500 | [diff] [blame] | 2077 | if (xas_sibling(&xas)) |
| 2078 | continue; |
Matthew Wilcox | b803b42 | 2017-11-14 08:30:11 -0500 | [diff] [blame] | 2079 | if (!xas_retry(&xas, entry)) |
| 2080 | break; |
| 2081 | } |
| 2082 | rcu_read_unlock(); |
| 2083 | |
| 2084 | if (entry) |
| 2085 | *indexp = xas.xa_index; |
| 2086 | return entry; |
| 2087 | } |
| 2088 | EXPORT_SYMBOL(xa_find_after); |
| 2089 | |
Matthew Wilcox | 80a0a1a | 2017-11-14 16:42:22 -0500 | [diff] [blame] | 2090 | static unsigned int xas_extract_present(struct xa_state *xas, void **dst, |
| 2091 | unsigned long max, unsigned int n) |
| 2092 | { |
| 2093 | void *entry; |
| 2094 | unsigned int i = 0; |
| 2095 | |
| 2096 | rcu_read_lock(); |
| 2097 | xas_for_each(xas, entry, max) { |
| 2098 | if (xas_retry(xas, entry)) |
| 2099 | continue; |
| 2100 | dst[i++] = entry; |
| 2101 | if (i == n) |
| 2102 | break; |
| 2103 | } |
| 2104 | rcu_read_unlock(); |
| 2105 | |
| 2106 | return i; |
| 2107 | } |
| 2108 | |
| 2109 | static unsigned int xas_extract_marked(struct xa_state *xas, void **dst, |
| 2110 | unsigned long max, unsigned int n, xa_mark_t mark) |
| 2111 | { |
| 2112 | void *entry; |
| 2113 | unsigned int i = 0; |
| 2114 | |
| 2115 | rcu_read_lock(); |
| 2116 | xas_for_each_marked(xas, entry, max, mark) { |
| 2117 | if (xas_retry(xas, entry)) |
| 2118 | continue; |
| 2119 | dst[i++] = entry; |
| 2120 | if (i == n) |
| 2121 | break; |
| 2122 | } |
| 2123 | rcu_read_unlock(); |
| 2124 | |
| 2125 | return i; |
| 2126 | } |
| 2127 | |
| 2128 | /** |
| 2129 | * xa_extract() - Copy selected entries from the XArray into a normal array. |
| 2130 | * @xa: The source XArray to copy from. |
| 2131 | * @dst: The buffer to copy entries into. |
| 2132 | * @start: The first index in the XArray eligible to be selected. |
| 2133 | * @max: The last index in the XArray eligible to be selected. |
| 2134 | * @n: The maximum number of entries to copy. |
| 2135 | * @filter: Selection criterion. |
| 2136 | * |
| 2137 | * Copies up to @n entries that match @filter from the XArray. The |
| 2138 | * copied entries will have indices between @start and @max, inclusive. |
| 2139 | * |
| 2140 | * The @filter may be an XArray mark value, in which case entries which are |
| 2141 | * marked with that mark will be copied. It may also be %XA_PRESENT, in |
Matthew Wilcox | 804dfaf | 2018-11-05 16:37:15 -0500 | [diff] [blame] | 2142 | * which case all entries which are not %NULL will be copied. |
Matthew Wilcox | 80a0a1a | 2017-11-14 16:42:22 -0500 | [diff] [blame] | 2143 | * |
| 2144 | * The entries returned may not represent a snapshot of the XArray at a |
| 2145 | * moment in time. For example, if another thread stores to index 5, then |
| 2146 | * index 10, calling xa_extract() may return the old contents of index 5 |
| 2147 | * and the new contents of index 10. Indices not modified while this |
| 2148 | * function is running will not be skipped. |
| 2149 | * |
| 2150 | * If you need stronger guarantees, holding the xa_lock across calls to this |
| 2151 | * function will prevent concurrent modification. |
| 2152 | * |
| 2153 | * Context: Any context. Takes and releases the RCU lock. |
| 2154 | * Return: The number of entries copied. |
| 2155 | */ |
| 2156 | unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start, |
| 2157 | unsigned long max, unsigned int n, xa_mark_t filter) |
| 2158 | { |
| 2159 | XA_STATE(xas, xa, start); |
| 2160 | |
| 2161 | if (!n) |
| 2162 | return 0; |
| 2163 | |
| 2164 | if ((__force unsigned int)filter < XA_MAX_MARKS) |
| 2165 | return xas_extract_marked(&xas, dst, max, n, filter); |
| 2166 | return xas_extract_present(&xas, dst, max, n); |
| 2167 | } |
| 2168 | EXPORT_SYMBOL(xa_extract); |
| 2169 | |
Matthew Wilcox | 687149f | 2017-11-17 08:16:34 -0500 | [diff] [blame] | 2170 | /** |
Matthew Wilcox (Oracle) | f82cd2f | 2020-08-18 09:05:56 -0400 | [diff] [blame] | 2171 | * xa_delete_node() - Private interface for workingset code. |
| 2172 | * @node: Node to be removed from the tree. |
| 2173 | * @update: Function to call to update ancestor nodes. |
| 2174 | * |
| 2175 | * Context: xa_lock must be held on entry and will not be released. |
| 2176 | */ |
| 2177 | void xa_delete_node(struct xa_node *node, xa_update_node_t update) |
| 2178 | { |
| 2179 | struct xa_state xas = { |
| 2180 | .xa = node->array, |
| 2181 | .xa_index = (unsigned long)node->offset << |
| 2182 | (node->shift + XA_CHUNK_SHIFT), |
| 2183 | .xa_shift = node->shift + XA_CHUNK_SHIFT, |
| 2184 | .xa_offset = node->offset, |
| 2185 | .xa_node = xa_parent_locked(node->array, node), |
| 2186 | .xa_update = update, |
| 2187 | }; |
| 2188 | |
| 2189 | xas_store(&xas, NULL); |
| 2190 | } |
| 2191 | EXPORT_SYMBOL_GPL(xa_delete_node); /* For the benefit of the test suite */ |
| 2192 | |
| 2193 | /** |
Matthew Wilcox | 687149f | 2017-11-17 08:16:34 -0500 | [diff] [blame] | 2194 | * xa_destroy() - Free all internal data structures. |
| 2195 | * @xa: XArray. |
| 2196 | * |
| 2197 | * After calling this function, the XArray is empty and has freed all memory |
| 2198 | * allocated for its internal data structures. You are responsible for |
| 2199 | * freeing the objects referenced by the XArray. |
| 2200 | * |
| 2201 | * Context: Any context. Takes and releases the xa_lock, interrupt-safe. |
| 2202 | */ |
| 2203 | void xa_destroy(struct xarray *xa) |
| 2204 | { |
| 2205 | XA_STATE(xas, xa, 0); |
| 2206 | unsigned long flags; |
| 2207 | void *entry; |
| 2208 | |
| 2209 | xas.xa_node = NULL; |
| 2210 | xas_lock_irqsave(&xas, flags); |
| 2211 | entry = xa_head_locked(xa); |
| 2212 | RCU_INIT_POINTER(xa->xa_head, NULL); |
| 2213 | xas_init_marks(&xas); |
Matthew Wilcox | 3ccaf57 | 2018-10-26 14:43:22 -0400 | [diff] [blame] | 2214 | if (xa_zero_busy(xa)) |
| 2215 | xa_mark_clear(xa, XA_FREE_MARK); |
Matthew Wilcox | 687149f | 2017-11-17 08:16:34 -0500 | [diff] [blame] | 2216 | /* lockdep checks we're still holding the lock in xas_free_nodes() */ |
| 2217 | if (xa_is_node(entry)) |
| 2218 | xas_free_nodes(&xas, xa_to_node(entry)); |
| 2219 | xas_unlock_irqrestore(&xas, flags); |
| 2220 | } |
| 2221 | EXPORT_SYMBOL(xa_destroy); |
| 2222 | |
Matthew Wilcox | ad3d6c7 | 2017-11-07 14:57:46 -0500 | [diff] [blame] | 2223 | #ifdef XA_DEBUG |
| 2224 | void xa_dump_node(const struct xa_node *node) |
| 2225 | { |
| 2226 | unsigned i, j; |
| 2227 | |
| 2228 | if (!node) |
| 2229 | return; |
| 2230 | if ((unsigned long)node & 3) { |
| 2231 | pr_cont("node %px\n", node); |
| 2232 | return; |
| 2233 | } |
| 2234 | |
| 2235 | pr_cont("node %px %s %d parent %px shift %d count %d values %d " |
| 2236 | "array %px list %px %px marks", |
| 2237 | node, node->parent ? "offset" : "max", node->offset, |
| 2238 | node->parent, node->shift, node->count, node->nr_values, |
| 2239 | node->array, node->private_list.prev, node->private_list.next); |
| 2240 | for (i = 0; i < XA_MAX_MARKS; i++) |
| 2241 | for (j = 0; j < XA_MARK_LONGS; j++) |
| 2242 | pr_cont(" %lx", node->marks[i][j]); |
| 2243 | pr_cont("\n"); |
| 2244 | } |
| 2245 | |
| 2246 | void xa_dump_index(unsigned long index, unsigned int shift) |
| 2247 | { |
| 2248 | if (!shift) |
| 2249 | pr_info("%lu: ", index); |
| 2250 | else if (shift >= BITS_PER_LONG) |
| 2251 | pr_info("0-%lu: ", ~0UL); |
| 2252 | else |
| 2253 | pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1)); |
| 2254 | } |
| 2255 | |
| 2256 | void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift) |
| 2257 | { |
| 2258 | if (!entry) |
| 2259 | return; |
| 2260 | |
| 2261 | xa_dump_index(index, shift); |
| 2262 | |
| 2263 | if (xa_is_node(entry)) { |
| 2264 | if (shift == 0) { |
| 2265 | pr_cont("%px\n", entry); |
| 2266 | } else { |
| 2267 | unsigned long i; |
| 2268 | struct xa_node *node = xa_to_node(entry); |
| 2269 | xa_dump_node(node); |
| 2270 | for (i = 0; i < XA_CHUNK_SIZE; i++) |
| 2271 | xa_dump_entry(node->slots[i], |
| 2272 | index + (i << node->shift), node->shift); |
| 2273 | } |
| 2274 | } else if (xa_is_value(entry)) |
| 2275 | pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry), |
| 2276 | xa_to_value(entry), entry); |
| 2277 | else if (!xa_is_internal(entry)) |
| 2278 | pr_cont("%px\n", entry); |
| 2279 | else if (xa_is_retry(entry)) |
| 2280 | pr_cont("retry (%ld)\n", xa_to_internal(entry)); |
| 2281 | else if (xa_is_sibling(entry)) |
| 2282 | pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry)); |
Matthew Wilcox | 9f14d4f | 2018-10-01 14:54:59 -0400 | [diff] [blame] | 2283 | else if (xa_is_zero(entry)) |
| 2284 | pr_cont("zero (%ld)\n", xa_to_internal(entry)); |
Matthew Wilcox | ad3d6c7 | 2017-11-07 14:57:46 -0500 | [diff] [blame] | 2285 | else |
| 2286 | pr_cont("UNKNOWN ENTRY (%px)\n", entry); |
| 2287 | } |
| 2288 | |
| 2289 | void xa_dump(const struct xarray *xa) |
| 2290 | { |
| 2291 | void *entry = xa->xa_head; |
| 2292 | unsigned int shift = 0; |
| 2293 | |
| 2294 | pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry, |
Matthew Wilcox | 9b89a03 | 2017-11-10 09:34:31 -0500 | [diff] [blame] | 2295 | xa->xa_flags, xa_marked(xa, XA_MARK_0), |
| 2296 | xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2)); |
Matthew Wilcox | ad3d6c7 | 2017-11-07 14:57:46 -0500 | [diff] [blame] | 2297 | if (xa_is_node(entry)) |
| 2298 | shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT; |
| 2299 | xa_dump_entry(entry, 0, shift); |
| 2300 | } |
| 2301 | #endif |