/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
#include <linux/preempt.h>	/* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/xarray.h>


/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;

/*
 * Radix tree node cache.
 */
struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
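
/*
 * Worked example (an added note, not in the original source): assuming
 * 64-bit longs and the common RADIX_TREE_MAP_SHIFT of 6,
 * RADIX_TREE_MAX_PATH is DIV_ROUND_UP(64, 6) = 11, so each CPU preloads
 * at most 11 * 2 - 1 = 21 nodes.
 */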

/*
 * The IDR does not have to be as high as the radix tree since it uses
 * signed integers, not unsigned longs.
 */
#define IDR_INDEX_BITS		(8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH		(DIV_ROUND_UP(IDR_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE	(IDR_MAX_PATH * 2 - 1)

/*
 * The IDA is even shorter since it uses a bitmap at the last level.
 */
#define IDA_INDEX_BITS		(8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
#define IDA_MAX_PATH		(DIV_ROUND_UP(IDA_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDA_PRELOAD_SIZE	(IDA_MAX_PATH * 2 - 1)
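
/*
 * Worked example (an added note, assuming 32-bit ints, 64-bit longs and
 * RADIX_TREE_MAP_SHIFT == 6): IDR_INDEX_BITS is 31, so IDR_MAX_PATH is
 * DIV_ROUND_UP(31, 6) = 6 and IDR_PRELOAD_SIZE is 11.  If the IDA bitmap
 * is 128 bytes (IDA_BITMAP_BITS == 1024), IDA_INDEX_BITS is 31 - 10 = 21,
 * giving IDA_MAX_PATH = 4 and IDA_PRELOAD_SIZE = 7.
 */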

/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->parent points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}
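
/*
 * Illustrative sketch (added commentary): internal pointers carry the
 * RADIX_TREE_INTERNAL_NODE tag in their low bits, so the two helpers
 * above are exact inverses of each other:
 *
 *	struct radix_tree_node *node = ...;
 *	void *entry = node_to_entry(node);
 *
 *	BUG_ON(!radix_tree_is_internal_node(entry));
 *	BUG_ON(entry_to_node(entry) != node);
 */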

#define RADIX_TREE_RETRY	XA_RETRY_ENTRY

static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
	return parent ? slot - parent->slots : 0;
}

static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);

	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = rcu_dereference_raw(parent->slots[offset]);
	}

	*nodep = (void *)entry;
	return offset;
}

static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
	return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
}

static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
	return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
}

static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
	return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
}

static inline bool is_idr(const struct radix_tree_root *root)
{
	return !!(root->xa_flags & ROOT_IS_IDR);
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(const struct radix_tree_node *node,
							unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}

static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}

/**
 * radix_tree_find_next_bit - find the next set bit in a node's tag bitmap
 *
 * @node: the node whose tag bitmap to search
 * @tag: the tag index to search
 * @offset: the bit number to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits from RADIX_TREE_MAP_SIZE up to the next BITS_PER_LONG boundary
 * must be zero.  Returns the next bit offset, or RADIX_TREE_MAP_SIZE if
 * nothing is found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}
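
/*
 * Usage sketch (added commentary; process() is a placeholder): a scan of
 * every slot carrying @tag in a node could be written as
 *
 *	unsigned long offset = radix_tree_find_next_bit(node, tag, 0);
 *
 *	while (offset < RADIX_TREE_MAP_SIZE) {
 *		process(node->slots[offset]);
 *		offset = radix_tree_find_next_bit(node, tag, offset + 1);
 *	}
 */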

static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
	return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
}

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}

static unsigned long next_index(unsigned long index,
				const struct radix_tree_node *node,
				unsigned long offset)
{
	return (index & ~node_maxindex(node)) + (offset << node->shift);
}

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
			struct radix_tree_root *root,
			unsigned int shift, unsigned int offset,
			unsigned int count, unsigned int nr_values)
{
	struct radix_tree_node *ret = NULL;

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded, we will always succeed
		 * in getting a node here (and never reach kmem_cache_alloc).
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->parent;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	if (ret) {
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->nr_values = nr_values;
		ret->parent = parent;
		ret->array = root;
	}
	return ret;
}

void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);

	/*
	 * Must only free zeroed nodes into the slab.  We can be left with
	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
	 * and tags here.
	 */
	memset(node->slots, 0, sizeof(node->slots));
	memset(node->tags, 0, sizeof(node->tags));
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->parent = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);
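
/*
 * Usage sketch (added commentary; the lock and tree names below are
 * illustrative only): the usual convention pairs radix_tree_preload()
 * with radix_tree_preload_end(), which re-enables preemption, around a
 * spinlocked insertion:
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	error = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */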
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 402 | |
Nick Piggin | 6e954b9 | 2006-01-08 01:01:40 -0800 | [diff] [blame] | 403 | /* |
Jan Kara | 5e4c0d97 | 2013-09-11 14:26:05 -0700 | [diff] [blame] | 404 | * The same as above function, except we don't guarantee preloading happens. |
| 405 | * We do it, if we decide it helps. On success, return zero with preemption |
| 406 | * disabled. On error, return -ENOMEM with preemption not disabled. |
| 407 | */ |
| 408 | int radix_tree_maybe_preload(gfp_t gfp_mask) |
| 409 | { |
Mel Gorman | d0164ad | 2015-11-06 16:28:21 -0800 | [diff] [blame] | 410 | if (gfpflags_allow_blocking(gfp_mask)) |
Kirill A. Shutemov | c78c66d | 2016-07-26 15:26:02 -0700 | [diff] [blame] | 411 | return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); |
Jan Kara | 5e4c0d97 | 2013-09-11 14:26:05 -0700 | [diff] [blame] | 412 | /* Preloading doesn't help anything with this gfp mask, skip it */ |
| 413 | preempt_disable(); |
| 414 | return 0; |
| 415 | } |
| 416 | EXPORT_SYMBOL(radix_tree_maybe_preload); |
| 417 | |
| 418 | /* |
Kirill A. Shutemov | c78c66d | 2016-07-26 15:26:02 -0700 | [diff] [blame] | 419 | * The same as function above, but preload number of nodes required to insert |
| 420 | * (1 << order) continuous naturally-aligned elements. |
| 421 | */ |
| 422 | int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) |
| 423 | { |
| 424 | unsigned long nr_subtrees; |
| 425 | int nr_nodes, subtree_height; |
| 426 | |
| 427 | /* Preloading doesn't help anything with this gfp mask, skip it */ |
| 428 | if (!gfpflags_allow_blocking(gfp_mask)) { |
| 429 | preempt_disable(); |
| 430 | return 0; |
| 431 | } |
| 432 | |
| 433 | /* |
| 434 | * Calculate number and height of fully populated subtrees it takes to |
| 435 | * store (1 << order) elements. |
| 436 | */ |
| 437 | nr_subtrees = 1 << order; |
| 438 | for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE; |
| 439 | subtree_height++) |
| 440 | nr_subtrees >>= RADIX_TREE_MAP_SHIFT; |
| 441 | |
| 442 | /* |
| 443 | * The worst case is zero height tree with a single item at index 0 and |
| 444 | * then inserting items starting at ULONG_MAX - (1 << order). |
| 445 | * |
| 446 | * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to |
| 447 | * 0-index item. |
| 448 | */ |
| 449 | nr_nodes = RADIX_TREE_MAX_PATH; |
| 450 | |
| 451 | /* Plus branch to fully populated subtrees. */ |
| 452 | nr_nodes += RADIX_TREE_MAX_PATH - subtree_height; |
| 453 | |
| 454 | /* Root node is shared. */ |
| 455 | nr_nodes--; |
| 456 | |
| 457 | /* Plus nodes required to build subtrees. */ |
| 458 | nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height]; |
| 459 | |
| 460 | return __radix_tree_preload(gfp_mask, nr_nodes); |
| 461 | } |
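
/*
 * Worked example (an added note, assuming RADIX_TREE_MAP_SHIFT == 6 and
 * height_to_maxnodes[1] == 1): for order == 9 (512 contiguous entries),
 * the subtree loop in the function above leaves subtree_height == 1 and
 * nr_subtrees == 8, so nr_nodes is 11 + (11 - 1) - 1 + 8 * 1 = 28
 * preloaded nodes.
 */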

static unsigned radix_tree_load_root(const struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
				unsigned long index, unsigned int shift)
{
	void *entry;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be.  */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	entry = rcu_dereference_raw(root->xa_head);
	if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
							root, shift, 0, 1, 0);
		if (!node)
			return -ENOMEM;

		if (is_idr(root)) {
			all_tag_set(node, IDR_FREE);
			if (!root_tag_get(root, IDR_FREE)) {
				tag_clear(node, IDR_FREE, 0);
				root_tag_set(root, IDR_FREE);
			}
		} else {
			/* Propagate the aggregated tag info to the new child */
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
				if (root_tag_get(root, tag))
					tag_set(node, tag, 0);
			}
		}

		BUG_ON(shift > BITS_PER_LONG);
		if (radix_tree_is_internal_node(entry)) {
			entry_to_node(entry)->parent = node;
		} else if (xa_is_value(entry)) {
			/* Moving a value entry root->xa_head to a node */
			node->nr_values = 1;
		}
		/*
		 * entry was already in the radix tree, so we do not need
		 * rcu_assign_pointer here
		 */
		node->slots[0] = (void __rcu *)entry;
		entry = node_to_entry(node);
		rcu_assign_pointer(root->xa_head, entry);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}

/**
 *	radix_tree_shrink	-	shrink radix tree to minimum height
 *	@root:		radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its
		 * child is not at the leftmost slot, or the child is a
		 * multiorder entry, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = rcu_dereference_raw(node->slots[0]);
		if (!child)
			break;
		if (!radix_tree_is_internal_node(child) && node->shift)
			break;

		/*
		 * For an IDR, we must not shrink entry 0 into the root in
		 * case somebody calls idr_replace() with a pointer that
		 * appears to be an internal entry.
		 */
		if (!node->shift && is_idr(root))
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->xa_head) as far as dependent read barriers go.
		 */
		root->xa_head = (void __rcu *)child;
		if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
			root_tag_clear(root, IDR_FREE);

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If the item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing direct root node with an indirect pointer
		 * also results in a stale slot). So tag the slot as indirect
		 * to force callers to retry.
		 */
		node->count = 0;
		if (!radix_tree_is_internal_node(child)) {
			node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		shrunk = true;
	}

	return shrunk;
}

static bool delete_node(struct radix_tree_root *root,
			struct radix_tree_node *node)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node_to_entry(node) ==
					rcu_dereference_raw(root->xa_head))
				deleted |= radix_tree_shrink(root);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			/*
			 * Shouldn't the tags already have all been cleared
			 * by the caller?
			 */
			if (!is_idr(root))
				root_tag_clear_all(root);
			root->xa_head = NULL;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}

/**
 *	__radix_tree_create	-	create a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		index occupies 2^order aligned slots
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Create, if necessary, and return the node and slot for an item
 *	at position @index in the radix tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->xa_head is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 *
 *	Returns -ENOMEM, or 0 for success.
 */
static int __radix_tree_create(struct radix_tree_root *root,
		unsigned long index, unsigned order,
		struct radix_tree_node **nodep, void __rcu ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void __rcu **slot = (void __rcu **)&root->xa_head;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index | ((1UL << order) - 1);
	gfp_t gfp = root_gfp_mask(root);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough.  */
	if (order > 0 && max == ((1UL << order) - 1))
		max++;
	if (max > maxindex) {
		int error = radix_tree_extend(root, gfp, max, shift);
		if (error < 0)
			return error;
		shift = error;
		child = rcu_dereference_raw(root->xa_head);
	}

	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node.  */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return -ENOMEM;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}

/*
 * Free any nodes below this node.  The tree is presumed to not need
 * shrinking, and any user data in the tree is presumed to not need a
 * destructor called on it.  If we need to add a destructor, we can
 * add that functionality later.  Note that we may not clear tags or
 * slots from the tree as an RCU walker may still have a pointer into
 * this subtree.  We could replace the entries with RADIX_TREE_RETRY,
 * but we'll still have to clear those in rcu_free.
 */
static void radix_tree_free_nodes(struct radix_tree_node *node)
{
	unsigned offset = 0;
	struct radix_tree_node *child = entry_to_node(node);

	for (;;) {
		void *entry = rcu_dereference_raw(child->slots[offset]);
		if (xa_is_node(entry) && child->shift) {
			child = entry_to_node(entry);
			offset = 0;
			continue;
		}
		offset++;
		while (offset == RADIX_TREE_MAP_SIZE) {
			struct radix_tree_node *old = child;
			offset = child->offset + 1;
			child = child->parent;
			WARN_ON_ONCE(!list_empty(&old->private_list));
			radix_tree_node_free(old);
			if (old == entry_to_node(node))
				return;
		}
	}
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
static inline int insert_entries(struct radix_tree_node *node,
		void __rcu **slot, void *item, unsigned order, bool replace)
{
	void *sibling;
	unsigned i, n, tag, offset, tags = 0;

	if (node) {
		if (order > node->shift)
			n = 1 << (order - node->shift);
		else
			n = 1;
		offset = get_slot_offset(node, slot);
	} else {
		n = 1;
		offset = 0;
	}

	if (n > 1) {
		offset = offset & ~(n - 1);
		slot = &node->slots[offset];
	}
	sibling = xa_mk_sibling(offset);

	for (i = 0; i < n; i++) {
		if (slot[i]) {
			if (replace) {
				node->count--;
				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
					if (tag_get(node, tag, offset + i))
						tags |= 1 << tag;
			} else
				return -EEXIST;
		}
	}

	for (i = 0; i < n; i++) {
		struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
		if (i) {
			rcu_assign_pointer(slot[i], sibling);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_clear(node, tag, offset + i);
		} else {
			rcu_assign_pointer(slot[i], item);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_set(node, tag, offset);
		}
		if (xa_is_node(old))
			radix_tree_free_nodes(old);
		if (xa_is_value(old))
			node->nr_values--;
	}
	if (node) {
		node->count += n;
		if (xa_is_value(item))
			node->nr_values += n;
	}
	return n;
}
#else
static inline int insert_entries(struct radix_tree_node *node,
		void __rcu **slot, void *item, unsigned order, bool replace)
{
	if (*slot)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);
	if (node) {
		node->count++;
		if (xa_is_value(item))
			node->nr_values++;
	}
	return 1;
}
#endif
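
/*
 * Layout sketch (added commentary): in the multiorder case, storing an
 * entry whose order exceeds the node's shift by 2 claims four slots.
 * If the offset rounds down to 4, slot 4 holds the item and slots 5-7
 * hold sibling entries pointing back at offset 4, so a descent landing
 * on any of the four slots resolves to the same item.
 */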
| 838 | |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 839 | /** |
Matthew Wilcox | e614523 | 2016-03-17 14:21:54 -0700 | [diff] [blame] | 840 | * __radix_tree_insert - insert into a radix tree |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 841 | * @root: radix tree root |
| 842 | * @index: index key |
Matthew Wilcox | e614523 | 2016-03-17 14:21:54 -0700 | [diff] [blame] | 843 | * @order: key covers the 2^order indices around index |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 844 | * @item: item to insert |
| 845 | * |
| 846 | * Insert an item into the radix tree at position @index. |
| 847 | */ |
Matthew Wilcox | e614523 | 2016-03-17 14:21:54 -0700 | [diff] [blame] | 848 | int __radix_tree_insert(struct radix_tree_root *root, unsigned long index, |
| 849 | unsigned order, void *item) |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 850 | { |
| 851 | struct radix_tree_node *node; |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 852 | void __rcu **slot; |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 853 | int error; |
| 854 | |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 855 | BUG_ON(radix_tree_is_internal_node(item)); |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 856 | |
Matthew Wilcox | e614523 | 2016-03-17 14:21:54 -0700 | [diff] [blame] | 857 | error = __radix_tree_create(root, index, order, &node, &slot); |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 858 | if (error) |
| 859 | return error; |
Matthew Wilcox | 175542f | 2016-12-14 15:08:58 -0800 | [diff] [blame] | 860 | |
| 861 | error = insert_entries(node, slot, item, order, false); |
| 862 | if (error < 0) |
| 863 | return error; |
Christoph Lameter | 201b626 | 2005-09-06 15:16:46 -0700 | [diff] [blame] | 864 | |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 865 | if (node) { |
Matthew Wilcox | 7b60e9a | 2016-05-20 17:02:23 -0700 | [diff] [blame] | 866 | unsigned offset = get_slot_offset(node, slot); |
Matthew Wilcox | 7b60e9a | 2016-05-20 17:02:23 -0700 | [diff] [blame] | 867 | BUG_ON(tag_get(node, 0, offset)); |
| 868 | BUG_ON(tag_get(node, 1, offset)); |
| 869 | BUG_ON(tag_get(node, 2, offset)); |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 870 | } else { |
Matthew Wilcox | 7b60e9a | 2016-05-20 17:02:23 -0700 | [diff] [blame] | 871 | BUG_ON(root_tags_get(root)); |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 872 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 873 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 874 | return 0; |
| 875 | } |
Matthew Wilcox | e614523 | 2016-03-17 14:21:54 -0700 | [diff] [blame] | 876 | EXPORT_SYMBOL(__radix_tree_insert); |
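/*
 * A minimal insertion sketch (illustrative only: "my_tree", "my_lock" and
 * store_item() are hypothetical caller state, and the order-0 wrapper
 * radix_tree_insert() is used).  Preloading lets the node allocation
 * happen before the lock is taken:
 *
 *	RADIX_TREE(my_tree, GFP_KERNEL);
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	int store_item(unsigned long index, void *item)
 *	{
 *		int err = radix_tree_preload(GFP_KERNEL);
 *
 *		if (err)
 *			return err;
 *		spin_lock(&my_lock);
 *		err = radix_tree_insert(&my_tree, index, item);
 *		spin_unlock(&my_lock);
 *		radix_tree_preload_end();
 *		return err;
 *	}
 */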
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 877 | |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 878 | /** |
| 879 | * __radix_tree_lookup - lookup an item in a radix tree |
| 880 | * @root: radix tree root |
| 881 | * @index: index key |
| 882 | * @nodep: returns node |
| 883 | * @slotp: returns slot |
| 884 | * |
| 885 | * Lookup and return the item at position @index in the radix |
| 886 | * tree @root. |
| 887 | * |
| 888 | * Until there is more than one item in the tree, no nodes are |
Matthew Wilcox | f8d5d0c | 2017-11-07 16:30:10 -0500 | [diff] [blame] | 889 | * allocated and @root->xa_head is used as a direct slot instead of |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 890 | * pointing to a node, in which case *@nodep will be NULL. |
Hans Reiser | a433136 | 2005-11-07 00:59:29 -0800 | [diff] [blame] | 891 | */ |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 892 | void *__radix_tree_lookup(const struct radix_tree_root *root, |
| 893 | unsigned long index, struct radix_tree_node **nodep, |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 894 | void __rcu ***slotp) |
Hans Reiser | a433136 | 2005-11-07 00:59:29 -0800 | [diff] [blame] | 895 | { |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 896 | struct radix_tree_node *node, *parent; |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 897 | unsigned long maxindex; |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 898 | void __rcu **slot; |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 899 | |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 900 | restart: |
| 901 | parent = NULL; |
Matthew Wilcox | f8d5d0c | 2017-11-07 16:30:10 -0500 | [diff] [blame] | 902 | slot = (void __rcu **)&root->xa_head; |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 903 | radix_tree_load_root(root, &node, &maxindex); |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 904 | if (index > maxindex) |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 905 | return NULL; |
| 906 | |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 907 | while (radix_tree_is_internal_node(node)) { |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 908 | unsigned offset; |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 909 | |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 910 | if (node == RADIX_TREE_RETRY) |
| 911 | goto restart; |
Matthew Wilcox | 4dd6c09 | 2016-05-20 17:03:27 -0700 | [diff] [blame] | 912 | parent = entry_to_node(node); |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 913 | offset = radix_tree_descend(parent, &node, index); |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 914 | slot = parent->slots + offset; |
Matthew Wilcox | 66ee620 | 2018-06-25 06:56:50 -0400 | [diff] [blame] | 915 | if (parent->shift == 0) |
| 916 | break; |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 917 | } |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 918 | |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 919 | if (nodep) |
| 920 | *nodep = parent; |
| 921 | if (slotp) |
| 922 | *slotp = slot; |
| 923 | return node; |
Huang Shijie | b72b71c | 2009-06-16 15:33:42 -0700 | [diff] [blame] | 924 | } |
| 925 | |
| 926 | /** |
| 927 | * radix_tree_lookup_slot - lookup a slot in a radix tree |
| 928 | * @root: radix tree root |
| 929 | * @index: index key |
| 930 | * |
| 931 | * Returns: the slot corresponding to the position @index in the |
| 932 | * radix tree @root. This is useful for update-if-exists operations. |
| 933 | * |
 | 934 | * This function can be called under rcu_read_lock iff the slot is not |
 | 935 | * modified by radix_tree_replace_slot; otherwise it must be called with |
 | 936 | * other writers excluded. Any dereference of the slot must be done |
| 937 | * using radix_tree_deref_slot. |
| 938 | */ |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 939 | void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root, |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 940 | unsigned long index) |
Huang Shijie | b72b71c | 2009-06-16 15:33:42 -0700 | [diff] [blame] | 941 | { |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 942 | void __rcu **slot; |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 943 | |
| 944 | if (!__radix_tree_lookup(root, index, NULL, &slot)) |
| 945 | return NULL; |
| 946 | return slot; |
Hans Reiser | a433136 | 2005-11-07 00:59:29 -0800 | [diff] [blame] | 947 | } |
| 948 | EXPORT_SYMBOL(radix_tree_lookup_slot); |
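/*
 * Update-if-exists sketch (illustrative; "my_tree", "my_lock" and
 * "new_item" are hypothetical caller state, with writers serialised by
 * the lock):
 *
 *	void __rcu **slot;
 *
 *	spin_lock(&my_lock);
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot)
 *		radix_tree_replace_slot(&my_tree, slot, new_item);
 *	spin_unlock(&my_lock);
 */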
| 949 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 950 | /** |
| 951 | * radix_tree_lookup - perform lookup operation on a radix tree |
| 952 | * @root: radix tree root |
| 953 | * @index: index key |
| 954 | * |
| 955 | * Lookup the item at the position @index in the radix tree @root. |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 956 | * |
 | 957 | * This function can be called under rcu_read_lock; however, the caller |
 | 958 | * must manage the lifetimes of leaf nodes (e.g. RCU may also be used to |
 | 959 | * free them safely). No RCU barriers are required to access or modify |
 | 960 | * the returned item. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 961 | */ |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 962 | void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 963 | { |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 964 | return __radix_tree_lookup(root, index, NULL, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 965 | } |
| 966 | EXPORT_SYMBOL(radix_tree_lookup); |
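/*
 * Read-side sketch (illustrative): rcu_read_lock() is sufficient for the
 * lookup itself as long as items are freed via RCU; use_item() stands in
 * for a hypothetical consumer:
 *
 *	void *item;
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	if (item)
 *		use_item(item);
 *	rcu_read_unlock();
 */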
| 967 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 968 | static inline void replace_sibling_entries(struct radix_tree_node *node, |
Matthew Wilcox | 01959df | 2017-11-09 09:23:56 -0500 | [diff] [blame] | 969 | void __rcu **slot, int count, int values) |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 970 | { |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 971 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
Matthew Wilcox | 02c02bf | 2017-11-03 23:09:45 -0400 | [diff] [blame] | 972 | unsigned offset = get_slot_offset(node, slot); |
| 973 | void *ptr = xa_mk_sibling(offset); |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 974 | |
Matthew Wilcox | 02c02bf | 2017-11-03 23:09:45 -0400 | [diff] [blame] | 975 | while (++offset < RADIX_TREE_MAP_SIZE) { |
Matthew Wilcox | 12320d0 | 2017-02-13 15:22:48 -0500 | [diff] [blame] | 976 | if (rcu_dereference_raw(node->slots[offset]) != ptr) |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 977 | break; |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 978 | if (count < 0) { |
| 979 | node->slots[offset] = NULL; |
| 980 | node->count--; |
| 981 | } |
Matthew Wilcox | 01959df | 2017-11-09 09:23:56 -0500 | [diff] [blame] | 982 | node->nr_values += values; |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 983 | } |
| 984 | #endif |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 985 | } |
| 986 | |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 987 | static void replace_slot(void __rcu **slot, void *item, |
Matthew Wilcox | 01959df | 2017-11-09 09:23:56 -0500 | [diff] [blame] | 988 | struct radix_tree_node *node, int count, int values) |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 989 | { |
Matthew Wilcox | 01959df | 2017-11-09 09:23:56 -0500 | [diff] [blame] | 990 | if (node && (count || values)) { |
Johannes Weiner | f4b109c | 2016-12-12 16:43:46 -0800 | [diff] [blame] | 991 | node->count += count; |
Matthew Wilcox | 01959df | 2017-11-09 09:23:56 -0500 | [diff] [blame] | 992 | node->nr_values += values; |
| 993 | replace_sibling_entries(node, slot, count, values); |
Johannes Weiner | f4b109c | 2016-12-12 16:43:46 -0800 | [diff] [blame] | 994 | } |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 995 | |
| 996 | rcu_assign_pointer(*slot, item); |
| 997 | } |
| 998 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 999 | static bool node_tag_get(const struct radix_tree_root *root, |
| 1000 | const struct radix_tree_node *node, |
| 1001 | unsigned int tag, unsigned int offset) |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1002 | { |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1003 | if (node) |
| 1004 | return tag_get(node, tag, offset); |
| 1005 | return root_tag_get(root, tag); |
| 1006 | } |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1007 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1008 | /* |
| 1009 | * IDR users want to be able to store NULL in the tree, so if the slot isn't |
| 1010 | * free, don't adjust the count, even if it's transitioning between NULL and |
 | 1011 | * non-NULL. For the IDA, slots are marked as being IDR_FREE while they |
 | 1012 | * still have empty bits, but the IDA only stores NULL in slots that are |
 | 1013 | * being deleted. |
| 1014 | */ |
| 1015 | static int calculate_count(struct radix_tree_root *root, |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1016 | struct radix_tree_node *node, void __rcu **slot, |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1017 | void *item, void *old) |
| 1018 | { |
| 1019 | if (is_idr(root)) { |
| 1020 | unsigned offset = get_slot_offset(node, slot); |
| 1021 | bool free = node_tag_get(root, node, IDR_FREE, offset); |
| 1022 | if (!free) |
| 1023 | return 0; |
| 1024 | if (!old) |
| 1025 | return 1; |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1026 | } |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1027 | return !!item - !!old; |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1028 | } |
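/*
 * For example, with the !!item - !!old arithmetic, calculate_count()
 * returns +1 when an item is stored over NULL, -1 when NULL is stored
 * over an item, and 0 when one item is exchanged for another.  In an IDR,
 * overwriting a slot that is not tagged IDR_FREE never changes the count,
 * even when NULL is stored or removed.
 */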
| 1029 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1030 | /** |
Johannes Weiner | f794243 | 2016-12-12 16:43:41 -0800 | [diff] [blame] | 1031 | * __radix_tree_replace - replace item in a slot |
Johannes Weiner | 4d693d0 | 2016-12-12 16:43:49 -0800 | [diff] [blame] | 1032 | * @root: radix tree root |
| 1033 | * @node: pointer to tree node |
| 1034 | * @slot: pointer to slot in @node |
| 1035 | * @item: new item to store in the slot. |
Johannes Weiner | f794243 | 2016-12-12 16:43:41 -0800 | [diff] [blame] | 1036 | * |
 | 1037 | * For use with __radix_tree_lookup(). The caller must hold the tree |
 | 1038 | * write locked across slot lookup and replacement. |
| 1039 | */ |
| 1040 | void __radix_tree_replace(struct radix_tree_root *root, |
| 1041 | struct radix_tree_node *node, |
Matthew Wilcox | 1cf56f9 | 2018-04-09 16:24:45 -0400 | [diff] [blame] | 1042 | void __rcu **slot, void *item) |
Johannes Weiner | f794243 | 2016-12-12 16:43:41 -0800 | [diff] [blame] | 1043 | { |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1044 | void *old = rcu_dereference_raw(*slot); |
Matthew Wilcox | 01959df | 2017-11-09 09:23:56 -0500 | [diff] [blame] | 1045 | int values = !!xa_is_value(item) - !!xa_is_value(old); |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1046 | int count = calculate_count(root, node, slot, item, old); |
| 1047 | |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1048 | /* |
Matthew Wilcox | 01959df | 2017-11-09 09:23:56 -0500 | [diff] [blame] | 1049 | * This function supports replacing value entries and |
Johannes Weiner | f4b109c | 2016-12-12 16:43:46 -0800 | [diff] [blame] | 1050 | * deleting entries, but that needs accounting against the |
Matthew Wilcox | f8d5d0c | 2017-11-07 16:30:10 -0500 | [diff] [blame] | 1051 | * node unless the slot is root->xa_head. |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1052 | */ |
Matthew Wilcox | f8d5d0c | 2017-11-07 16:30:10 -0500 | [diff] [blame] | 1053 | WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) && |
Matthew Wilcox | 01959df | 2017-11-09 09:23:56 -0500 | [diff] [blame] | 1054 | (count || values)); |
| 1055 | replace_slot(slot, item, node, count, values); |
Johannes Weiner | f4b109c | 2016-12-12 16:43:46 -0800 | [diff] [blame] | 1056 | |
Johannes Weiner | 4d693d0 | 2016-12-12 16:43:49 -0800 | [diff] [blame] | 1057 | if (!node) |
| 1058 | return; |
| 1059 | |
Matthew Wilcox | 1cf56f9 | 2018-04-09 16:24:45 -0400 | [diff] [blame] | 1060 | delete_node(root, node); |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1061 | } |
Johannes Weiner | f794243 | 2016-12-12 16:43:41 -0800 | [diff] [blame] | 1062 | |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1063 | /** |
| 1064 | * radix_tree_replace_slot - replace item in a slot |
| 1065 | * @root: radix tree root |
| 1066 | * @slot: pointer to slot |
| 1067 | * @item: new item to store in the slot. |
| 1068 | * |
Matthew Wilcox | 7b8d046 | 2017-12-01 22:13:06 -0500 | [diff] [blame] | 1069 | * For use with radix_tree_lookup_slot() and |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1070 | * radix_tree_gang_lookup_tag_slot(). The caller must hold the tree write |
 | 1071 | * locked across slot lookup and replacement. |
| 1072 | * |
| 1073 | * NOTE: This cannot be used to switch between non-entries (empty slots), |
Matthew Wilcox | 01959df | 2017-11-09 09:23:56 -0500 | [diff] [blame] | 1074 | * regular entries, and value entries, as that requires accounting |
Johannes Weiner | f4b109c | 2016-12-12 16:43:46 -0800 | [diff] [blame] | 1075 | * inside the radix tree node. When switching between entry types, or |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1076 | * when deleting an entry, use __radix_tree_lookup() and |
 | 1077 | * __radix_tree_replace(), or radix_tree_iter_replace(). |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1078 | */ |
| 1079 | void radix_tree_replace_slot(struct radix_tree_root *root, |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1080 | void __rcu **slot, void *item) |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1081 | { |
Matthew Wilcox | 1cf56f9 | 2018-04-09 16:24:45 -0400 | [diff] [blame] | 1082 | __radix_tree_replace(root, NULL, slot, item); |
Johannes Weiner | f794243 | 2016-12-12 16:43:41 -0800 | [diff] [blame] | 1083 | } |
Song Liu | 10257d7 | 2017-01-11 10:00:51 -0800 | [diff] [blame] | 1084 | EXPORT_SYMBOL(radix_tree_replace_slot); |
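/*
 * Same-type replacement sketch (illustrative; "old" and "new" are both
 * assumed to be regular entries, since the NOTE above forbids switching
 * entry types through this interface):
 *
 *	spin_lock(&my_lock);
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot && radix_tree_deref_slot_protected(slot, &my_lock) == old)
 *		radix_tree_replace_slot(&my_tree, slot, new);
 *	spin_unlock(&my_lock);
 */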
Johannes Weiner | f794243 | 2016-12-12 16:43:41 -0800 | [diff] [blame] | 1085 | |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1086 | /** |
| 1087 | * radix_tree_iter_replace - replace item in a slot |
 | 1088 | * @root: radix tree root |
 | | * @iter: iterator state |
 | 1089 | * @slot: pointer to slot |
 | 1090 | * @item: new item to store in the slot. |
 | 1091 | * |
Matthew Wilcox | 2956c66 | 2018-05-19 16:47:47 -0400 | [diff] [blame^] | 1092 | * For use with radix_tree_for_each_slot(). The caller must hold the |
 | 1093 | * tree write locked across the iteration and replacement. |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1094 | */ |
| 1095 | void radix_tree_iter_replace(struct radix_tree_root *root, |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1096 | const struct radix_tree_iter *iter, |
| 1097 | void __rcu **slot, void *item) |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1098 | { |
Matthew Wilcox | 1cf56f9 | 2018-04-09 16:24:45 -0400 | [diff] [blame] | 1099 | __radix_tree_replace(root, iter->node, slot, item); |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1100 | } |
| 1101 | |
Matthew Wilcox | 30b888b | 2017-01-28 09:55:20 -0500 | [diff] [blame] | 1102 | static void node_tag_set(struct radix_tree_root *root, |
| 1103 | struct radix_tree_node *node, |
| 1104 | unsigned int tag, unsigned int offset) |
| 1105 | { |
| 1106 | while (node) { |
| 1107 | if (tag_get(node, tag, offset)) |
| 1108 | return; |
| 1109 | tag_set(node, tag, offset); |
| 1110 | offset = node->offset; |
| 1111 | node = node->parent; |
| 1112 | } |
| 1113 | |
| 1114 | if (!root_tag_get(root, tag)) |
| 1115 | root_tag_set(root, tag); |
| 1116 | } |
| 1117 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1118 | /** |
| 1119 | * radix_tree_tag_set - set a tag on a radix tree node |
| 1120 | * @root: radix tree root |
| 1121 | * @index: index key |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 1122 | * @tag: tag index |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1123 | * |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1124 | * Set the search tag (which must be < RADIX_TREE_MAX_TAGS) |
 | 1125 | * corresponding to @index in the radix tree, from |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 | * the root all the way down to the leaf node. |
| 1127 | * |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 1128 | * Returns the address of the tagged item. Setting a tag on a not-present |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1129 | * item is a bug. |
| 1130 | */ |
| 1131 | void *radix_tree_tag_set(struct radix_tree_root *root, |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1132 | unsigned long index, unsigned int tag) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1133 | { |
Ross Zwisler | fb96990 | 2016-05-20 17:02:32 -0700 | [diff] [blame] | 1134 | struct radix_tree_node *node, *parent; |
| 1135 | unsigned long maxindex; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1136 | |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1137 | radix_tree_load_root(root, &node, &maxindex); |
Ross Zwisler | fb96990 | 2016-05-20 17:02:32 -0700 | [diff] [blame] | 1138 | BUG_ON(index > maxindex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 1140 | while (radix_tree_is_internal_node(node)) { |
Ross Zwisler | fb96990 | 2016-05-20 17:02:32 -0700 | [diff] [blame] | 1141 | unsigned offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1142 | |
Matthew Wilcox | 4dd6c09 | 2016-05-20 17:03:27 -0700 | [diff] [blame] | 1143 | parent = entry_to_node(node); |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1144 | offset = radix_tree_descend(parent, &node, index); |
Ross Zwisler | fb96990 | 2016-05-20 17:02:32 -0700 | [diff] [blame] | 1145 | BUG_ON(!node); |
| 1146 | |
| 1147 | if (!tag_get(parent, tag, offset)) |
| 1148 | tag_set(parent, tag, offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | } |
| 1150 | |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 1151 | /* set the root's tag bit */ |
Ross Zwisler | fb96990 | 2016-05-20 17:02:32 -0700 | [diff] [blame] | 1152 | if (!root_tag_get(root, tag)) |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 1153 | root_tag_set(root, tag); |
| 1154 | |
Ross Zwisler | fb96990 | 2016-05-20 17:02:32 -0700 | [diff] [blame] | 1155 | return node; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1156 | } |
| 1157 | EXPORT_SYMBOL(radix_tree_tag_set); |
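/*
 * Tagging sketch (illustrative; tag 0 here plays the role that
 * PAGECACHE_TAG_DIRTY plays for the page cache).  The entry must already
 * be present, since tagging a not-present item is a bug:
 *
 *	spin_lock(&my_lock);
 *	if (radix_tree_lookup(&my_tree, index))
 *		radix_tree_tag_set(&my_tree, index, 0);
 *	spin_unlock(&my_lock);
 */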
| 1158 | |
Matthew Wilcox | 30b888b | 2017-01-28 09:55:20 -0500 | [diff] [blame] | 1159 | /** |
| 1160 | * radix_tree_iter_tag_set - set a tag on the current iterator entry |
| 1161 | * @root: radix tree root |
| 1162 | * @iter: iterator state |
| 1163 | * @tag: tag to set |
| 1164 | */ |
| 1165 | void radix_tree_iter_tag_set(struct radix_tree_root *root, |
| 1166 | const struct radix_tree_iter *iter, unsigned int tag) |
| 1167 | { |
| 1168 | node_tag_set(root, iter->node, tag, iter_offset(iter)); |
| 1169 | } |
| 1170 | |
Matthew Wilcox | d604c32 | 2016-05-20 17:03:45 -0700 | [diff] [blame] | 1171 | static void node_tag_clear(struct radix_tree_root *root, |
| 1172 | struct radix_tree_node *node, |
| 1173 | unsigned int tag, unsigned int offset) |
| 1174 | { |
| 1175 | while (node) { |
| 1176 | if (!tag_get(node, tag, offset)) |
| 1177 | return; |
| 1178 | tag_clear(node, tag, offset); |
| 1179 | if (any_tag_set(node, tag)) |
| 1180 | return; |
| 1181 | |
| 1182 | offset = node->offset; |
| 1183 | node = node->parent; |
| 1184 | } |
| 1185 | |
| 1186 | /* clear the root's tag bit */ |
| 1187 | if (root_tag_get(root, tag)) |
| 1188 | root_tag_clear(root, tag); |
| 1189 | } |
| 1190 | |
Matthew Wilcox | 268f42d | 2016-12-14 15:08:55 -0800 | [diff] [blame] | 1191 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1192 | * radix_tree_tag_clear - clear a tag on a radix tree node |
| 1193 | * @root: radix tree root |
| 1194 | * @index: index key |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 1195 | * @tag: tag index |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1196 | * |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1197 | * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS) |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 1198 | * corresponding to @index in the radix tree. If this causes |
| 1199 | * the leaf node to have no tags set then clear the tag in the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | * next-to-leaf node, etc. |
| 1201 | * |
 | 1202 | * Returns the address of the tagged item on success, else NULL; i.e. it |
 | 1203 | * has the same return value and semantics as radix_tree_lookup(). |
| 1204 | */ |
| 1205 | void *radix_tree_tag_clear(struct radix_tree_root *root, |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1206 | unsigned long index, unsigned int tag) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1207 | { |
Ross Zwisler | 00f47b5 | 2016-05-20 17:02:35 -0700 | [diff] [blame] | 1208 | struct radix_tree_node *node, *parent; |
| 1209 | unsigned long maxindex; |
Hugh Dickins | e2bdb93 | 2012-01-12 17:20:41 -0800 | [diff] [blame] | 1210 | int uninitialized_var(offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1211 | |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1212 | radix_tree_load_root(root, &node, &maxindex); |
Ross Zwisler | 00f47b5 | 2016-05-20 17:02:35 -0700 | [diff] [blame] | 1213 | if (index > maxindex) |
| 1214 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1215 | |
Ross Zwisler | 00f47b5 | 2016-05-20 17:02:35 -0700 | [diff] [blame] | 1216 | parent = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1217 | |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 1218 | while (radix_tree_is_internal_node(node)) { |
Matthew Wilcox | 4dd6c09 | 2016-05-20 17:03:27 -0700 | [diff] [blame] | 1219 | parent = entry_to_node(node); |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1220 | offset = radix_tree_descend(parent, &node, index); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1221 | } |
| 1222 | |
Matthew Wilcox | d604c32 | 2016-05-20 17:03:45 -0700 | [diff] [blame] | 1223 | if (node) |
| 1224 | node_tag_clear(root, parent, tag, offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1225 | |
Ross Zwisler | 00f47b5 | 2016-05-20 17:02:35 -0700 | [diff] [blame] | 1226 | return node; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1227 | } |
| 1228 | EXPORT_SYMBOL(radix_tree_tag_clear); |
| 1229 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 | /** |
Matthew Wilcox | 30b888b | 2017-01-28 09:55:20 -0500 | [diff] [blame] | 1231 | * radix_tree_iter_tag_clear - clear a tag on the current iterator entry |
| 1232 | * @root: radix tree root |
| 1233 | * @iter: iterator state |
| 1234 | * @tag: tag to clear |
| 1235 | */ |
| 1236 | void radix_tree_iter_tag_clear(struct radix_tree_root *root, |
| 1237 | const struct radix_tree_iter *iter, unsigned int tag) |
| 1238 | { |
| 1239 | node_tag_clear(root, iter->node, tag, iter_offset(iter)); |
| 1240 | } |
| 1241 | |
| 1242 | /** |
Marcelo Tosatti | 32605a1 | 2005-09-06 15:16:48 -0700 | [diff] [blame] | 1243 | * radix_tree_tag_get - get a tag on a radix tree node |
| 1244 | * @root: radix tree root |
| 1245 | * @index: index key |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 1246 | * @tag: tag index (< RADIX_TREE_MAX_TAGS) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1247 | * |
Marcelo Tosatti | 32605a1 | 2005-09-06 15:16:48 -0700 | [diff] [blame] | 1248 | * Return values: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1249 | * |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 1250 | * 0: tag not present or not set |
| 1251 | * 1: tag set |
David Howells | ce82653 | 2010-04-06 22:36:20 +0100 | [diff] [blame] | 1252 | * |
| 1253 | * Note that the return value of this function may not be relied on, even if |
| 1254 | * the RCU lock is held, unless tag modification and node deletion are excluded |
| 1255 | * from concurrency. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1256 | */ |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1257 | int radix_tree_tag_get(const struct radix_tree_root *root, |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1258 | unsigned long index, unsigned int tag) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | { |
Ross Zwisler | 4589ba6 | 2016-05-20 17:02:38 -0700 | [diff] [blame] | 1260 | struct radix_tree_node *node, *parent; |
| 1261 | unsigned long maxindex; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1262 | |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 1263 | if (!root_tag_get(root, tag)) |
| 1264 | return 0; |
| 1265 | |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1266 | radix_tree_load_root(root, &node, &maxindex); |
Ross Zwisler | 4589ba6 | 2016-05-20 17:02:38 -0700 | [diff] [blame] | 1267 | if (index > maxindex) |
| 1268 | return 0; |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1269 | |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 1270 | while (radix_tree_is_internal_node(node)) { |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1271 | unsigned offset; |
Ross Zwisler | 4589ba6 | 2016-05-20 17:02:38 -0700 | [diff] [blame] | 1272 | |
Matthew Wilcox | 4dd6c09 | 2016-05-20 17:03:27 -0700 | [diff] [blame] | 1273 | parent = entry_to_node(node); |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1274 | offset = radix_tree_descend(parent, &node, index); |
Ross Zwisler | 4589ba6 | 2016-05-20 17:02:38 -0700 | [diff] [blame] | 1275 | |
Ross Zwisler | 4589ba6 | 2016-05-20 17:02:38 -0700 | [diff] [blame] | 1276 | if (!tag_get(parent, tag, offset)) |
| 1277 | return 0; |
| 1278 | if (node == RADIX_TREE_RETRY) |
| 1279 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1280 | } |
Ross Zwisler | 4589ba6 | 2016-05-20 17:02:38 -0700 | [diff] [blame] | 1281 | |
| 1282 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1283 | } |
| 1284 | EXPORT_SYMBOL(radix_tree_tag_get); |
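/*
 * For example, a hint-only check (illustrative; per the note above the
 * result may be stale unless tag updates and deletions are excluded):
 *
 *	rcu_read_lock();
 *	dirty = radix_tree_tag_get(&my_tree, index, 0);
 *	rcu_read_unlock();
 */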
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1285 | |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1286 | static inline void __set_iter_shift(struct radix_tree_iter *iter, |
| 1287 | unsigned int shift) |
| 1288 | { |
| 1289 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
| 1290 | iter->shift = shift; |
| 1291 | #endif |
| 1292 | } |
| 1293 | |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1294 | /* Construct iter->tags bit-mask from node->tags[tag] array */ |
| 1295 | static void set_iter_tags(struct radix_tree_iter *iter, |
| 1296 | struct radix_tree_node *node, unsigned offset, |
| 1297 | unsigned tag) |
| 1298 | { |
| 1299 | unsigned tag_long = offset / BITS_PER_LONG; |
| 1300 | unsigned tag_bit = offset % BITS_PER_LONG; |
| 1301 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1302 | if (!node) { |
| 1303 | iter->tags = 1; |
| 1304 | return; |
| 1305 | } |
| 1306 | |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1307 | iter->tags = node->tags[tag][tag_long] >> tag_bit; |
| 1308 | |
| 1309 | /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ |
| 1310 | if (tag_long < RADIX_TREE_TAG_LONGS - 1) { |
| 1311 | /* Pick tags from next element */ |
| 1312 | if (tag_bit) |
| 1313 | iter->tags |= node->tags[tag][tag_long + 1] << |
| 1314 | (BITS_PER_LONG - tag_bit); |
| 1315 | /* Clip chunk size, here only BITS_PER_LONG tags */ |
| 1316 | iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG); |
| 1317 | } |
| 1318 | } |
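/*
 * Worked example for set_iter_tags() on a 32-bit machine, where
 * RADIX_TREE_TAG_LONGS == 2: offset 40 gives tag_long == 1 and
 * tag_bit == 8, so iter->tags holds bits 8..31 of tags[tag][1] and no
 * clipping is needed; offset 10 gives tag_long == 0 and tag_bit == 10,
 * the top of iter->tags is filled from tags[tag][1], and next_index is
 * clipped so the chunk spans exactly BITS_PER_LONG slots.
 */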
| 1319 | |
| 1320 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1321 | static void __rcu **skip_siblings(struct radix_tree_node **nodep, |
| 1322 | void __rcu **slot, struct radix_tree_iter *iter) |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1323 | { |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1324 | while (iter->index < iter->next_index) { |
| 1325 | *nodep = rcu_dereference_raw(*slot); |
Matthew Wilcox | 02c02bf | 2017-11-03 23:09:45 -0400 | [diff] [blame] | 1326 | if (*nodep && !xa_is_sibling(*nodep)) |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1327 | return slot; |
| 1328 | slot++; |
| 1329 | iter->index = __radix_tree_iter_add(iter, 1); |
| 1330 | iter->tags >>= 1; |
| 1331 | } |
| 1332 | |
| 1333 | *nodep = NULL; |
| 1334 | return NULL; |
| 1335 | } |
| 1336 | |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1337 | void __rcu **__radix_tree_next_slot(void __rcu **slot, |
| 1338 | struct radix_tree_iter *iter, unsigned flags) |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1339 | { |
| 1340 | unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; |
Ross Zwisler | 9f41822 | 2018-05-18 16:09:06 -0700 | [diff] [blame] | 1341 | struct radix_tree_node *node; |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1342 | |
| 1343 | slot = skip_siblings(&node, slot, iter); |
| 1344 | |
| 1345 | while (radix_tree_is_internal_node(node)) { |
| 1346 | unsigned offset; |
| 1347 | unsigned long next_index; |
| 1348 | |
| 1349 | if (node == RADIX_TREE_RETRY) |
| 1350 | return slot; |
| 1351 | node = entry_to_node(node); |
Matthew Wilcox | 268f42d | 2016-12-14 15:08:55 -0800 | [diff] [blame] | 1352 | iter->node = node; |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1353 | iter->shift = node->shift; |
| 1354 | |
| 1355 | if (flags & RADIX_TREE_ITER_TAGGED) { |
| 1356 | offset = radix_tree_find_next_bit(node, tag, 0); |
| 1357 | if (offset == RADIX_TREE_MAP_SIZE) |
| 1358 | return NULL; |
| 1359 | slot = &node->slots[offset]; |
| 1360 | iter->index = __radix_tree_iter_add(iter, offset); |
| 1361 | set_iter_tags(iter, node, offset, tag); |
| 1362 | node = rcu_dereference_raw(*slot); |
| 1363 | } else { |
| 1364 | offset = 0; |
| 1365 | slot = &node->slots[0]; |
| 1366 | for (;;) { |
| 1367 | node = rcu_dereference_raw(*slot); |
| 1368 | if (node) |
| 1369 | break; |
| 1370 | slot++; |
| 1371 | offset++; |
| 1372 | if (offset == RADIX_TREE_MAP_SIZE) |
| 1373 | return NULL; |
| 1374 | } |
| 1375 | iter->index = __radix_tree_iter_add(iter, offset); |
| 1376 | } |
| 1377 | if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0)) |
| 1378 | goto none; |
| 1379 | next_index = (iter->index | shift_maxindex(iter->shift)) + 1; |
| 1380 | if (next_index < iter->next_index) |
| 1381 | iter->next_index = next_index; |
| 1382 | } |
| 1383 | |
| 1384 | return slot; |
| 1385 | none: |
| 1386 | iter->next_index = 0; |
| 1387 | return NULL; |
| 1388 | } |
| 1389 | EXPORT_SYMBOL(__radix_tree_next_slot); |
| 1390 | #else |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1391 | static void __rcu **skip_siblings(struct radix_tree_node **nodep, |
| 1392 | void __rcu **slot, struct radix_tree_iter *iter) |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1393 | { |
| 1394 | return slot; |
| 1395 | } |
| 1396 | #endif |
| 1397 | |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1398 | void __rcu **radix_tree_iter_resume(void __rcu **slot, |
| 1399 | struct radix_tree_iter *iter) |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1400 | { |
| 1401 | struct radix_tree_node *node; |
| 1402 | |
| 1403 | slot++; |
| 1404 | iter->index = __radix_tree_iter_add(iter, 1); |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1405 | skip_siblings(&node, slot, iter); |
| 1406 | iter->next_index = iter->index; |
| 1407 | iter->tags = 0; |
| 1408 | return NULL; |
| 1409 | } |
| 1410 | EXPORT_SYMBOL(radix_tree_iter_resume); |
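/*
 * Typical use (illustrative), following the pattern in mm/shmem.c: pause
 * a long scan so the CPU can reschedule without the iterator pointing
 * into nodes that may be freed meanwhile:
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		...
 *		if (need_resched()) {
 *			slot = radix_tree_iter_resume(slot, &iter);
 *			cond_resched_rcu();
 *		}
 *	}
 */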
| 1411 | |
Fengguang Wu | 6df8ba4 | 2007-10-16 01:24:33 -0700 | [diff] [blame] | 1412 | /** |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1413 | * radix_tree_next_chunk - find next chunk of slots for iteration |
| 1414 | * |
| 1415 | * @root: radix tree root |
| 1416 | * @iter: iterator state |
| 1417 | * @flags: RADIX_TREE_ITER_* flags and tag index |
| 1418 | * Returns: pointer to chunk first slot, or NULL if iteration is over |
| 1419 | */ |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1420 | void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root, |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1421 | struct radix_tree_iter *iter, unsigned flags) |
| 1422 | { |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1423 | unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1424 | struct radix_tree_node *node, *child; |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1425 | unsigned long index, offset, maxindex; |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1426 | |
| 1427 | if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) |
| 1428 | return NULL; |
| 1429 | |
| 1430 | /* |
| 1431 | * Catch next_index overflow after ~0UL. iter->index never overflows |
 | 1432 | * during iteration; it can be zero only at the beginning. |
| 1433 | * And we cannot overflow iter->next_index in a single step, |
| 1434 | * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. |
Konstantin Khlebnikov | fffaee3 | 2012-06-05 21:36:33 +0400 | [diff] [blame] | 1435 | * |
 | 1436 | * This condition is also used by radix_tree_next_slot() to stop |
Matthew Wilcox | 91b9677c | 2016-12-14 15:08:31 -0800 | [diff] [blame] | 1437 | * contiguous iteration and to forbid switching to the next chunk. |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1438 | */ |
| 1439 | index = iter->next_index; |
| 1440 | if (!index && iter->index) |
| 1441 | return NULL; |
| 1442 | |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1443 | restart: |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1444 | radix_tree_load_root(root, &child, &maxindex); |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1445 | if (index > maxindex) |
| 1446 | return NULL; |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1447 | if (!child) |
| 1448 | return NULL; |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1449 | |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1450 | if (!radix_tree_is_internal_node(child)) { |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1451 | /* Single-slot tree */ |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1452 | iter->index = index; |
| 1453 | iter->next_index = maxindex + 1; |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1454 | iter->tags = 1; |
Matthew Wilcox | 268f42d | 2016-12-14 15:08:55 -0800 | [diff] [blame] | 1455 | iter->node = NULL; |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1456 | __set_iter_shift(iter, 0); |
Matthew Wilcox | f8d5d0c | 2017-11-07 16:30:10 -0500 | [diff] [blame] | 1457 | return (void __rcu **)&root->xa_head; |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1458 | } |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1459 | |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1460 | do { |
| 1461 | node = entry_to_node(child); |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1462 | offset = radix_tree_descend(node, &child, index); |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1463 | |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1464 | if ((flags & RADIX_TREE_ITER_TAGGED) ? |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1465 | !tag_get(node, tag, offset) : !child) { |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1466 | /* Hole detected */ |
| 1467 | if (flags & RADIX_TREE_ITER_CONTIG) |
| 1468 | return NULL; |
| 1469 | |
| 1470 | if (flags & RADIX_TREE_ITER_TAGGED) |
Matthew Wilcox | bc412fc | 2016-12-14 15:08:40 -0800 | [diff] [blame] | 1471 | offset = radix_tree_find_next_bit(node, tag, |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1472 | offset + 1); |
| 1473 | else |
| 1474 | while (++offset < RADIX_TREE_MAP_SIZE) { |
Matthew Wilcox | 12320d0 | 2017-02-13 15:22:48 -0500 | [diff] [blame] | 1475 | void *slot = rcu_dereference_raw( |
| 1476 | node->slots[offset]); |
Matthew Wilcox | 02c02bf | 2017-11-03 23:09:45 -0400 | [diff] [blame] | 1477 | if (xa_is_sibling(slot)) |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1478 | continue; |
| 1479 | if (slot) |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1480 | break; |
| 1481 | } |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1482 | index &= ~node_maxindex(node); |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1483 | index += offset << node->shift; |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1484 | /* Overflow after ~0UL */ |
| 1485 | if (!index) |
| 1486 | return NULL; |
| 1487 | if (offset == RADIX_TREE_MAP_SIZE) |
| 1488 | goto restart; |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1489 | child = rcu_dereference_raw(node->slots[offset]); |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1490 | } |
| 1491 | |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1492 | if (!child) |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1493 | goto restart; |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1494 | if (child == RADIX_TREE_RETRY) |
| 1495 | break; |
Matthew Wilcox | 66ee620 | 2018-06-25 06:56:50 -0400 | [diff] [blame] | 1496 | } while (node->shift && radix_tree_is_internal_node(child)); |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1497 | |
| 1498 | /* Update the iterator state */ |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1499 | iter->index = (index & ~node_maxindex(node)) | (offset << node->shift); |
| 1500 | iter->next_index = (index | node_maxindex(node)) + 1; |
Matthew Wilcox | 268f42d | 2016-12-14 15:08:55 -0800 | [diff] [blame] | 1501 | iter->node = node; |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1502 | __set_iter_shift(iter, node->shift); |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1503 | |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1504 | if (flags & RADIX_TREE_ITER_TAGGED) |
| 1505 | set_iter_tags(iter, node, offset, tag); |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1506 | |
| 1507 | return node->slots + offset; |
| 1508 | } |
| 1509 | EXPORT_SYMBOL(radix_tree_next_chunk); |
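/*
 * radix_tree_next_chunk() is normally reached via the iterator macros
 * rather than called directly.  A minimal read-side walk (illustrative):
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *entry = radix_tree_deref_slot(slot);
 *
 *		if (radix_tree_deref_retry(entry)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		pr_debug("index %lu -> %p\n", iter.index, entry);
 *	}
 *	rcu_read_unlock();
 */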
| 1510 | |
| 1511 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1512 | * radix_tree_gang_lookup - perform multiple lookup on a radix tree |
| 1513 | * @root: radix tree root |
| 1514 | * @results: where the results of the lookup are placed |
| 1515 | * @first_index: start the lookup from this key |
| 1516 | * @max_items: place up to this many items at *results |
| 1517 | * |
| 1518 | * Performs an index-ascending scan of the tree for present items. Places |
| 1519 | * them at *@results and returns the number of items which were placed at |
| 1520 | * *@results. |
| 1521 | * |
| 1522 | * The implementation is naive. |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1523 | * |
| 1524 | * Like radix_tree_lookup, radix_tree_gang_lookup may be called under |
| 1525 | * rcu_read_lock. In this case, rather than the returned results being |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 1526 | * an atomic snapshot of the tree at a single point in time, the |
| 1527 | * semantics of an RCU protected gang lookup are as though multiple |
 | 1528 | * radix_tree_lookup() calls had been issued under individual locks, with |
 | 1529 | * the results stored in 'results'. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1530 | */ |
| 1531 | unsigned int |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1532 | radix_tree_gang_lookup(const struct radix_tree_root *root, void **results, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1533 | unsigned long first_index, unsigned int max_items) |
| 1534 | { |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1535 | struct radix_tree_iter iter; |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1536 | void __rcu **slot; |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1537 | unsigned int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1538 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1539 | if (unlikely(!max_items)) |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1540 | return 0; |
| 1541 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1542 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
Matthew Wilcox | 46437f9 | 2016-02-02 16:57:52 -0800 | [diff] [blame] | 1543 | results[ret] = rcu_dereference_raw(*slot); |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1544 | if (!results[ret]) |
| 1545 | continue; |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 1546 | if (radix_tree_is_internal_node(results[ret])) { |
Matthew Wilcox | 46437f9 | 2016-02-02 16:57:52 -0800 | [diff] [blame] | 1547 | slot = radix_tree_iter_retry(&iter); |
| 1548 | continue; |
| 1549 | } |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1550 | if (++ret == max_items) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1551 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1552 | } |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1553 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1554 | return ret; |
| 1555 | } |
| 1556 | EXPORT_SYMBOL(radix_tree_gang_lookup); |
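/*
 * Batched lookup sketch (illustrative; process() is a hypothetical
 * consumer).  Drain up to 16 present items per pass starting at "start":
 *
 *	void *batch[16];
 *	unsigned int i, nr;
 *
 *	nr = radix_tree_gang_lookup(&my_tree, batch, start, 16);
 *	for (i = 0; i < nr; i++)
 *		process(batch[i]);
 */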
| 1557 | |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1558 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1559 | * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree |
| 1560 | * based on a tag |
| 1561 | * @root: radix tree root |
| 1562 | * @results: where the results of the lookup are placed |
| 1563 | * @first_index: start the lookup from this key |
| 1564 | * @max_items: place up to this many items at *results |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1565 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1566 | * |
| 1567 | * Performs an index-ascending scan of the tree for present items which |
| 1568 | * have the tag indexed by @tag set. Places the items at *@results and |
| 1569 | * returns the number of items which were placed at *@results. |
| 1570 | */ |
| 1571 | unsigned int |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1572 | radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results, |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1573 | unsigned long first_index, unsigned int max_items, |
| 1574 | unsigned int tag) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1575 | { |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1576 | struct radix_tree_iter iter; |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1577 | void __rcu **slot; |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1578 | unsigned int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1579 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1580 | if (unlikely(!max_items)) |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 1581 | return 0; |
| 1582 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1583 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
Matthew Wilcox | 46437f9 | 2016-02-02 16:57:52 -0800 | [diff] [blame] | 1584 | results[ret] = rcu_dereference_raw(*slot); |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1585 | if (!results[ret]) |
| 1586 | continue; |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 1587 | if (radix_tree_is_internal_node(results[ret])) { |
Matthew Wilcox | 46437f9 | 2016-02-02 16:57:52 -0800 | [diff] [blame] | 1588 | slot = radix_tree_iter_retry(&iter); |
| 1589 | continue; |
| 1590 | } |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1591 | if (++ret == max_items) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1592 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1593 | } |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1594 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1595 | return ret; |
| 1596 | } |
| 1597 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); |
| 1598 | |
| 1599 | /** |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1600 | * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a |
| 1601 | * radix tree based on a tag |
| 1602 | * @root: radix tree root |
| 1603 | * @results: where the results of the lookup are placed |
| 1604 | * @first_index: start the lookup from this key |
| 1605 | * @max_items: place up to this many items at *results |
| 1606 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) |
| 1607 | * |
| 1608 | * Performs an index-ascending scan of the tree for present items which |
| 1609 | * have the tag indexed by @tag set. Places the slots at *@results and |
| 1610 | * returns the number of slots which were placed at *@results. |
| 1611 | */ |
| 1612 | unsigned int |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1613 | radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root, |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1614 | void __rcu ***results, unsigned long first_index, |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1615 | unsigned int max_items, unsigned int tag) |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1616 | { |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1617 | struct radix_tree_iter iter; |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1618 | void __rcu **slot; |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1619 | unsigned int ret = 0; |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1620 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1621 | if (unlikely(!max_items)) |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1622 | return 0; |
| 1623 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1624 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
| 1625 | results[ret] = slot; |
| 1626 | if (++ret == max_items) |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1627 | break; |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1628 | } |
| 1629 | |
| 1630 | return ret; |
| 1631 | } |
| 1632 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); |
| 1633 | |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1634 | static bool __radix_tree_delete(struct radix_tree_root *root, |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1635 | struct radix_tree_node *node, void __rcu **slot) |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1636 | { |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1637 | void *old = rcu_dereference_raw(*slot); |
Matthew Wilcox | 01959df | 2017-11-09 09:23:56 -0500 | [diff] [blame] | 1638 | int values = xa_is_value(old) ? -1 : 0; |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1639 | unsigned offset = get_slot_offset(node, slot); |
| 1640 | int tag; |
| 1641 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1642 | if (is_idr(root)) |
| 1643 | node_tag_set(root, node, IDR_FREE, offset); |
| 1644 | else |
| 1645 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
| 1646 | node_tag_clear(root, node, tag, offset); |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1647 | |
Matthew Wilcox | 01959df | 2017-11-09 09:23:56 -0500 | [diff] [blame] | 1648 | replace_slot(slot, NULL, node, -1, values); |
Matthew Wilcox | 1cf56f9 | 2018-04-09 16:24:45 -0400 | [diff] [blame] | 1649 | return node && delete_node(root, node); |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1650 | } |
| 1651 | |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1652 | /** |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1653 | * radix_tree_iter_delete - delete the entry at this iterator position |
| 1654 | * @root: radix tree root |
| 1655 | * @iter: iterator state |
| 1656 | * @slot: pointer to slot |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1657 | * |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1658 | * Delete the entry at the position currently pointed to by the iterator. |
| 1659 | * This may result in the current node being freed; if it is, the iterator |
| 1660 | * is advanced so that it will not reference the freed memory. This |
| 1661 | * function may be called without any locking if there are no other threads |
| 1662 | * which can access this tree. |
| 1663 | */ |
| 1664 | void radix_tree_iter_delete(struct radix_tree_root *root, |
Matthew Wilcox | d7b6272 | 2017-02-13 15:58:24 -0500 | [diff] [blame] | 1665 | struct radix_tree_iter *iter, void __rcu **slot) |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1666 | { |
| 1667 | if (__radix_tree_delete(root, iter->node, slot)) |
| 1668 | iter->index = iter->next_index; |
| 1669 | } |
Chris Wilson | d1b48c1 | 2017-08-16 09:52:08 +0100 | [diff] [blame] | 1670 | EXPORT_SYMBOL(radix_tree_iter_delete); |
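/*
 * Sketch of deleting while iterating (illustrative); this is safe because
 * the iterator is advanced past any node the deletion frees:
 *
 *	spin_lock(&my_lock);
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
 *		radix_tree_iter_delete(&my_tree, &iter, slot);
 *	spin_unlock(&my_lock);
 */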

/**
 * radix_tree_delete_item - delete an item from a radix tree
 * @root: radix tree root
 * @index: index key
 * @item: expected item
 *
 * Remove @item at @index from the radix tree rooted at @root.
 *
 * Return: the deleted entry, or %NULL if it was not present
 * or the entry at the given @index was not @item.
 */
void *radix_tree_delete_item(struct radix_tree_root *root,
			     unsigned long index, void *item)
{
	struct radix_tree_node *node = NULL;
	void __rcu **slot = NULL;
	void *entry;

	entry = __radix_tree_lookup(root, index, &node, &slot);
	if (!slot)
		return NULL;
	if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
						get_slot_offset(node, slot))))
		return NULL;

	if (item && entry != item)
		return NULL;

	__radix_tree_delete(root, node, slot);

	return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);
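
/*
 * Illustrative sketch: @item acts as a compare-and-delete guard. With a
 * non-NULL @expected, the entry is removed only if it is still the object
 * the caller thinks it is, so a stale deleter (under the caller's lock)
 * can detect that the slot was already replaced. Names are hypothetical.
 */
static bool __maybe_unused example_remove_exact(struct radix_tree_root *root,
						unsigned long index,
						void *expected)
{
	return radix_tree_delete_item(root, index, expected) == expected;
}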

/**
 * radix_tree_delete - delete an entry from a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Remove the entry at @index from the radix tree rooted at @root.
 *
 * Return: The deleted entry, or %NULL if it was not present.
 */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
	return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);
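
/*
 * Illustrative sketch: RADIX_TREE() declares and initialises a tree at
 * compile time; deletion itself needs no GFP mask because it only frees
 * memory. Names are hypothetical.
 */
static RADIX_TREE(example_tree, GFP_KERNEL);

static void * __maybe_unused example_remove(unsigned long index)
{
	/* Returns the removed entry, or NULL if nothing was stored there. */
	return radix_tree_delete(&example_tree, index);
}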
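
/*
 * Clear every tag for the entry at @slot. When @node is NULL the entry
 * hangs directly off the root, so only the root tags need clearing;
 * otherwise each tag is cleared at @slot's offset and propagated up the
 * tree by node_tag_clear().
 */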
void radix_tree_clear_tags(struct radix_tree_root *root,
			   struct radix_tree_node *node,
			   void __rcu **slot)
{
	if (node) {
		unsigned int tag, offset = get_slot_offset(node, slot);

		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			node_tag_clear(root, node, tag, offset);
	} else {
		root_tag_clear_all(root);
	}
}

/**
 * radix_tree_tagged - test whether any items in the tree are tagged
 * @root: radix tree root
 * @tag: tag to test
 *
 * Return: non-zero if any entry in the tree is tagged with @tag,
 * %0 otherwise. Only the root tags are consulted, so this is O(1).
 */
int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
{
	return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);
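
/*
 * Illustrative sketch: because the root tags summarise the whole tree, a
 * caller can skip an expensive tagged walk entirely when nothing is
 * tagged. EXAMPLE_TAG_DIRTY is a hypothetical tag index in the range
 * 0 .. RADIX_TREE_MAX_TAGS - 1.
 */
#define EXAMPLE_TAG_DIRTY	0

static bool __maybe_unused example_has_dirty(struct radix_tree_root *root)
{
	return radix_tree_tagged(root, EXAMPLE_TAG_DIRTY);
}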

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preallocate memory to use for the next call to idr_alloc(). This function
 * returns with preemption disabled. It will be enabled by idr_preload_end().
 */
void idr_preload(gfp_t gfp_mask)
{
	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
		preempt_disable();
}
EXPORT_SYMBOL(idr_preload);
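
/*
 * Illustrative sketch of the preload pattern: preallocate with a blocking
 * GFP mask outside the lock, then allocate with GFP_NOWAIT inside it.
 * idr_preload() leaves preemption disabled even when preallocation fails,
 * so idr_preload_end() always balances it. The lock is hypothetical and
 * owned by the caller.
 */
static int __maybe_unused example_idr_add(struct idr *idr, spinlock_t *lock,
					  void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(lock);
	id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(lock);
	idr_preload_end();

	return id;	/* >= 0 on success, -ENOMEM or -ENOSPC on failure */
}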
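
/*
 * idr_get_free - find the first free (IDR_FREE-tagged) slot at or above
 * @iter->next_index, extending the tree as necessary.
 *
 * Summary of the walk below: load the root, grow the tree whenever the
 * search index exceeds the current maximum, then descend following the
 * IDR_FREE tag, climbing back up (or regrowing) when a node has no free
 * slot left. Returns a pointer to the free slot with @iter describing
 * its index, or an ERR_PTR: -ENOSPC when all indices up to @max are in
 * use, -ENOMEM when a node allocation fails.
 */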
void __rcu **idr_get_free(struct radix_tree_root *root,
			  struct radix_tree_iter *iter, gfp_t gfp,
			  unsigned long max)
{
	struct radix_tree_node *node = NULL, *child;
	void __rcu **slot = (void __rcu **)&root->xa_head;
	unsigned long maxindex, start = iter->next_index;
	unsigned int shift, offset = 0;

 grow:
	shift = radix_tree_load_root(root, &child, &maxindex);
	if (!radix_tree_tagged(root, IDR_FREE))
		start = max(start, maxindex + 1);
	if (start > max)
		return ERR_PTR(-ENOSPC);

	if (start > maxindex) {
		int error = radix_tree_extend(root, gfp, start, shift);
		if (error < 0)
			return ERR_PTR(error);
		shift = error;
		child = rcu_dereference_raw(root->xa_head);
	}
	if (start == 0 && shift == 0)
		shift = RADIX_TREE_MAP_SHIFT;

	while (shift) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node. */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return ERR_PTR(-ENOMEM);
			all_tag_set(child, IDR_FREE);
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, start);
		if (!tag_get(node, IDR_FREE, offset)) {
			offset = radix_tree_find_next_bit(node, IDR_FREE,
							offset + 1);
			start = next_index(start, node, offset);
			if (start > max)
				return ERR_PTR(-ENOSPC);
			while (offset == RADIX_TREE_MAP_SIZE) {
				offset = node->offset + 1;
				node = node->parent;
				if (!node)
					goto grow;
				shift = node->shift;
			}
			child = rcu_dereference_raw(node->slots[offset]);
		}
		slot = &node->slots[offset];
	}

	iter->index = start;
	if (node)
		iter->next_index = 1 + min(max, (start | node_maxindex(node)));
	else
		iter->next_index = 1;
	iter->node = node;
	__set_iter_shift(iter, shift);
	set_iter_tags(iter, node, offset, IDR_FREE);

	return slot;
}

/**
 * idr_destroy - release all internal memory from an IDR
 * @idr: idr handle
 *
 * After this function is called, the IDR is empty, and may be reused or
 * the data structure containing it may be freed.
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free the memory used to keep track of those objects.
 */
void idr_destroy(struct idr *idr)
{
	struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
	if (radix_tree_is_internal_node(node))
		radix_tree_free_nodes(node);
	idr->idr_rt.xa_head = NULL;
	root_tag_set(&idr->idr_rt, IDR_FREE);
}
EXPORT_SYMBOL(idr_destroy);
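
/*
 * Illustrative sketch of the clean-up sequence described above: free each
 * stored object via idr_for_each(), then release the tree's own nodes
 * with idr_destroy(). The kfree() assumes the stored pointers came from
 * kmalloc(); both helpers are hypothetical.
 */
static int __maybe_unused example_free_one(int id, void *p, void *data)
{
	kfree(p);
	return 0;
}

static void __maybe_unused example_idr_cleanup(struct idr *idr)
{
	idr_for_each(idr, example_free_one, NULL);
	idr_destroy(idr);
}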

static void
radix_tree_node_ctor(void *arg)
{
	struct radix_tree_node *node = arg;

	memset(node, 0, sizeof(*node));
	INIT_LIST_HEAD(&node->private_list);
}

static __init unsigned long __maxindex(unsigned int height)
{
	unsigned int width = height * RADIX_TREE_MAP_SHIFT;
	int shift = RADIX_TREE_INDEX_BITS - width;

	if (shift < 0)
		return ~0UL;
	if (shift >= BITS_PER_LONG)
		return 0UL;
	return ~0UL >> shift;
}

static __init void radix_tree_init_maxnodes(void)
{
	unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1];
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
		height_to_maxindex[i] = __maxindex(i);
	for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) {
		for (j = i; j > 0; j--)
			height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1;
	}
}
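
/*
 * Worked example of the computation above, assuming RADIX_TREE_MAP_SHIFT
 * is 6 (64 slots per node) on a 64-bit machine: __maxindex() yields
 * 0, 63, 4095, ... for heights 0, 1, 2, ..., so a fully populated tree
 * of height 3 needs (0 + 1) + (63 + 1) + (4095 + 1) = 4161 nodes: one
 * root, its 64 children, and their 4096 children.
 */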

static int radix_tree_cpu_dead(unsigned int cpu)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;

	/* Free per-cpu pool of preloaded nodes */
	rtp = &per_cpu(radix_tree_preloads, cpu);
	while (rtp->nr) {
		node = rtp->nodes;
		rtp->nodes = node->parent;
		kmem_cache_free(radix_tree_node_cachep, node);
		rtp->nr--;
	}
	return 0;
}

void __init radix_tree_init(void)
{
	int ret;

	BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
	BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
	BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0,
			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
			radix_tree_node_ctor);
	radix_tree_init_maxnodes();
	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
					NULL, radix_tree_cpu_dead);
	WARN_ON(ret < 0);
}