/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
#include <linux/preempt.h>		/* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>


/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;

/*
 * Radix tree node cache.
 */
static struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
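
/*
 * For example, assuming RADIX_TREE_MAP_SHIFT == 6 (the usual value when
 * CONFIG_BASE_SMALL is not set) and 64-bit longs: RADIX_TREE_MAX_PATH is
 * DIV_ROUND_UP(64, 6) == 11, so RADIX_TREE_PRELOAD_SIZE == 21, two
 * branches of 11 nodes each that share a single root node.
 */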

/*
 * The IDR does not have to be as high as the radix tree since it uses
 * signed integers, not unsigned longs.
 */
#define IDR_INDEX_BITS		(8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH		(DIV_ROUND_UP(IDR_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE	(IDR_MAX_PATH * 2 - 1)
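
/*
 * Under the same assumptions (RADIX_TREE_MAP_SHIFT == 6, 32-bit int):
 * IDR_INDEX_BITS == 31, IDR_MAX_PATH == DIV_ROUND_UP(31, 6) == 6 and
 * IDR_PRELOAD_SIZE == 11, roughly half the radix tree worst case.
 */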

/*
 * The IDA is even shorter since it uses a bitmap at the last level.
 */
#define IDA_INDEX_BITS	(8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
#define IDA_MAX_PATH		(DIV_ROUND_UP(IDA_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDA_PRELOAD_SIZE	(IDA_MAX_PATH * 2 - 1)
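
/*
 * Continuing the example: with 1024-bit IDA bitmaps (IDA_BITMAP_BITS on
 * a typical 64-bit configuration), IDA_INDEX_BITS == 31 - 10 == 21,
 * IDA_MAX_PATH == DIV_ROUND_UP(21, 6) == 4 and IDA_PRELOAD_SIZE == 7.
 */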

/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->parent points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY	node_to_entry(NULL)
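
/*
 * Entries with the RADIX_TREE_INTERNAL_NODE bit set point to another
 * radix_tree_node; everything else is user data.  This works because
 * nodes are at least word-aligned, leaving the low bits of a node
 * pointer free for tagging.  RADIX_TREE_RETRY is thus a tagged NULL: it
 * looks like an internal pointer to a lookup, forcing the lookup to
 * restart, but carries no node.
 */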

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */
static inline
bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
{
	void **ptr = node;
	return (parent->slots <= ptr) &&
			(ptr < parent->slots + RADIX_TREE_MAP_SIZE);
}
#else
static inline
bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
{
	return false;
}
#endif

static inline
unsigned long get_slot_offset(const struct radix_tree_node *parent, void **slot)
{
	return slot - parent->slots;
}

static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void **entry = rcu_dereference_raw(parent->slots[offset]);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	if (radix_tree_is_internal_node(entry)) {
		if (is_sibling_entry(parent, entry)) {
			void **sibentry = (void **) entry_to_node(entry);
			offset = get_slot_offset(parent, sibentry);
			entry = rcu_dereference_raw(*sibentry);
		}
	}
#endif

	*nodep = (void *)entry;
	return offset;
}
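
/*
 * For example, with RADIX_TREE_MAP_SHIFT == 6, descending for index 70
 * through a node of shift 6 yields offset (70 >> 6) & 63 == 1 and loads
 * slot 1.  If that slot holds a sibling entry, it is chased back to the
 * canonical slot of its multiorder entry before returning.
 */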

static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
	return root->gfp_mask & __GFP_BITS_MASK;
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
	root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1;
}

static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
	return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
}

static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
	return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT;
}

static inline bool is_idr(const struct radix_tree_root *root)
{
	return !!(root->gfp_mask & ROOT_IS_IDR);
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(const struct radix_tree_node *node,
							unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}

static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}

/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @node: the node whose tag bitmap is to be searched
 * @tag: the tag index to search
 * @offset: the bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or RADIX_TREE_MAP_SIZE if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}

static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
	return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
}

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}
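
/*
 * For instance, with RADIX_TREE_MAP_SIZE == 64, shift_maxindex(0) == 63
 * and shift_maxindex(6) == 4095: each additional level multiplies the
 * index space by RADIX_TREE_MAP_SIZE.
 */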

static unsigned long next_index(unsigned long index,
				const struct radix_tree_node *node,
				unsigned long offset)
{
	return (index & ~node_maxindex(node)) + (offset << node->shift);
}

#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
	unsigned long i;

	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
		node, node->offset, index, index | node_maxindex(node),
		node->parent,
		node->tags[0][0], node->tags[1][0], node->tags[2][0],
		node->shift, node->count, node->exceptional);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		unsigned long first = index | (i << node->shift);
		unsigned long last = first | ((1UL << node->shift) - 1);
		void *entry = node->slots[i];
		if (!entry)
			continue;
		if (entry == RADIX_TREE_RETRY) {
			pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
					i, first, last, node);
		} else if (!radix_tree_is_internal_node(entry)) {
			pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
					entry, i, first, last, node);
		} else if (is_sibling_entry(node, entry)) {
			pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
					entry, i, first, last, node,
					*(void **)entry_to_node(entry));
		} else {
			dump_node(entry_to_node(entry), first);
		}
	}
}

/* For debug */
static void radix_tree_dump(struct radix_tree_root *root)
{
	pr_debug("radix root: %p rnode %p tags %x\n",
			root, root->rnode,
			root->gfp_mask >> ROOT_TAG_SHIFT);
	if (!radix_tree_is_internal_node(root->rnode))
		return;
	dump_node(entry_to_node(root->rnode), 0);
}

static void dump_ida_node(void *entry, unsigned long index)
{
	unsigned long i;

	if (!entry)
		return;

	if (radix_tree_is_internal_node(entry)) {
		struct radix_tree_node *node = entry_to_node(entry);

		pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
			node, node->offset, index * IDA_BITMAP_BITS,
			((index | node_maxindex(node)) + 1) *
				IDA_BITMAP_BITS - 1,
			node->parent, node->tags[0][0], node->shift,
			node->count);
		for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
			dump_ida_node(node->slots[i],
					index | (i << node->shift));
	} else if (radix_tree_exceptional_entry(entry)) {
		pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
				entry, (int)(index & RADIX_TREE_MAP_MASK),
				index * IDA_BITMAP_BITS,
				index * IDA_BITMAP_BITS + BITS_PER_LONG -
					RADIX_TREE_EXCEPTIONAL_SHIFT,
				(unsigned long)entry >>
					RADIX_TREE_EXCEPTIONAL_SHIFT);
	} else {
		struct ida_bitmap *bitmap = entry;

		pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
				(int)(index & RADIX_TREE_MAP_MASK),
				index * IDA_BITMAP_BITS,
				(index + 1) * IDA_BITMAP_BITS - 1);
		for (i = 0; i < IDA_BITMAP_LONGS; i++)
			pr_cont(" %lx", bitmap->bitmap[i]);
		pr_cont("\n");
	}
}

static void ida_dump(struct ida *ida)
{
	struct radix_tree_root *root = &ida->ida_rt;
	pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
			root->gfp_mask >> ROOT_TAG_SHIFT);
	dump_ida_node(root->rnode, 0);
}
#endif

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
			unsigned int shift, unsigned int offset,
			unsigned int count, unsigned int exceptional)
{
	struct radix_tree_node *ret = NULL;

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->parent;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	if (ret) {
		ret->parent = parent;
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->exceptional = exceptional;
	}
	return ret;
}

static void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);

	/*
	 * Must only free zeroed nodes into the slab.  We can be left with
	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
	 * and tags here.
	 */
	memset(node->slots, 0, sizeof(node->slots));
	memset(node->tags, 0, sizeof(node->tags));
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->parent = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);
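
/*
 * A minimal usage sketch (mytree, mylock, index and item are placeholder
 * names, not part of this file).  On success radix_tree_preload() returns
 * with preemption disabled; radix_tree_preload_end() re-enables it:
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&mylock);
 *	error = radix_tree_insert(&mytree, index, item);
 *	spin_unlock(&mylock);
 *	radix_tree_preload_end();
 */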

/*
 * The same as the function above, except we don't guarantee that preloading
 * happens.  We do it if we decide it helps.  On success, return zero with
 * preemption disabled.  On error, return -ENOMEM with preemption not
 * disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	preempt_disable();
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/*
 * Preload with enough objects to ensure that we can split a single entry
 * of order @old_order into many entries of size @new_order
 */
int radix_tree_split_preload(unsigned int old_order, unsigned int new_order,
				gfp_t gfp_mask)
{
	unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT);
	unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) -
				(new_order / RADIX_TREE_MAP_SHIFT);
	unsigned nr = 0;

	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	BUG_ON(new_order >= old_order);

	while (layers--)
		nr = nr * RADIX_TREE_MAP_SIZE + 1;
	return __radix_tree_preload(gfp_mask, top * nr);
}
#endif
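
/*
 * As a concrete case (again assuming RADIX_TREE_MAP_SHIFT == 6):
 * splitting an order-7 entry into order-0 entries gives
 * top == 1 << (7 % 6) == 2 and layers == 1, so two nodes are preloaded,
 * one for each of the two slots the order-7 entry occupies in its
 * shift-6 node.
 */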

/*
 * The same as the function above, but preload the number of nodes required
 * to insert (1 << order) continuous naturally-aligned elements.
 */
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
{
	unsigned long nr_subtrees;
	int nr_nodes, subtree_height;

	/* Preloading doesn't help anything with this gfp mask, skip it */
	if (!gfpflags_allow_blocking(gfp_mask)) {
		preempt_disable();
		return 0;
	}

	/*
	 * Calculate number and height of fully populated subtrees it takes to
	 * store (1 << order) elements.
	 */
	nr_subtrees = 1 << order;
	for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
			subtree_height++)
		nr_subtrees >>= RADIX_TREE_MAP_SHIFT;

	/*
	 * The worst case is zero height tree with a single item at index 0 and
	 * then inserting items starting at ULONG_MAX - (1 << order).
	 *
	 * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to
	 * 0-index item.
	 */
	nr_nodes = RADIX_TREE_MAX_PATH;

	/* Plus branch to fully populated subtrees. */
	nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;

	/* Root node is shared. */
	nr_nodes--;

	/* Plus nodes required to build subtrees. */
	nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];

	return __radix_tree_preload(gfp_mask, nr_nodes);
}

static unsigned radix_tree_load_root(const struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->rnode);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
				unsigned long index, unsigned int shift)
{
	struct radix_tree_node *slot;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be.  */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	slot = root->rnode;
	if (!slot && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
							shift, 0, 1, 0);
		if (!node)
			return -ENOMEM;

		if (is_idr(root)) {
			all_tag_set(node, IDR_FREE);
			if (!root_tag_get(root, IDR_FREE)) {
				tag_clear(node, IDR_FREE, 0);
				root_tag_set(root, IDR_FREE);
			}
		} else {
			/* Propagate the aggregated tag info to the new child */
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
				if (root_tag_get(root, tag))
					tag_set(node, tag, 0);
			}
		}

		BUG_ON(shift > BITS_PER_LONG);
		if (radix_tree_is_internal_node(slot)) {
			entry_to_node(slot)->parent = node;
		} else if (radix_tree_exceptional_entry(slot)) {
			/* Moving an exceptional root->rnode to a node */
			node->exceptional = 1;
		}
		node->slots[0] = slot;
		slot = node_to_entry(node);
		rcu_assign_pointer(root->rnode, slot);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}
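
/*
 * For example (RADIX_TREE_MAP_SHIFT == 6): extending a tree whose root
 * entry is a shift-0 node (maxindex 63) to hold index 5000 computes
 * maxshift == 12, pushes the old root down under new shift-6 and
 * shift-12 nodes, and returns 18, i.e. the new top shift plus
 * RADIX_TREE_MAP_SHIFT, matching radix_tree_load_root()'s convention.
 */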

/**
 *	radix_tree_shrink    -    shrink radix tree to minimum height
 *	@root:		radix tree root
 *	@update_node:	callback for changing leaf nodes
 *	@private:	private data to pass to @update_node
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root,
				     radix_tree_update_node_t update_node,
				     void *private)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *node = root->rnode;
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its
		 * child is not at the leftmost slot, or the child is a
		 * multiorder entry, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = node->slots[0];
		if (!child)
			break;
		if (!radix_tree_is_internal_node(child) && node->shift)
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->rnode) as far as dependent read barriers go.
		 */
		root->rnode = child;
		if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
			root_tag_clear(root, IDR_FREE);

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing direct root node with an indirect pointer
		 * also results in a stale slot). So tag the slot as indirect
		 * to force callers to retry.
		 */
		node->count = 0;
		if (!radix_tree_is_internal_node(child)) {
			node->slots[0] = RADIX_TREE_RETRY;
			if (update_node)
				update_node(node, private);
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		shrunk = true;
	}

	return shrunk;
}

static bool delete_node(struct radix_tree_root *root,
			struct radix_tree_node *node,
			radix_tree_update_node_t update_node, void *private)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node == entry_to_node(root->rnode))
				deleted |= radix_tree_shrink(root, update_node,
								private);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			/*
			 * Shouldn't the tags already have all been cleared
			 * by the caller?
			 */
			if (!is_idr(root))
				root_tag_clear_all(root);
			root->rnode = NULL;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}

/**
 *	__radix_tree_create	-	create a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		index occupies 2^order aligned slots
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Create, if necessary, and return the node and slot for an item
 *	at position @index in the radix tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->rnode is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 *
 *	Returns -ENOMEM, or 0 for success.
 */
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void **slot = (void **)&root->rnode;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index | ((1UL << order) - 1);
	gfp_t gfp = root_gfp_mask(root);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough.  */
	if (order > 0 && max == ((1UL << order) - 1))
		max++;
	if (max > maxindex) {
		int error = radix_tree_extend(root, gfp, max, shift);
		if (error < 0)
			return error;
		shift = error;
		child = root->rnode;
	}

	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node.  */
			child = radix_tree_node_alloc(gfp, node, shift,
							offset, 0, 0);
			if (!child)
				return -ENOMEM;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}

/*
 * Free any nodes below this node.  The tree is presumed to not need
 * shrinking, and any user data in the tree is presumed to not need a
 * destructor called on it.  If we need to add a destructor, we can
 * add that functionality later.  Note that we may not clear tags or
 * slots from the tree as an RCU walker may still have a pointer into
 * this subtree.  We could replace the entries with RADIX_TREE_RETRY,
 * but we'll still have to clear those in rcu_free.
 */
static void radix_tree_free_nodes(struct radix_tree_node *node)
{
	unsigned offset = 0;
	struct radix_tree_node *child = entry_to_node(node);

	for (;;) {
		void *entry = child->slots[offset];
		if (radix_tree_is_internal_node(entry) &&
					!is_sibling_entry(child, entry)) {
			child = entry_to_node(entry);
			offset = 0;
			continue;
		}
		offset++;
		while (offset == RADIX_TREE_MAP_SIZE) {
			struct radix_tree_node *old = child;
			offset = child->offset + 1;
			child = child->parent;
			WARN_ON_ONCE(!list_empty(&old->private_list));
			radix_tree_node_free(old);
			if (old == entry_to_node(node))
				return;
		}
	}
}
| 888 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 889 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
Matthew Wilcox | 175542f | 2016-12-14 15:08:58 -0800 | [diff] [blame] | 890 | static inline int insert_entries(struct radix_tree_node *node, void **slot, |
| 891 | void *item, unsigned order, bool replace) |
| 892 | { |
| 893 | struct radix_tree_node *child; |
| 894 | unsigned i, n, tag, offset, tags = 0; |
| 895 | |
| 896 | if (node) { |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 897 | if (order > node->shift) |
| 898 | n = 1 << (order - node->shift); |
| 899 | else |
| 900 | n = 1; |
Matthew Wilcox | 175542f | 2016-12-14 15:08:58 -0800 | [diff] [blame] | 901 | offset = get_slot_offset(node, slot); |
| 902 | } else { |
| 903 | n = 1; |
| 904 | offset = 0; |
| 905 | } |
| 906 | |
| 907 | if (n > 1) { |
| 908 | offset = offset & ~(n - 1); |
| 909 | slot = &node->slots[offset]; |
| 910 | } |
| 911 | child = node_to_entry(slot); |
| 912 | |
| 913 | for (i = 0; i < n; i++) { |
| 914 | if (slot[i]) { |
| 915 | if (replace) { |
| 916 | node->count--; |
| 917 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
| 918 | if (tag_get(node, tag, offset + i)) |
| 919 | tags |= 1 << tag; |
| 920 | } else |
| 921 | return -EEXIST; |
| 922 | } |
| 923 | } |
| 924 | |
| 925 | for (i = 0; i < n; i++) { |
| 926 | struct radix_tree_node *old = slot[i]; |
| 927 | if (i) { |
| 928 | rcu_assign_pointer(slot[i], child); |
| 929 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
| 930 | if (tags & (1 << tag)) |
| 931 | tag_clear(node, tag, offset + i); |
| 932 | } else { |
| 933 | rcu_assign_pointer(slot[i], item); |
| 934 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
| 935 | if (tags & (1 << tag)) |
| 936 | tag_set(node, tag, offset); |
| 937 | } |
| 938 | if (radix_tree_is_internal_node(old) && |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 939 | !is_sibling_entry(node, old) && |
| 940 | (old != RADIX_TREE_RETRY)) |
Matthew Wilcox | 175542f | 2016-12-14 15:08:58 -0800 | [diff] [blame] | 941 | radix_tree_free_nodes(old); |
| 942 | if (radix_tree_exceptional_entry(old)) |
| 943 | node->exceptional--; |
| 944 | } |
| 945 | if (node) { |
| 946 | node->count += n; |
| 947 | if (radix_tree_exceptional_entry(item)) |
| 948 | node->exceptional += n; |
| 949 | } |
| 950 | return n; |
| 951 | } |
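
/*
 * Worked example (illustrative): inserting an order-2 entry into a leaf
 * node (shift 0) gives n = 4. The offset is rounded down to a multiple
 * of 4, slot[0] of that aligned range receives the item itself, and
 * slots 1-3 receive sibling entries pointing back at the canonical slot.
 * When replacing, tags found on the old entries are cleared from the
 * sibling slots and re-set only on the canonical slot.
 */
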
| 952 | #else |
| 953 | static inline int insert_entries(struct radix_tree_node *node, void **slot, |
| 954 | void *item, unsigned order, bool replace) |
| 955 | { |
| 956 | if (*slot) |
| 957 | return -EEXIST; |
| 958 | rcu_assign_pointer(*slot, item); |
| 959 | if (node) { |
| 960 | node->count++; |
| 961 | if (radix_tree_exceptional_entry(item)) |
| 962 | node->exceptional++; |
| 963 | } |
| 964 | return 1; |
| 965 | } |
| 966 | #endif |
| 967 | |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 968 | /** |
Matthew Wilcox | e614523 | 2016-03-17 14:21:54 -0700 | [diff] [blame] | 969 | * __radix_tree_insert - insert into a radix tree |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 970 | * @root: radix tree root |
| 971 | * @index: index key |
Matthew Wilcox | e614523 | 2016-03-17 14:21:54 -0700 | [diff] [blame] | 972 | * @order: key covers the 2^order indices around index |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 973 | * @item: item to insert |
| 974 | * |
| 975 | * Insert an item into the radix tree at position @index. |
| 976 | */ |
Matthew Wilcox | e614523 | 2016-03-17 14:21:54 -0700 | [diff] [blame] | 977 | int __radix_tree_insert(struct radix_tree_root *root, unsigned long index, |
| 978 | unsigned order, void *item) |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 979 | { |
| 980 | struct radix_tree_node *node; |
| 981 | void **slot; |
| 982 | int error; |
| 983 | |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 984 | BUG_ON(radix_tree_is_internal_node(item)); |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 985 | |
Matthew Wilcox | e614523 | 2016-03-17 14:21:54 -0700 | [diff] [blame] | 986 | error = __radix_tree_create(root, index, order, &node, &slot); |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 987 | if (error) |
| 988 | return error; |
Matthew Wilcox | 175542f | 2016-12-14 15:08:58 -0800 | [diff] [blame] | 989 | |
| 990 | error = insert_entries(node, slot, item, order, false); |
| 991 | if (error < 0) |
| 992 | return error; |
Christoph Lameter | 201b626 | 2005-09-06 15:16:46 -0700 | [diff] [blame] | 993 | |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 994 | if (node) { |
Matthew Wilcox | 7b60e9a | 2016-05-20 17:02:23 -0700 | [diff] [blame] | 995 | unsigned offset = get_slot_offset(node, slot); |
Matthew Wilcox | 7b60e9a | 2016-05-20 17:02:23 -0700 | [diff] [blame] | 996 | BUG_ON(tag_get(node, 0, offset)); |
| 997 | BUG_ON(tag_get(node, 1, offset)); |
| 998 | BUG_ON(tag_get(node, 2, offset)); |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 999 | } else { |
Matthew Wilcox | 7b60e9a | 2016-05-20 17:02:23 -0700 | [diff] [blame] | 1000 | BUG_ON(root_tags_get(root)); |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 1001 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1002 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1003 | return 0; |
| 1004 | } |
Matthew Wilcox | e614523 | 2016-03-17 14:21:54 -0700 | [diff] [blame] | 1005 | EXPORT_SYMBOL(__radix_tree_insert); |
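
/*
 * Usage sketch (illustrative; the function and locking scheme are the
 * caller's, not part of this file): a typical order-0 insertion preloads
 * outside the lock so allocations may sleep, then inserts under the lock.
 */
static int example_insert(struct radix_tree_root *tree, spinlock_t *lock,
			  unsigned long index, void *item)
{
	int err;

	/* Preallocate nodes while sleeping is still allowed. */
	err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err;

	spin_lock(lock);
	/* radix_tree_insert() is the order-0 wrapper; -EEXIST if occupied. */
	err = radix_tree_insert(tree, index, item);
	spin_unlock(lock);

	radix_tree_preload_end();
	return err;
}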
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1006 | |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1007 | /** |
| 1008 | * __radix_tree_lookup - lookup an item in a radix tree |
| 1009 | * @root: radix tree root |
| 1010 | * @index: index key |
| 1011 | * @nodep: returns node |
| 1012 | * @slotp: returns slot |
| 1013 | * |
| 1014 | * Lookup and return the item at position @index in the radix |
| 1015 | * tree @root. |
| 1016 | * |
| 1017 | * Until there is more than one item in the tree, no nodes are |
| 1018 | * allocated and @root->rnode is used as a direct slot instead of |
| 1019 | * pointing to a node, in which case *@nodep will be NULL. |
Hans Reiser | a433136 | 2005-11-07 00:59:29 -0800 | [diff] [blame] | 1020 | */ |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1021 | void *__radix_tree_lookup(const struct radix_tree_root *root, |
| 1022 | unsigned long index, struct radix_tree_node **nodep, |
| 1023 | void ***slotp) |
Hans Reiser | a433136 | 2005-11-07 00:59:29 -0800 | [diff] [blame] | 1024 | { |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1025 | struct radix_tree_node *node, *parent; |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 1026 | unsigned long maxindex; |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1027 | void **slot; |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1028 | |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 1029 | restart: |
| 1030 | parent = NULL; |
| 1031 | slot = (void **)&root->rnode; |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1032 | radix_tree_load_root(root, &node, &maxindex); |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 1033 | if (index > maxindex) |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1034 | return NULL; |
| 1035 | |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 1036 | while (radix_tree_is_internal_node(node)) { |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 1037 | unsigned offset; |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1038 | |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 1039 | if (node == RADIX_TREE_RETRY) |
| 1040 | goto restart; |
Matthew Wilcox | 4dd6c09 | 2016-05-20 17:03:27 -0700 | [diff] [blame] | 1041 | parent = entry_to_node(node); |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1042 | offset = radix_tree_descend(parent, &node, index); |
Matthew Wilcox | 8582995 | 2016-05-20 17:02:20 -0700 | [diff] [blame] | 1043 | slot = parent->slots + offset; |
| 1044 | } |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1045 | |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1046 | if (nodep) |
| 1047 | *nodep = parent; |
| 1048 | if (slotp) |
| 1049 | *slotp = slot; |
| 1050 | return node; |
Huang Shijie | b72b71c | 2009-06-16 15:33:42 -0700 | [diff] [blame] | 1051 | } |
| 1052 | |
| 1053 | /** |
| 1054 | * radix_tree_lookup_slot - lookup a slot in a radix tree |
| 1055 | * @root: radix tree root |
| 1056 | * @index: index key |
| 1057 | * |
| 1058 | * Returns: the slot corresponding to the position @index in the |
| 1059 | * radix tree @root. This is useful for update-if-exists operations. |
| 1060 | * |
| 1061 | * This function can be called under rcu_read_lock iff the slot is not |
| 1062 | * modified by radix_tree_replace_slot; otherwise it must be called |
| 1063 | * with exclusion of other writers. Any dereference of the slot must be |
| 1064 | * done using radix_tree_deref_slot. |
| 1065 | */ |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1066 | void **radix_tree_lookup_slot(const struct radix_tree_root *root, |
| 1067 | unsigned long index) |
Huang Shijie | b72b71c | 2009-06-16 15:33:42 -0700 | [diff] [blame] | 1068 | { |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1069 | void **slot; |
| 1070 | |
| 1071 | if (!__radix_tree_lookup(root, index, NULL, &slot)) |
| 1072 | return NULL; |
| 1073 | return slot; |
Hans Reiser | a433136 | 2005-11-07 00:59:29 -0800 | [diff] [blame] | 1074 | } |
| 1075 | EXPORT_SYMBOL(radix_tree_lookup_slot); |
| 1076 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1077 | /** |
| 1078 | * radix_tree_lookup - perform lookup operation on a radix tree |
| 1079 | * @root: radix tree root |
| 1080 | * @index: index key |
| 1081 | * |
| 1082 | * Lookup the item at the position @index in the radix tree @root. |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1083 | * |
| 1084 | * This function can be called under rcu_read_lock; however, the caller |
| 1085 | * must manage lifetimes of leaf nodes (e.g. RCU may also be used to free |
| 1086 | * them safely). No RCU barriers are required to access or modify the |
| 1087 | * returned item, however. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1088 | */ |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1089 | void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1090 | { |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1091 | return __radix_tree_lookup(root, index, NULL, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1092 | } |
| 1093 | EXPORT_SYMBOL(radix_tree_lookup); |
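
/*
 * Usage sketch (illustrative): a lookup under RCU. The caller must ensure
 * the returned item stays alive after rcu_read_unlock(), e.g. by taking a
 * reference on it inside the read-side critical section.
 */
static void *example_lookup(struct radix_tree_root *tree, unsigned long index)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(tree, index);
	/* A reference would be taken here before the item can be used. */
	rcu_read_unlock();

	return item;
}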
| 1094 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1095 | static inline void replace_sibling_entries(struct radix_tree_node *node, |
| 1096 | void **slot, int count, int exceptional) |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1097 | { |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1098 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
| 1099 | void *ptr = node_to_entry(slot); |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1100 | unsigned offset = get_slot_offset(node, slot) + 1; |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1101 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1102 | while (offset < RADIX_TREE_MAP_SIZE) { |
| 1103 | if (node->slots[offset] != ptr) |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1104 | break; |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1105 | if (count < 0) { |
| 1106 | node->slots[offset] = NULL; |
| 1107 | node->count--; |
| 1108 | } |
| 1109 | node->exceptional += exceptional; |
| 1110 | offset++; |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1111 | } |
| 1112 | #endif |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1113 | } |
| 1114 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1115 | static void replace_slot(void **slot, void *item, struct radix_tree_node *node, |
| 1116 | int count, int exceptional) |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1117 | { |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1118 | if (WARN_ON_ONCE(radix_tree_is_internal_node(item))) |
| 1119 | return; |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1120 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1121 | if (node && (count || exceptional)) { |
Johannes Weiner | f4b109c | 2016-12-12 16:43:46 -0800 | [diff] [blame] | 1122 | node->count += count; |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1123 | node->exceptional += exceptional; |
| 1124 | replace_sibling_entries(node, slot, count, exceptional); |
Johannes Weiner | f4b109c | 2016-12-12 16:43:46 -0800 | [diff] [blame] | 1125 | } |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1126 | |
| 1127 | rcu_assign_pointer(*slot, item); |
| 1128 | } |
| 1129 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1130 | static bool node_tag_get(const struct radix_tree_root *root, |
| 1131 | const struct radix_tree_node *node, |
| 1132 | unsigned int tag, unsigned int offset) |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1133 | { |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1134 | if (node) |
| 1135 | return tag_get(node, tag, offset); |
| 1136 | return root_tag_get(root, tag); |
| 1137 | } |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1138 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1139 | /* |
| 1140 | * IDR users want to be able to store NULL in the tree, so if the slot isn't |
| 1141 | * free, don't adjust the count, even if it's transitioning between NULL and |
| 1142 | * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still |
| 1143 | * have empty bits, but the IDA only stores NULL in slots when they're being |
| 1144 | * deleted. |
| 1145 | */ |
| 1146 | static int calculate_count(struct radix_tree_root *root, |
| 1147 | struct radix_tree_node *node, void **slot, |
| 1148 | void *item, void *old) |
| 1149 | { |
| 1150 | if (is_idr(root)) { |
| 1151 | unsigned offset = get_slot_offset(node, slot); |
| 1152 | bool free = node_tag_get(root, node, IDR_FREE, offset); |
| 1153 | if (!free) |
| 1154 | return 0; |
| 1155 | if (!old) |
| 1156 | return 1; |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1157 | } |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1158 | return !!item - !!old; |
Matthew Wilcox | a90eb3a | 2016-12-14 15:09:07 -0800 | [diff] [blame] | 1159 | } |
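
/*
 * Worked example (illustrative): for a plain radix tree (not an IDR),
 * storing an item over NULL yields +1, storing NULL over an item yields
 * -1, and swapping one item for another yields 0, so node->count tracks
 * the number of occupied slots. For an IDR, a slot that is not IDR_FREE
 * already counts as occupied, so the count is left alone.
 */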
| 1160 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1161 | /** |
Johannes Weiner | f794243 | 2016-12-12 16:43:41 -0800 | [diff] [blame] | 1162 | * __radix_tree_replace - replace item in a slot |
Johannes Weiner | 4d693d0 | 2016-12-12 16:43:49 -0800 | [diff] [blame] | 1163 | * @root: radix tree root |
| 1164 | * @node: pointer to tree node |
| 1165 | * @slot: pointer to slot in @node |
| 1166 | * @item: new item to store in the slot. |
| 1167 | * @update_node: callback for changing leaf nodes |
| 1168 | * @private: private data to pass to @update_node |
Johannes Weiner | f794243 | 2016-12-12 16:43:41 -0800 | [diff] [blame] | 1169 | * |
| 1170 | * For use with __radix_tree_lookup(). Caller must hold tree write locked |
| 1171 | * across slot lookup and replacement. |
| 1172 | */ |
| 1173 | void __radix_tree_replace(struct radix_tree_root *root, |
| 1174 | struct radix_tree_node *node, |
Johannes Weiner | 4d693d0 | 2016-12-12 16:43:49 -0800 | [diff] [blame] | 1175 | void **slot, void *item, |
| 1176 | radix_tree_update_node_t update_node, void *private) |
Johannes Weiner | f794243 | 2016-12-12 16:43:41 -0800 | [diff] [blame] | 1177 | { |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1178 | void *old = rcu_dereference_raw(*slot); |
| 1179 | int exceptional = !!radix_tree_exceptional_entry(item) - |
| 1180 | !!radix_tree_exceptional_entry(old); |
| 1181 | int count = calculate_count(root, node, slot, item, old); |
| 1182 | |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1183 | /* |
Johannes Weiner | f4b109c | 2016-12-12 16:43:46 -0800 | [diff] [blame] | 1184 | * This function supports replacing exceptional entries and |
| 1185 | * deleting entries, but that needs accounting against the |
| 1186 | * node unless the slot is root->rnode. |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1187 | */ |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1188 | WARN_ON_ONCE(!node && (slot != (void **)&root->rnode) && |
| 1189 | (count || exceptional)); |
| 1190 | replace_slot(slot, item, node, count, exceptional); |
Johannes Weiner | f4b109c | 2016-12-12 16:43:46 -0800 | [diff] [blame] | 1191 | |
Johannes Weiner | 4d693d0 | 2016-12-12 16:43:49 -0800 | [diff] [blame] | 1192 | if (!node) |
| 1193 | return; |
| 1194 | |
| 1195 | if (update_node) |
| 1196 | update_node(node, private); |
| 1197 | |
| 1198 | delete_node(root, node, update_node, private); |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1199 | } |
Johannes Weiner | f794243 | 2016-12-12 16:43:41 -0800 | [diff] [blame] | 1200 | |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1201 | /** |
| 1202 | * radix_tree_replace_slot - replace item in a slot |
| 1203 | * @root: radix tree root |
| 1204 | * @slot: pointer to slot |
| 1205 | * @item: new item to store in the slot. |
| 1206 | * |
| 1207 | * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(), |
| 1208 | * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked |
| 1209 | * across slot lookup and replacement. |
| 1210 | * |
| 1211 | * NOTE: This cannot be used to switch between non-entries (empty slots), |
| 1212 | * regular entries, and exceptional entries, as that requires accounting |
Johannes Weiner | f4b109c | 2016-12-12 16:43:46 -0800 | [diff] [blame] | 1213 | * inside the radix tree node. When switching from one type of entry to |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1214 | * another or deleting an entry, use __radix_tree_lookup() and |
| 1215 | * __radix_tree_replace() or radix_tree_iter_replace(). |
Johannes Weiner | 6d75f36 | 2016-12-12 16:43:43 -0800 | [diff] [blame] | 1216 | */ |
| 1217 | void radix_tree_replace_slot(struct radix_tree_root *root, |
| 1218 | void **slot, void *item) |
| 1219 | { |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1220 | __radix_tree_replace(root, NULL, slot, item, NULL, NULL); |
Johannes Weiner | f794243 | 2016-12-12 16:43:41 -0800 | [diff] [blame] | 1221 | } |
| 1222 | |
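/*
 * Usage sketch (illustrative): an update-if-exists built from
 * radix_tree_lookup_slot() and radix_tree_replace_slot(); the caller's
 * tree lock must be held across both calls, and both the old and new
 * entries must be regular (non-exceptional) for the accounting to hold.
 */
static bool example_update(struct radix_tree_root *tree, unsigned long index,
			   void *new_item)
{
	void **slot;

	slot = radix_tree_lookup_slot(tree, index);
	if (!slot)
		return false;
	radix_tree_replace_slot(tree, slot, new_item);
	return true;
}
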
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1223 | /** |
| 1224 | * radix_tree_iter_replace - replace item in a slot |
| 1225 | * @root: radix tree root |
| | * @iter: iterator state |
| 1226 | * @slot: pointer to slot |
| 1227 | * @item: new item to store in the slot. |
| 1228 | * |
| 1229 | * For use with radix_tree_split() and radix_tree_for_each_slot(). |
| 1230 | * Caller must hold tree write locked across split and replacement. |
| 1231 | */ |
| 1232 | void radix_tree_iter_replace(struct radix_tree_root *root, |
| 1233 | const struct radix_tree_iter *iter, void **slot, void *item) |
| 1234 | { |
| 1235 | __radix_tree_replace(root, iter->node, slot, item, NULL, NULL); |
| 1236 | } |
| 1237 | |
Matthew Wilcox | 175542f | 2016-12-14 15:08:58 -0800 | [diff] [blame] | 1238 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
| 1239 | /** |
| 1240 | * radix_tree_join - replace multiple entries with one multiorder entry |
| 1241 | * @root: radix tree root |
| 1242 | * @index: an index inside the new entry |
| 1243 | * @order: order of the new entry |
| 1244 | * @item: new entry |
| 1245 | * |
| 1246 | * Call this function to replace several entries with one larger entry. |
| 1247 | * The existing entries are presumed to not need freeing as a result of |
| 1248 | * this call. |
| 1249 | * |
| 1250 | * The replacement entry will have all the tags set on it that were set |
| 1251 | * on any of the entries it is replacing. |
| 1252 | */ |
| 1253 | int radix_tree_join(struct radix_tree_root *root, unsigned long index, |
| 1254 | unsigned order, void *item) |
| 1255 | { |
| 1256 | struct radix_tree_node *node; |
| 1257 | void **slot; |
| 1258 | int error; |
| 1259 | |
| 1260 | BUG_ON(radix_tree_is_internal_node(item)); |
| 1261 | |
| 1262 | error = __radix_tree_create(root, index, order, &node, &slot); |
| 1263 | if (!error) |
| 1264 | error = insert_entries(node, slot, item, order, true); |
| 1265 | if (error > 0) |
| 1266 | error = 0; |
| 1267 | |
| 1268 | return error; |
| 1269 | } |
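
/*
 * Usage sketch (illustrative): replacing all the entries covering indices
 * 0-7 with one order-3 entry. Like insertion this may allocate nodes, so
 * the caller preloads first; 'huge' is a caller-provided regular entry.
 */
static int example_join(struct radix_tree_root *tree, spinlock_t *lock,
			void *huge)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err;

	spin_lock(lock);
	/* Any index inside the range works; index 0, order 3 covers 0-7. */
	err = radix_tree_join(tree, 0, 3, huge);
	spin_unlock(lock);

	radix_tree_preload_end();
	return err;
}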
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1270 | |
| 1271 | /** |
| 1272 | * radix_tree_split - Split an entry into smaller entries |
| 1273 | * @root: radix tree root |
| 1274 | * @index: An index within the large entry |
| 1275 | * @order: Order of new entries |
| 1276 | * |
| 1277 | * Call this function as the first step in replacing a multiorder entry |
| 1278 | * with several entries of lower order. After this function returns, |
| 1279 | * loop over the relevant portion of the tree using radix_tree_for_each_slot() |
| 1280 | * and call radix_tree_iter_replace() to set up each new entry. |
| 1281 | * |
| 1282 | * The tags from this entry are replicated to all the new entries. |
| 1283 | * |
| 1284 | * The radix tree should be locked against modification during the entire |
| 1285 | * replacement operation. Lock-free lookups will see RADIX_TREE_RETRY which |
| 1286 | * should prompt RCU walkers to restart the lookup from the root. |
| 1287 | */ |
| 1288 | int radix_tree_split(struct radix_tree_root *root, unsigned long index, |
| 1289 | unsigned order) |
| 1290 | { |
| 1291 | struct radix_tree_node *parent, *node, *child; |
| 1292 | void **slot; |
| 1293 | unsigned int offset, end; |
| 1294 | unsigned n, tag, tags = 0; |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1295 | gfp_t gfp = root_gfp_mask(root); |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1296 | |
| 1297 | if (!__radix_tree_lookup(root, index, &parent, &slot)) |
| 1298 | return -ENOENT; |
| 1299 | if (!parent) |
| 1300 | return -ENOENT; |
| 1301 | |
| 1302 | offset = get_slot_offset(parent, slot); |
| 1303 | |
| 1304 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
| 1305 | if (tag_get(parent, tag, offset)) |
| 1306 | tags |= 1 << tag; |
| 1307 | |
| 1308 | for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) { |
| 1309 | if (!is_sibling_entry(parent, parent->slots[end])) |
| 1310 | break; |
| 1311 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
| 1312 | if (tags & (1 << tag)) |
| 1313 | tag_set(parent, tag, end); |
| 1314 | /* rcu_assign_pointer ensures tags are set before RETRY */ |
| 1315 | rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY); |
| 1316 | } |
| 1317 | rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY); |
| 1318 | parent->exceptional -= (end - offset); |
| 1319 | |
| 1320 | if (order == parent->shift) |
| 1321 | return 0; |
| 1322 | if (order > parent->shift) { |
| 1323 | while (offset < end) |
| 1324 | offset += insert_entries(parent, &parent->slots[offset], |
| 1325 | RADIX_TREE_RETRY, order, true); |
| 1326 | return 0; |
| 1327 | } |
| 1328 | |
| 1329 | node = parent; |
| 1330 | |
| 1331 | for (;;) { |
| 1332 | if (node->shift > order) { |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1333 | child = radix_tree_node_alloc(gfp, node, |
Matthew Wilcox | e8de434 | 2016-12-14 15:09:31 -0800 | [diff] [blame] | 1334 | node->shift - RADIX_TREE_MAP_SHIFT, |
| 1335 | offset, 0, 0); |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1336 | if (!child) |
| 1337 | goto nomem; |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1338 | if (node != parent) { |
| 1339 | node->count++; |
| 1340 | node->slots[offset] = node_to_entry(child); |
| 1341 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
| 1342 | if (tags & (1 << tag)) |
| 1343 | tag_set(node, tag, offset); |
| 1344 | } |
| 1345 | |
| 1346 | node = child; |
| 1347 | offset = 0; |
| 1348 | continue; |
| 1349 | } |
| 1350 | |
| 1351 | n = insert_entries(node, &node->slots[offset], |
| 1352 | RADIX_TREE_RETRY, order, false); |
| 1353 | BUG_ON(n > RADIX_TREE_MAP_SIZE); |
| 1354 | |
| 1355 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
| 1356 | if (tags & (1 << tag)) |
| 1357 | tag_set(node, tag, offset); |
| 1358 | offset += n; |
| 1359 | |
| 1360 | while (offset == RADIX_TREE_MAP_SIZE) { |
| 1361 | if (node == parent) |
| 1362 | break; |
| 1363 | offset = node->offset; |
| 1364 | child = node; |
| 1365 | node = node->parent; |
| 1366 | rcu_assign_pointer(node->slots[offset], |
| 1367 | node_to_entry(child)); |
| 1368 | offset++; |
| 1369 | } |
| 1370 | if ((node == parent) && (offset == end)) |
| 1371 | return 0; |
| 1372 | } |
| 1373 | |
| 1374 | nomem: |
| 1375 | /* Shouldn't happen; did user forget to preload? */ |
| 1376 | /* TODO: free all the allocated nodes */ |
| 1377 | WARN_ON(1); |
| 1378 | return -ENOMEM; |
| 1379 | } |
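
/*
 * Usage sketch (illustrative) of the protocol described above: split an
 * order-3 entry at 'index' into eight order-0 entries. 'make_entry' is a
 * hypothetical caller-supplied constructor; the caller must hold the tree
 * locked against modification and have preloaded enough nodes.
 */
static int example_split(struct radix_tree_root *tree, unsigned long index,
			 void *(*make_entry)(unsigned long index))
{
	struct radix_tree_iter iter;
	void **slot;
	int err;

	err = radix_tree_split(tree, index, 0);
	if (err)
		return err;

	radix_tree_for_each_slot(slot, tree, &iter, index & ~7UL) {
		if (iter.index > (index | 7))
			break;
		radix_tree_iter_replace(tree, &iter, slot,
					make_entry(iter.index));
	}
	return 0;
}
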
Matthew Wilcox | 175542f | 2016-12-14 15:08:58 -0800 | [diff] [blame] | 1380 | #endif |
| 1381 | |
Matthew Wilcox | 30b888b | 2017-01-28 09:55:20 -0500 | [diff] [blame] | 1382 | static void node_tag_set(struct radix_tree_root *root, |
| 1383 | struct radix_tree_node *node, |
| 1384 | unsigned int tag, unsigned int offset) |
| 1385 | { |
| 1386 | while (node) { |
| 1387 | if (tag_get(node, tag, offset)) |
| 1388 | return; |
| 1389 | tag_set(node, tag, offset); |
| 1390 | offset = node->offset; |
| 1391 | node = node->parent; |
| 1392 | } |
| 1393 | |
| 1394 | if (!root_tag_get(root, tag)) |
| 1395 | root_tag_set(root, tag); |
| 1396 | } |
| 1397 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1398 | /** |
| 1399 | * radix_tree_tag_set - set a tag on a radix tree node |
| 1400 | * @root: radix tree root |
| 1401 | * @index: index key |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 1402 | * @tag: tag index |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1403 | * |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1404 | * Set the search tag (which must be < RADIX_TREE_MAX_TAGS) |
| 1405 | * corresponding to @index in the radix tree, from |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1406 | * the root all the way down to the leaf node. |
| 1407 | * |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 1408 | * Returns the address of the tagged item. Setting a tag on a not-present |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1409 | * item is a bug. |
| 1410 | */ |
| 1411 | void *radix_tree_tag_set(struct radix_tree_root *root, |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1412 | unsigned long index, unsigned int tag) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1413 | { |
Ross Zwisler | fb96990 | 2016-05-20 17:02:32 -0700 | [diff] [blame] | 1414 | struct radix_tree_node *node, *parent; |
| 1415 | unsigned long maxindex; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1416 | |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1417 | radix_tree_load_root(root, &node, &maxindex); |
Ross Zwisler | fb96990 | 2016-05-20 17:02:32 -0700 | [diff] [blame] | 1418 | BUG_ON(index > maxindex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1419 | |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 1420 | while (radix_tree_is_internal_node(node)) { |
Ross Zwisler | fb96990 | 2016-05-20 17:02:32 -0700 | [diff] [blame] | 1421 | unsigned offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1422 | |
Matthew Wilcox | 4dd6c09 | 2016-05-20 17:03:27 -0700 | [diff] [blame] | 1423 | parent = entry_to_node(node); |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1424 | offset = radix_tree_descend(parent, &node, index); |
Ross Zwisler | fb96990 | 2016-05-20 17:02:32 -0700 | [diff] [blame] | 1425 | BUG_ON(!node); |
| 1426 | |
| 1427 | if (!tag_get(parent, tag, offset)) |
| 1428 | tag_set(parent, tag, offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1429 | } |
| 1430 | |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 1431 | /* set the root's tag bit */ |
Ross Zwisler | fb96990 | 2016-05-20 17:02:32 -0700 | [diff] [blame] | 1432 | if (!root_tag_get(root, tag)) |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 1433 | root_tag_set(root, tag); |
| 1434 | |
Ross Zwisler | fb96990 | 2016-05-20 17:02:32 -0700 | [diff] [blame] | 1435 | return node; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1436 | } |
| 1437 | EXPORT_SYMBOL(radix_tree_tag_set); |
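
/*
 * Usage sketch (illustrative): tag every present entry in a range, then
 * visit only the tagged ones. 'EXAMPLE_TAG' stands in for a real tag such
 * as the page cache's dirty tag and must be < RADIX_TREE_MAX_TAGS; the
 * caller's tree lock must be held, since tags are being modified.
 */
#define EXAMPLE_TAG	0

static void example_tag_range(struct radix_tree_root *tree,
			      unsigned long first, unsigned long last)
{
	struct radix_tree_iter iter;
	void **slot;

	radix_tree_for_each_slot(slot, tree, &iter, first) {
		if (iter.index > last)
			break;
		/* Setting a tag on a not-present index would be a bug. */
		radix_tree_tag_set(tree, iter.index, EXAMPLE_TAG);
	}
}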
| 1438 | |
Matthew Wilcox | 30b888b | 2017-01-28 09:55:20 -0500 | [diff] [blame] | 1439 | /** |
| 1440 | * radix_tree_iter_tag_set - set a tag on the current iterator entry |
| 1441 | * @root: radix tree root |
| 1442 | * @iter: iterator state |
| 1443 | * @tag: tag to set |
| 1444 | */ |
| 1445 | void radix_tree_iter_tag_set(struct radix_tree_root *root, |
| 1446 | const struct radix_tree_iter *iter, unsigned int tag) |
| 1447 | { |
| 1448 | node_tag_set(root, iter->node, tag, iter_offset(iter)); |
| 1449 | } |
| 1450 | |
Matthew Wilcox | d604c32 | 2016-05-20 17:03:45 -0700 | [diff] [blame] | 1451 | static void node_tag_clear(struct radix_tree_root *root, |
| 1452 | struct radix_tree_node *node, |
| 1453 | unsigned int tag, unsigned int offset) |
| 1454 | { |
| 1455 | while (node) { |
| 1456 | if (!tag_get(node, tag, offset)) |
| 1457 | return; |
| 1458 | tag_clear(node, tag, offset); |
| 1459 | if (any_tag_set(node, tag)) |
| 1460 | return; |
| 1461 | |
| 1462 | offset = node->offset; |
| 1463 | node = node->parent; |
| 1464 | } |
| 1465 | |
| 1466 | /* clear the root's tag bit */ |
| 1467 | if (root_tag_get(root, tag)) |
| 1468 | root_tag_clear(root, tag); |
| 1469 | } |
| 1470 | |
Matthew Wilcox | 268f42d | 2016-12-14 15:08:55 -0800 | [diff] [blame] | 1471 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1472 | * radix_tree_tag_clear - clear a tag on a radix tree node |
| 1473 | * @root: radix tree root |
| 1474 | * @index: index key |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 1475 | * @tag: tag index |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1476 | * |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1477 | * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS) |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 1478 | * corresponding to @index in the radix tree. If this causes |
| 1479 | * the leaf node to have no tags set then clear the tag in the |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1480 | * next-to-leaf node, etc. |
| 1481 | * |
| 1482 | * Returns the address of the tagged item on success, else NULL; i.e. it |
| 1483 | * has the same return value and semantics as radix_tree_lookup(). |
| 1484 | */ |
| 1485 | void *radix_tree_tag_clear(struct radix_tree_root *root, |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1486 | unsigned long index, unsigned int tag) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1487 | { |
Ross Zwisler | 00f47b5 | 2016-05-20 17:02:35 -0700 | [diff] [blame] | 1488 | struct radix_tree_node *node, *parent; |
| 1489 | unsigned long maxindex; |
Hugh Dickins | e2bdb93 | 2012-01-12 17:20:41 -0800 | [diff] [blame] | 1490 | int uninitialized_var(offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1491 | |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1492 | radix_tree_load_root(root, &node, &maxindex); |
Ross Zwisler | 00f47b5 | 2016-05-20 17:02:35 -0700 | [diff] [blame] | 1493 | if (index > maxindex) |
| 1494 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1495 | |
Ross Zwisler | 00f47b5 | 2016-05-20 17:02:35 -0700 | [diff] [blame] | 1496 | parent = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1497 | |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 1498 | while (radix_tree_is_internal_node(node)) { |
Matthew Wilcox | 4dd6c09 | 2016-05-20 17:03:27 -0700 | [diff] [blame] | 1499 | parent = entry_to_node(node); |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1500 | offset = radix_tree_descend(parent, &node, index); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1501 | } |
| 1502 | |
Matthew Wilcox | d604c32 | 2016-05-20 17:03:45 -0700 | [diff] [blame] | 1503 | if (node) |
| 1504 | node_tag_clear(root, parent, tag, offset); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1505 | |
Ross Zwisler | 00f47b5 | 2016-05-20 17:02:35 -0700 | [diff] [blame] | 1506 | return node; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1507 | } |
| 1508 | EXPORT_SYMBOL(radix_tree_tag_clear); |
| 1509 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1510 | /** |
Matthew Wilcox | 30b888b | 2017-01-28 09:55:20 -0500 | [diff] [blame] | 1511 | * radix_tree_iter_tag_clear - clear a tag on the current iterator entry |
| 1512 | * @root: radix tree root |
| 1513 | * @iter: iterator state |
| 1514 | * @tag: tag to clear |
| 1515 | */ |
| 1516 | void radix_tree_iter_tag_clear(struct radix_tree_root *root, |
| 1517 | const struct radix_tree_iter *iter, unsigned int tag) |
| 1518 | { |
| 1519 | node_tag_clear(root, iter->node, tag, iter_offset(iter)); |
| 1520 | } |
| 1521 | |
| 1522 | /** |
Marcelo Tosatti | 32605a1 | 2005-09-06 15:16:48 -0700 | [diff] [blame] | 1523 | * radix_tree_tag_get - get a tag on a radix tree node |
| 1524 | * @root: radix tree root |
| 1525 | * @index: index key |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 1526 | * @tag: tag index (< RADIX_TREE_MAX_TAGS) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1527 | * |
Marcelo Tosatti | 32605a1 | 2005-09-06 15:16:48 -0700 | [diff] [blame] | 1528 | * Return values: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1529 | * |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 1530 | * 0: tag not present or not set |
| 1531 | * 1: tag set |
David Howells | ce82653 | 2010-04-06 22:36:20 +0100 | [diff] [blame] | 1532 | * |
| 1533 | * Note that the return value of this function may not be relied on, even if |
| 1534 | * the RCU lock is held, unless tag modification and node deletion are excluded |
| 1535 | * from concurrency. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1536 | */ |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1537 | int radix_tree_tag_get(const struct radix_tree_root *root, |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1538 | unsigned long index, unsigned int tag) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1539 | { |
Ross Zwisler | 4589ba6 | 2016-05-20 17:02:38 -0700 | [diff] [blame] | 1540 | struct radix_tree_node *node, *parent; |
| 1541 | unsigned long maxindex; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1542 | |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 1543 | if (!root_tag_get(root, tag)) |
| 1544 | return 0; |
| 1545 | |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1546 | radix_tree_load_root(root, &node, &maxindex); |
Ross Zwisler | 4589ba6 | 2016-05-20 17:02:38 -0700 | [diff] [blame] | 1547 | if (index > maxindex) |
| 1548 | return 0; |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1549 | |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 1550 | while (radix_tree_is_internal_node(node)) { |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1551 | unsigned offset; |
Ross Zwisler | 4589ba6 | 2016-05-20 17:02:38 -0700 | [diff] [blame] | 1552 | |
Matthew Wilcox | 4dd6c09 | 2016-05-20 17:03:27 -0700 | [diff] [blame] | 1553 | parent = entry_to_node(node); |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1554 | offset = radix_tree_descend(parent, &node, index); |
Ross Zwisler | 4589ba6 | 2016-05-20 17:02:38 -0700 | [diff] [blame] | 1555 | |
Ross Zwisler | 4589ba6 | 2016-05-20 17:02:38 -0700 | [diff] [blame] | 1556 | if (!tag_get(parent, tag, offset)) |
| 1557 | return 0; |
| 1558 | if (node == RADIX_TREE_RETRY) |
| 1559 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1560 | } |
Ross Zwisler | 4589ba6 | 2016-05-20 17:02:38 -0700 | [diff] [blame] | 1561 | |
| 1562 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1563 | } |
| 1564 | EXPORT_SYMBOL(radix_tree_tag_get); |
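
/*
 * Usage sketch (illustrative): a test-and-clear of a tag under the
 * caller's tree lock, returning whether the tag had been set. Pairs
 * naturally with radix_tree_for_each_tagged() walks.
 */
static bool example_test_and_clear_tag(struct radix_tree_root *tree,
				       unsigned long index, unsigned int tag)
{
	if (!radix_tree_tag_get(tree, index, tag))
		return false;
	radix_tree_tag_clear(tree, index, tag);
	return true;
}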
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1565 | |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1566 | static inline void __set_iter_shift(struct radix_tree_iter *iter, |
| 1567 | unsigned int shift) |
| 1568 | { |
| 1569 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
| 1570 | iter->shift = shift; |
| 1571 | #endif |
| 1572 | } |
| 1573 | |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1574 | /* Construct iter->tags bit-mask from node->tags[tag] array */ |
| 1575 | static void set_iter_tags(struct radix_tree_iter *iter, |
| 1576 | struct radix_tree_node *node, unsigned offset, |
| 1577 | unsigned tag) |
| 1578 | { |
| 1579 | unsigned tag_long = offset / BITS_PER_LONG; |
| 1580 | unsigned tag_bit = offset % BITS_PER_LONG; |
| 1581 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1582 | if (!node) { |
| 1583 | iter->tags = 1; |
| 1584 | return; |
| 1585 | } |
| 1586 | |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1587 | iter->tags = node->tags[tag][tag_long] >> tag_bit; |
| 1588 | |
| 1589 | /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ |
| 1590 | if (tag_long < RADIX_TREE_TAG_LONGS - 1) { |
| 1591 | /* Pick tags from next element */ |
| 1592 | if (tag_bit) |
| 1593 | iter->tags |= node->tags[tag][tag_long + 1] << |
| 1594 | (BITS_PER_LONG - tag_bit); |
| 1595 | /* Clip the chunk size; iter->tags can hold only BITS_PER_LONG tags */ |
| 1596 | iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG); |
| 1597 | } |
| 1598 | } |
| 1599 | |
| 1600 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
| 1601 | static void **skip_siblings(struct radix_tree_node **nodep, |
| 1602 | void **slot, struct radix_tree_iter *iter) |
| 1603 | { |
| 1604 | void *sib = node_to_entry(slot - 1); |
| 1605 | |
| 1606 | while (iter->index < iter->next_index) { |
| 1607 | *nodep = rcu_dereference_raw(*slot); |
| 1608 | if (*nodep && *nodep != sib) |
| 1609 | return slot; |
| 1610 | slot++; |
| 1611 | iter->index = __radix_tree_iter_add(iter, 1); |
| 1612 | iter->tags >>= 1; |
| 1613 | } |
| 1614 | |
| 1615 | *nodep = NULL; |
| 1616 | return NULL; |
| 1617 | } |
| 1618 | |
| 1619 | void **__radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, |
| 1620 | unsigned flags) |
| 1621 | { |
| 1622 | unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; |
| 1623 | struct radix_tree_node *node = rcu_dereference_raw(*slot); |
| 1624 | |
| 1625 | slot = skip_siblings(&node, slot, iter); |
| 1626 | |
| 1627 | while (radix_tree_is_internal_node(node)) { |
| 1628 | unsigned offset; |
| 1629 | unsigned long next_index; |
| 1630 | |
| 1631 | if (node == RADIX_TREE_RETRY) |
| 1632 | return slot; |
| 1633 | node = entry_to_node(node); |
Matthew Wilcox | 268f42d | 2016-12-14 15:08:55 -0800 | [diff] [blame] | 1634 | iter->node = node; |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1635 | iter->shift = node->shift; |
| 1636 | |
| 1637 | if (flags & RADIX_TREE_ITER_TAGGED) { |
| 1638 | offset = radix_tree_find_next_bit(node, tag, 0); |
| 1639 | if (offset == RADIX_TREE_MAP_SIZE) |
| 1640 | return NULL; |
| 1641 | slot = &node->slots[offset]; |
| 1642 | iter->index = __radix_tree_iter_add(iter, offset); |
| 1643 | set_iter_tags(iter, node, offset, tag); |
| 1644 | node = rcu_dereference_raw(*slot); |
| 1645 | } else { |
| 1646 | offset = 0; |
| 1647 | slot = &node->slots[0]; |
| 1648 | for (;;) { |
| 1649 | node = rcu_dereference_raw(*slot); |
| 1650 | if (node) |
| 1651 | break; |
| 1652 | slot++; |
| 1653 | offset++; |
| 1654 | if (offset == RADIX_TREE_MAP_SIZE) |
| 1655 | return NULL; |
| 1656 | } |
| 1657 | iter->index = __radix_tree_iter_add(iter, offset); |
| 1658 | } |
| 1659 | if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0)) |
| 1660 | goto none; |
| 1661 | next_index = (iter->index | shift_maxindex(iter->shift)) + 1; |
| 1662 | if (next_index < iter->next_index) |
| 1663 | iter->next_index = next_index; |
| 1664 | } |
| 1665 | |
| 1666 | return slot; |
| 1667 | none: |
| 1668 | iter->next_index = 0; |
| 1669 | return NULL; |
| 1670 | } |
| 1671 | EXPORT_SYMBOL(__radix_tree_next_slot); |
| 1672 | #else |
| 1673 | static void **skip_siblings(struct radix_tree_node **nodep, |
| 1674 | void **slot, struct radix_tree_iter *iter) |
| 1675 | { |
| 1676 | return slot; |
| 1677 | } |
| 1678 | #endif |
| 1679 | |
| 1680 | void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter) |
| 1681 | { |
| 1682 | struct radix_tree_node *node; |
| 1683 | |
| 1684 | slot++; |
| 1685 | iter->index = __radix_tree_iter_add(iter, 1); |
| 1686 | node = rcu_dereference_raw(*slot); |
| 1687 | skip_siblings(&node, slot, iter); |
| 1688 | iter->next_index = iter->index; |
| 1689 | iter->tags = 0; |
| 1690 | return NULL; |
| 1691 | } |
| 1692 | EXPORT_SYMBOL(radix_tree_iter_resume); |
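
/*
 * Usage sketch (illustrative): radix_tree_iter_resume() lets a long
 * modifying walk drop its lock periodically; the invalidated chunk makes
 * the next loop iteration re-descend from the root at the saved index.
 */
static void example_long_walk(struct radix_tree_root *tree, spinlock_t *lock)
{
	struct radix_tree_iter iter;
	void **slot;

	spin_lock(lock);
	radix_tree_for_each_slot(slot, tree, &iter, 0) {
		/* ... examine or modify the entry at 'slot' here ... */
		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			spin_unlock(lock);
			cond_resched();
			spin_lock(lock);
		}
	}
	spin_unlock(lock);
}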
| 1693 | |
Fengguang Wu | 6df8ba4 | 2007-10-16 01:24:33 -0700 | [diff] [blame] | 1694 | /** |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1695 | * radix_tree_next_chunk - find next chunk of slots for iteration |
| 1696 | * |
| 1697 | * @root: radix tree root |
| 1698 | * @iter: iterator state |
| 1699 | * @flags: RADIX_TREE_ITER_* flags and tag index |
| 1700 | * Returns: pointer to chunk first slot, or NULL if iteration is over |
| 1701 | */ |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1702 | void **radix_tree_next_chunk(const struct radix_tree_root *root, |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1703 | struct radix_tree_iter *iter, unsigned flags) |
| 1704 | { |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1705 | unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1706 | struct radix_tree_node *node, *child; |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1707 | unsigned long index, offset, maxindex; |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1708 | |
| 1709 | if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag)) |
| 1710 | return NULL; |
| 1711 | |
| 1712 | /* |
| 1713 | * Catch next_index overflow after ~0UL. iter->index never overflows |
| 1714 | * during iterating; it can be zero only at the beginning. |
| 1715 | * And we cannot overflow iter->next_index in a single step, |
| 1716 | * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. |
Konstantin Khlebnikov | fffaee3 | 2012-06-05 21:36:33 +0400 | [diff] [blame] | 1717 | * |
| 1718 | * This condition is also used by radix_tree_next_slot() to stop |
Matthew Wilcox | 91b9677c | 2016-12-14 15:08:31 -0800 | [diff] [blame] | 1719 | * contiguous iterating and to forbid switching to the next chunk. |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1720 | */ |
| 1721 | index = iter->next_index; |
| 1722 | if (!index && iter->index) |
| 1723 | return NULL; |
| 1724 | |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1725 | restart: |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1726 | radix_tree_load_root(root, &child, &maxindex); |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1727 | if (index > maxindex) |
| 1728 | return NULL; |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1729 | if (!child) |
| 1730 | return NULL; |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1731 | |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1732 | if (!radix_tree_is_internal_node(child)) { |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1733 | /* Single-slot tree */ |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1734 | iter->index = index; |
| 1735 | iter->next_index = maxindex + 1; |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1736 | iter->tags = 1; |
Matthew Wilcox | 268f42d | 2016-12-14 15:08:55 -0800 | [diff] [blame] | 1737 | iter->node = NULL; |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1738 | __set_iter_shift(iter, 0); |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1739 | return (void **)&root->rnode; |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1740 | } |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1741 | |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1742 | do { |
| 1743 | node = entry_to_node(child); |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1744 | offset = radix_tree_descend(node, &child, index); |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1745 | |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1746 | if ((flags & RADIX_TREE_ITER_TAGGED) ? |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1747 | !tag_get(node, tag, offset) : !child) { |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1748 | /* Hole detected */ |
| 1749 | if (flags & RADIX_TREE_ITER_CONTIG) |
| 1750 | return NULL; |
| 1751 | |
| 1752 | if (flags & RADIX_TREE_ITER_TAGGED) |
Matthew Wilcox | bc412fc | 2016-12-14 15:08:40 -0800 | [diff] [blame] | 1753 | offset = radix_tree_find_next_bit(node, tag, |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1754 | offset + 1); |
| 1755 | else |
| 1756 | while (++offset < RADIX_TREE_MAP_SIZE) { |
Ross Zwisler | 21ef533 | 2016-05-20 17:02:26 -0700 | [diff] [blame] | 1757 | void *slot = node->slots[offset]; |
| 1758 | if (is_sibling_entry(node, slot)) |
| 1759 | continue; |
| 1760 | if (slot) |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1761 | break; |
| 1762 | } |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1763 | index &= ~node_maxindex(node); |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1764 | index += offset << node->shift; |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1765 | /* Overflow after ~0UL */ |
| 1766 | if (!index) |
| 1767 | return NULL; |
| 1768 | if (offset == RADIX_TREE_MAP_SIZE) |
| 1769 | goto restart; |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1770 | child = rcu_dereference_raw(node->slots[offset]); |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1771 | } |
| 1772 | |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1773 | if (!child) |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1774 | goto restart; |
Matthew Wilcox | e157b55 | 2016-12-14 15:09:01 -0800 | [diff] [blame] | 1775 | if (child == RADIX_TREE_RETRY) |
| 1776 | break; |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1777 | } while (radix_tree_is_internal_node(child)); |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1778 | |
| 1779 | /* Update the iterator state */ |
Matthew Wilcox | 8c1244d | 2016-05-20 17:03:36 -0700 | [diff] [blame] | 1780 | iter->index = (index & ~node_maxindex(node)) | (offset << node->shift); |
| 1781 | iter->next_index = (index | node_maxindex(node)) + 1; |
Matthew Wilcox | 268f42d | 2016-12-14 15:08:55 -0800 | [diff] [blame] | 1782 | iter->node = node; |
Matthew Wilcox | 9e85d81 | 2016-05-20 17:03:48 -0700 | [diff] [blame] | 1783 | __set_iter_shift(iter, node->shift); |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1784 | |
Matthew Wilcox | 148deab | 2016-12-14 15:08:49 -0800 | [diff] [blame] | 1785 | if (flags & RADIX_TREE_ITER_TAGGED) |
| 1786 | set_iter_tags(iter, node, offset, tag); |
Konstantin Khlebnikov | 78c1d78 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1787 | |
| 1788 | return node->slots + offset; |
| 1789 | } |
| 1790 | EXPORT_SYMBOL(radix_tree_next_chunk); |
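
/*
 * Usage sketch (illustrative): the radix_tree_for_each_slot() macros are
 * built on radix_tree_next_chunk(). Under RCU-only protection a deref may
 * observe a moved entry and must be retried, as below.
 */
static unsigned long example_count_items(struct radix_tree_root *tree)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned long count = 0;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, tree, &iter, 0) {
		void *item = radix_tree_deref_slot(slot);

		if (!item)
			continue;
		if (radix_tree_deref_retry(item)) {
			/* The tree changed under us; retry this index. */
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		count++;
	}
	rcu_read_unlock();
	return count;
}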
| 1791 | |
| 1792 | /** |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1793 | * radix_tree_gang_lookup - perform multiple lookup on a radix tree |
| 1794 | * @root: radix tree root |
| 1795 | * @results: where the results of the lookup are placed |
| 1796 | * @first_index: start the lookup from this key |
| 1797 | * @max_items: place up to this many items at *results |
| 1798 | * |
| 1799 | * Performs an index-ascending scan of the tree for present items. Places |
| 1800 | * them at *@results and returns the number of items which were placed at |
| 1801 | * *@results. |
| 1802 | * |
| 1803 | * The implementation is naive. |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1804 | * |
| 1805 | * Like radix_tree_lookup, radix_tree_gang_lookup may be called under |
| 1806 | * rcu_read_lock. In this case, rather than the returned results being |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 1807 | * an atomic snapshot of the tree at a single point in time, the |
| 1808 | * semantics of an RCU-protected gang lookup are as though multiple |
| 1809 | * radix_tree_lookups had been issued under individual locks, with the |
| 1810 | * results stored in 'results'. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1811 | */ |
| 1812 | unsigned int |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1813 | radix_tree_gang_lookup(const struct radix_tree_root *root, void **results, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1814 | unsigned long first_index, unsigned int max_items) |
| 1815 | { |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1816 | struct radix_tree_iter iter; |
| 1817 | void **slot; |
| 1818 | unsigned int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1819 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1820 | if (unlikely(!max_items)) |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1821 | return 0; |
| 1822 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1823 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
Matthew Wilcox | 46437f9 | 2016-02-02 16:57:52 -0800 | [diff] [blame] | 1824 | results[ret] = rcu_dereference_raw(*slot); |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1825 | if (!results[ret]) |
| 1826 | continue; |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 1827 | if (radix_tree_is_internal_node(results[ret])) { |
Matthew Wilcox | 46437f9 | 2016-02-02 16:57:52 -0800 | [diff] [blame] | 1828 | slot = radix_tree_iter_retry(&iter); |
| 1829 | continue; |
| 1830 | } |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1831 | if (++ret == max_items) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1832 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1833 | } |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1834 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1835 | return ret; |
| 1836 | } |
| 1837 | EXPORT_SYMBOL(radix_tree_gang_lookup); |
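
/*
 * Usage sketch (illustrative): batched lookup. 'struct example_item' with
 * an 'index' member is hypothetical; real users resume the scan from the
 * index of the last item found, as the page cache does with pages.
 */
struct example_item {
	unsigned long index;
};

static void example_process_all(struct radix_tree_root *tree,
				void (*process)(struct example_item *))
{
	struct example_item *batch[16];
	unsigned long first = 0;
	unsigned int i, found;

	do {
		found = radix_tree_gang_lookup(tree, (void **)batch, first,
					       ARRAY_SIZE(batch));
		for (i = 0; i < found; i++)
			process(batch[i]);
		if (found)
			/* Note: would wrap if the last index were ~0UL. */
			first = batch[found - 1]->index + 1;
	} while (found == ARRAY_SIZE(batch));
}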
| 1838 | |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1839 | /** |
| 1840 | * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree |
| 1841 | * @root: radix tree root |
| 1842 | * @results: where the results of the lookup are placed |
Hugh Dickins | 6328650 | 2011-08-03 16:21:18 -0700 | [diff] [blame] | 1843 | * @indices: where their indices should be placed (but usually NULL) |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1844 | * @first_index: start the lookup from this key |
| 1845 | * @max_items: place up to this many items at *results |
| 1846 | * |
| 1847 | * Performs an index-ascending scan of the tree for present items. Places |
| 1848 | * their slots at *@results and returns the number of items which were |
| 1849 | * placed at *@results. |
| 1850 | * |
| 1851 | * The implementation is naive. |
| 1852 | * |
| 1853 | * Like radix_tree_gang_lookup as far as RCU and locking go. Slots must |
| 1854 | * be dereferenced with radix_tree_deref_slot, and if using only RCU |
| 1855 | * protection, radix_tree_deref_slot may fail, requiring a retry. |
| 1856 | */ |
| 1857 | unsigned int |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1858 | radix_tree_gang_lookup_slot(const struct radix_tree_root *root, |
Hugh Dickins | 6328650 | 2011-08-03 16:21:18 -0700 | [diff] [blame] | 1859 | void ***results, unsigned long *indices, |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1860 | unsigned long first_index, unsigned int max_items) |
| 1861 | { |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1862 | struct radix_tree_iter iter; |
| 1863 | void **slot; |
| 1864 | unsigned int ret = 0; |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1865 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1866 | if (unlikely(!max_items)) |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1867 | return 0; |
| 1868 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1869 | radix_tree_for_each_slot(slot, root, &iter, first_index) { |
| 1870 | results[ret] = slot; |
Hugh Dickins | 6328650 | 2011-08-03 16:21:18 -0700 | [diff] [blame] | 1871 | if (indices) |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1872 | indices[ret] = iter.index; |
| 1873 | if (++ret == max_items) |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1874 | break; |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1875 | } |
| 1876 | |
| 1877 | return ret; |
| 1878 | } |
| 1879 | EXPORT_SYMBOL(radix_tree_gang_lookup_slot); |
| 1880 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1881 | /** |
| 1882 | * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree |
| 1883 | * based on a tag |
| 1884 | * @root: radix tree root |
| 1885 | * @results: where the results of the lookup are placed |
| 1886 | * @first_index: start the lookup from this key |
| 1887 | * @max_items: place up to this many items at *results |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1888 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1889 | * |
| 1890 | * Performs an index-ascending scan of the tree for present items which |
| 1891 | * have the tag indexed by @tag set. Places the items at *@results and |
| 1892 | * returns the number of items which were placed at *@results. |
| 1893 | */ |
| 1894 | unsigned int |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1895 | radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results, |
Jonathan Corbet | daff89f | 2006-03-25 03:08:05 -0800 | [diff] [blame] | 1896 | unsigned long first_index, unsigned int max_items, |
| 1897 | unsigned int tag) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1898 | { |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1899 | struct radix_tree_iter iter; |
| 1900 | void **slot; |
| 1901 | unsigned int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1902 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1903 | if (unlikely(!max_items)) |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 1904 | return 0; |
| 1905 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1906 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
Matthew Wilcox | 46437f9 | 2016-02-02 16:57:52 -0800 | [diff] [blame] | 1907 | results[ret] = rcu_dereference_raw(*slot); |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1908 | if (!results[ret]) |
| 1909 | continue; |
Matthew Wilcox | b194d16 | 2016-05-20 17:03:30 -0700 | [diff] [blame] | 1910 | if (radix_tree_is_internal_node(results[ret])) { |
Matthew Wilcox | 46437f9 | 2016-02-02 16:57:52 -0800 | [diff] [blame] | 1911 | slot = radix_tree_iter_retry(&iter); |
| 1912 | continue; |
| 1913 | } |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1914 | if (++ret == max_items) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1915 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1916 | } |
Nick Piggin | 7cf9c2c | 2006-12-06 20:33:44 -0800 | [diff] [blame] | 1917 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1918 | return ret; |
| 1919 | } |
| 1920 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag); |
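/*
 * Example: draining tagged entries in batches.  An illustrative sketch:
 * my_tree, MY_TAG and writeback_one() are placeholders.  Because the
 * results carry no indices, the entries themselves are assumed to know
 * their own index (as struct page does), and writeback_one() is assumed
 * to return the index just past the entry it processed.
 *
 *	void *batch[16];
 *	unsigned long index = 0;
 *	unsigned int i, nr;
 *
 *	rcu_read_lock();
 *	do {
 *		nr = radix_tree_gang_lookup_tag(&my_tree, batch, index,
 *						16, MY_TAG);
 *		for (i = 0; i < nr; i++)
 *			index = writeback_one(batch[i]);
 *	} while (nr == 16);
 *	rcu_read_unlock();
 */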
| 1921 | |
| 1922 | /** |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1923 | * radix_tree_gang_lookup_tag_slot - perform multiple slot lookups on a |
| 1924 | * radix tree based on a tag |
| 1925 | * @root: radix tree root |
| 1926 | * @results: where the results of the lookup are placed |
| 1927 | * @first_index: start the lookup from this key |
| 1928 | * @max_items: place up to this many items at *results |
| 1929 | * @tag: the tag index (< RADIX_TREE_MAX_TAGS) |
| 1930 | * |
| 1931 | * Performs an index-ascending scan of the tree for present items which |
| 1932 | * have the tag indexed by @tag set. Places the slots at *@results and |
| 1933 | * returns the number of slots which were placed at *@results. |
| 1934 | */ |
| 1935 | unsigned int |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 1936 | radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root, |
| 1937 | void ***results, unsigned long first_index, |
| 1938 | unsigned int max_items, unsigned int tag) |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1939 | { |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1940 | struct radix_tree_iter iter; |
| 1941 | void **slot; |
| 1942 | unsigned int ret = 0; |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1943 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1944 | if (unlikely(!max_items)) |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1945 | return 0; |
| 1946 | |
Konstantin Khlebnikov | cebbd29 | 2012-03-28 14:42:53 -0700 | [diff] [blame] | 1947 | radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { |
| 1948 | results[ret] = slot; |
| 1949 | if (++ret == max_items) |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1950 | break; |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1951 | } |
| 1952 | |
| 1953 | return ret; |
| 1954 | } |
| 1955 | EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); |
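/*
 * Example: replacing tagged entries in place through their slots.  A
 * sketch only; my_tree, my_tree_lock, MY_TAG and make_replacement() are
 * assumptions, and the tree lock must be held because the slots are
 * written through.
 *
 *	void **slots[16];
 *	unsigned int i, nr;
 *
 *	spin_lock(&my_tree_lock);
 *	nr = radix_tree_gang_lookup_tag_slot(&my_tree, slots, 0, 16,
 *						MY_TAG);
 *	for (i = 0; i < nr; i++) {
 *		void *old = radix_tree_deref_slot_protected(slots[i],
 *						&my_tree_lock);
 *		radix_tree_replace_slot(&my_tree, slots[i],
 *						make_replacement(old));
 *	}
 *	spin_unlock(&my_tree_lock);
 */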
| 1956 | |
Nick Piggin | 47feff2 | 2008-07-25 19:45:29 -0700 | [diff] [blame] | 1957 | /** |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1958 | * __radix_tree_delete_node - try to free node after clearing a slot |
| 1959 | * @root: radix tree root |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1960 | * @node: node containing the cleared slot |
Johannes Weiner | ea07b86 | 2017-01-06 19:21:43 -0500 | [diff] [blame] | 1961 | * @update_node: callback for changing leaf nodes |
| 1962 | * @private: private data to pass to @update_node |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1963 | * |
| 1964 | * After clearing a slot in @node of the radix tree |
| 1965 | * rooted at @root, call this function to attempt to free the |
| 1966 | * node and shrink the tree. |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1967 | */ |
Johannes Weiner | 14b4687 | 2016-12-12 16:43:52 -0800 | [diff] [blame] | 1968 | void __radix_tree_delete_node(struct radix_tree_root *root, |
Johannes Weiner | ea07b86 | 2017-01-06 19:21:43 -0500 | [diff] [blame] | 1969 | struct radix_tree_node *node, |
| 1970 | radix_tree_update_node_t update_node, |
| 1971 | void *private) |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1972 | { |
Johannes Weiner | ea07b86 | 2017-01-06 19:21:43 -0500 | [diff] [blame] | 1973 | delete_node(root, node, update_node, private); |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1974 | } |
| 1975 | |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1976 | static bool __radix_tree_delete(struct radix_tree_root *root, |
| 1977 | struct radix_tree_node *node, void **slot) |
| 1978 | { |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1979 | void *old = rcu_dereference_raw(*slot); |
| 1980 | int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0; |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1981 | unsigned offset = get_slot_offset(node, slot); |
| 1982 | int tag; |
| 1983 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1984 | if (is_idr(root)) |
| 1985 | node_tag_set(root, node, IDR_FREE, offset); |
| 1986 | else |
| 1987 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
| 1988 | node_tag_clear(root, node, tag, offset); |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1989 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 1990 | replace_slot(slot, NULL, node, -1, exceptional); |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1991 | return node && delete_node(root, node, NULL, NULL); |
| 1992 | } |
| 1993 | |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 1994 | /** |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 1995 | * radix_tree_iter_delete - delete the entry at this iterator position |
| 1996 | * @root: radix tree root |
| 1997 | * @iter: iterator state |
| 1998 | * @slot: pointer to slot |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1999 | * |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 2000 | * Delete the entry at the position currently pointed to by the iterator. |
| 2001 | * This may result in the current node being freed; if it is, the iterator |
| 2002 | * is advanced so that it will not reference the freed memory. This |
| 2003 | * function may be called without any locking if there are no other threads |
| 2004 | * which can access this tree. |
| 2005 | */ |
| 2006 | void radix_tree_iter_delete(struct radix_tree_root *root, |
| 2007 | struct radix_tree_iter *iter, void **slot) |
| 2008 | { |
| 2009 | if (__radix_tree_delete(root, iter->node, slot)) |
| 2010 | iter->index = iter->next_index; |
| 2011 | } |
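/*
 * Example: emptying a tree while iterating over it.  A sketch assuming
 * the caller has exclusive access (or holds the tree lock); my_tree and
 * should_remove() are placeholders.  Deleting the current entry may free
 * the node holding it, in which case the iterator is advanced for us.
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		if (should_remove(rcu_dereference_raw(*slot)))
 *			radix_tree_iter_delete(&my_tree, &iter, slot);
 *	}
 */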
| 2012 | |
| 2013 | /** |
| 2014 | * radix_tree_delete_item - delete an item from a radix tree |
| 2015 | * @root: radix tree root |
| 2016 | * @index: index key |
| 2017 | * @item: expected item |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2018 | * |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 2019 | * Remove @item at @index from the radix tree rooted at @root. |
| 2020 | * |
| 2021 | * Return: The deleted entry, or %NULL if it was not present |
| 2022 | * or the entry at the given @index was not @item. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2023 | */ |
Johannes Weiner | 53c59f2 | 2014-04-03 14:47:39 -0700 | [diff] [blame] | 2024 | void *radix_tree_delete_item(struct radix_tree_root *root, |
| 2025 | unsigned long index, void *item) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2026 | { |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 2027 | struct radix_tree_node *node = NULL; |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 2028 | void **slot; |
| 2029 | void *entry; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2030 | |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 2031 | entry = __radix_tree_lookup(root, index, &node, &slot); |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 2032 | if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE, |
| 2033 | get_slot_offset(node, slot)))) |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 2034 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2035 | |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 2036 | if (item && entry != item) |
| 2037 | return NULL; |
| 2038 | |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 2039 | __radix_tree_delete(root, node, slot); |
Christoph Lameter | 201b626 | 2005-09-06 15:16:46 -0700 | [diff] [blame] | 2040 | |
Johannes Weiner | 139e561 | 2014-04-03 14:47:54 -0700 | [diff] [blame] | 2041 | return entry; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2042 | } |
Johannes Weiner | 53c59f2 | 2014-04-03 14:47:39 -0700 | [diff] [blame] | 2043 | EXPORT_SYMBOL(radix_tree_delete_item); |
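/*
 * Example: deleting an entry only if it is still the expected one, as
 * when several threads race to remove the same item.  A sketch; my_tree,
 * my_tree_lock, my_item and free_my_item() are the caller's own.
 *
 *	void *entry;
 *
 *	spin_lock(&my_tree_lock);
 *	entry = radix_tree_delete_item(&my_tree, index, my_item);
 *	spin_unlock(&my_tree_lock);
 *	if (entry == my_item)
 *		free_my_item(my_item);
 */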
| 2044 | |
| 2045 | /** |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 2046 | * radix_tree_delete - delete an entry from a radix tree |
| 2047 | * @root: radix tree root |
| 2048 | * @index: index key |
Johannes Weiner | 53c59f2 | 2014-04-03 14:47:39 -0700 | [diff] [blame] | 2049 | * |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 2050 | * Remove the entry at @index from the radix tree rooted at @root. |
Johannes Weiner | 53c59f2 | 2014-04-03 14:47:39 -0700 | [diff] [blame] | 2051 | * |
Matthew Wilcox | 0ac398e | 2017-01-28 09:56:22 -0500 | [diff] [blame] | 2052 | * Return: The deleted entry, or %NULL if it was not present. |
Johannes Weiner | 53c59f2 | 2014-04-03 14:47:39 -0700 | [diff] [blame] | 2053 | */ |
| 2054 | void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) |
| 2055 | { |
| 2056 | return radix_tree_delete_item(root, index, NULL); |
| 2057 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2058 | EXPORT_SYMBOL(radix_tree_delete); |
| 2059 | |
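/**
 * radix_tree_clear_tags - clear all tags on a slot
 * @root:	radix tree root
 * @node:	node containing @slot, or NULL if @slot is the root slot
 * @slot:	slot whose tags are to be cleared
 *
 * Clear every tag for the entry occupying @slot; if the entry lives
 * directly in the root, all root tags are cleared instead.
 */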
Johannes Weiner | d3798ae | 2016-10-04 22:02:08 +0200 | [diff] [blame] | 2060 | void radix_tree_clear_tags(struct radix_tree_root *root, |
| 2061 | struct radix_tree_node *node, |
| 2062 | void **slot) |
Matthew Wilcox | d604c32 | 2016-05-20 17:03:45 -0700 | [diff] [blame] | 2063 | { |
Matthew Wilcox | d604c32 | 2016-05-20 17:03:45 -0700 | [diff] [blame] | 2064 | if (node) { |
| 2065 | unsigned int tag, offset = get_slot_offset(node, slot); |
| 2066 | for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) |
| 2067 | node_tag_clear(root, node, tag, offset); |
| 2068 | } else { |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 2069 | root_tag_clear_all(root); |
Matthew Wilcox | d604c32 | 2016-05-20 17:03:45 -0700 | [diff] [blame] | 2070 | } |
Matthew Wilcox | d604c32 | 2016-05-20 17:03:45 -0700 | [diff] [blame] | 2071 | } |
| 2072 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2073 | /** |
| 2074 | * radix_tree_tagged - test whether any items in the tree are tagged |
| 2075 | * @root: radix tree root |
| 2076 | * @tag: tag to test |
| 2077 | */ |
Matthew Wilcox | 35534c8 | 2016-12-19 17:43:19 -0500 | [diff] [blame] | 2078 | int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2079 | { |
Nick Piggin | 612d6c1 | 2006-06-23 02:03:22 -0700 | [diff] [blame] | 2080 | return root_tag_get(root, tag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2081 | } |
| 2082 | EXPORT_SYMBOL(radix_tree_tagged); |
| 2083 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 2084 | /** |
| 2085 | * idr_preload - preload for idr_alloc() |
| 2086 | * @gfp_mask: allocation mask to use for preloading |
| 2087 | * |
| 2088 | * Preallocate memory to use for the next call to idr_alloc(). This function |
| 2089 | * returns with preemption disabled. It will be enabled by idr_preload_end(). |
| 2090 | */ |
| 2091 | void idr_preload(gfp_t gfp_mask) |
| 2092 | { |
| 2093 | __radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE); |
| 2094 | } |
| 2095 | EXPORT_SYMBOL(idr_preload); |
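/*
 * Example of the preload pattern (a sketch; my_lock, my_idr and my_ptr
 * belong to the caller).  The preallocation is done while sleeping is
 * still allowed, so the allocation under the spinlock can use GFP_NOWAIT.
 *
 *	int id;
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, my_ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 */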
| 2096 | |
Matthew Wilcox | 7ad3d4d | 2016-12-16 11:55:56 -0500 | [diff] [blame] | 2097 | /** |
| 2098 | * ida_pre_get - reserve resources for ida allocation |
| 2099 | * @ida: ida handle |
| 2100 | * @gfp: memory allocation flags |
| 2101 | * |
| 2102 | * This function should be called before calling ida_get_new_above(). If it |
| 2103 | * is unable to allocate memory, it will return %0. On success, it returns %1. |
| 2104 | */ |
| 2105 | int ida_pre_get(struct ida *ida, gfp_t gfp) |
| 2106 | { |
| 2107 | __radix_tree_preload(gfp, IDA_PRELOAD_SIZE); |
| 2108 | /* |
| 2109 | * The IDA API has no preload_end() equivalent. Instead, |
| 2110 | * ida_get_new() can return -EAGAIN, prompting the caller |
| 2111 | * to return to the ida_pre_get() step. |
| 2112 | */ |
| 2113 | preempt_enable(); |
| 2114 | |
| 2115 | if (!this_cpu_read(ida_bitmap)) { |
| 2116 | struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); |
| 2117 | if (!bitmap) |
| 2118 | return 0; |
| 2119 | bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap); |
| 2120 | kfree(bitmap); |
| 2121 | } |
| 2122 | |
| 2123 | return 1; |
| 2124 | } |
| 2125 | EXPORT_SYMBOL(ida_pre_get); |
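/*
 * Example of the pre-get/retry pattern described above (a sketch;
 * my_ida and my_lock are the caller's):
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new(&my_ida, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *	if (ret)
 *		return ret;
 */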
| 2126 | |
Matthew Wilcox | 0a835c4 | 2016-12-20 10:27:56 -0500 | [diff] [blame] | 2127 | void **idr_get_free(struct radix_tree_root *root, |
| 2128 | struct radix_tree_iter *iter, gfp_t gfp, int end) |
| 2129 | { |
| 2130 | struct radix_tree_node *node = NULL, *child; |
| 2131 | void **slot = (void **)&root->rnode; |
| 2132 | unsigned long maxindex, start = iter->next_index; |
| 2133 | unsigned long max = end > 0 ? end - 1 : INT_MAX; |
| 2134 | unsigned int shift, offset = 0; |
| 2135 | |
| 2136 | grow: |
| 2137 | shift = radix_tree_load_root(root, &child, &maxindex); |
| 2138 | if (!radix_tree_tagged(root, IDR_FREE)) |
| 2139 | start = max(start, maxindex + 1); |
| 2140 | if (start > max) |
| 2141 | return ERR_PTR(-ENOSPC); |
| 2142 | |
| 2143 | if (start > maxindex) { |
| 2144 | int error = radix_tree_extend(root, gfp, start, shift); |
| 2145 | if (error < 0) |
| 2146 | return ERR_PTR(error); |
| 2147 | shift = error; |
| 2148 | child = rcu_dereference_raw(root->rnode); |
| 2149 | } |
| 2150 | |
| 2151 | while (shift) { |
| 2152 | shift -= RADIX_TREE_MAP_SHIFT; |
| 2153 | if (child == NULL) { |
| 2154 | /* Have to add a child node. */ |
| 2155 | child = radix_tree_node_alloc(gfp, node, shift, offset, |
| 2156 | 0, 0); |
| 2157 | if (!child) |
| 2158 | return ERR_PTR(-ENOMEM); |
| 2159 | all_tag_set(child, IDR_FREE); |
| 2160 | rcu_assign_pointer(*slot, node_to_entry(child)); |
| 2161 | if (node) |
| 2162 | node->count++; |
| 2163 | } else if (!radix_tree_is_internal_node(child)) |
| 2164 | break; |
| 2165 | |
| 2166 | node = entry_to_node(child); |
| 2167 | offset = radix_tree_descend(node, &child, start); |
| 2168 | if (!tag_get(node, IDR_FREE, offset)) { |
| 2169 | offset = radix_tree_find_next_bit(node, IDR_FREE, |
| 2170 | offset + 1); |
| 2171 | start = next_index(start, node, offset); |
| 2172 | if (start > max) |
| 2173 | return ERR_PTR(-ENOSPC); |
| 2174 | while (offset == RADIX_TREE_MAP_SIZE) { |
| 2175 | offset = node->offset + 1; |
| 2176 | node = node->parent; |
| 2177 | if (!node) |
| 2178 | goto grow; |
| 2179 | shift = node->shift; |
| 2180 | } |
| 2181 | child = rcu_dereference_raw(node->slots[offset]); |
| 2182 | } |
| 2183 | slot = &node->slots[offset]; |
| 2184 | } |
| 2185 | |
| 2186 | iter->index = start; |
| 2187 | if (node) |
| 2188 | iter->next_index = 1 + min(max, (start | node_maxindex(node))); |
| 2189 | else |
| 2190 | iter->next_index = 1; |
| 2191 | iter->node = node; |
| 2192 | __set_iter_shift(iter, shift); |
| 2193 | set_iter_tags(iter, node, offset, IDR_FREE); |
| 2194 | |
| 2195 | return slot; |
| 2196 | } |
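/*
 * For context: idr_alloc() in lib/idr.c drives this function roughly as
 * follows (a paraphrased sketch with the argument checks trimmed).  The
 * returned slot is filled in and its IDR_FREE tag cleared through the
 * iterator state set up above.
 *
 *	radix_tree_iter_init(&iter, start);
 *	slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
 *	if (IS_ERR(slot))
 *		return PTR_ERR(slot);
 *	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
 *	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
 *	return iter.index;
 */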
| 2197 | |
| 2198 | /** |
| 2199 | * idr_destroy - release all internal memory from an IDR |
| 2200 | * @idr: idr handle |
| 2201 | * |
| 2202 | * After this function is called, the IDR is empty, and may be reused or |
| 2203 | * the data structure containing it may be freed. |
| 2204 | * |
| 2205 | * A typical clean-up sequence for objects stored in an idr tree will use |
| 2206 | * idr_for_each() to free all objects, if necessary, then idr_destroy() to |
| 2207 | * free the memory used to keep track of those objects. |
| 2208 | */ |
| 2209 | void idr_destroy(struct idr *idr) |
| 2210 | { |
| 2211 | struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode); |
| 2212 | if (radix_tree_is_internal_node(node)) |
| 2213 | radix_tree_free_nodes(node); |
| 2214 | idr->idr_rt.rnode = NULL; |
| 2215 | root_tag_set(&idr->idr_rt, IDR_FREE); |
| 2216 | } |
| 2217 | EXPORT_SYMBOL(idr_destroy); |
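/*
 * Example of the clean-up sequence described above (a sketch; my_idr is
 * the caller's, and the stored objects are assumed to have been
 * allocated with kmalloc()):
 *
 *	static int free_entry(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_entry, NULL);
 *	idr_destroy(&my_idr);
 */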
| 2218 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2219 | static void |
Johannes Weiner | 449dd69 | 2014-04-03 14:47:56 -0700 | [diff] [blame] | 2220 | radix_tree_node_ctor(void *arg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2221 | { |
Johannes Weiner | 449dd69 | 2014-04-03 14:47:56 -0700 | [diff] [blame] | 2222 | struct radix_tree_node *node = arg; |
| 2223 | |
| 2224 | memset(node, 0, sizeof(*node)); |
| 2225 | INIT_LIST_HEAD(&node->private_list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2226 | } |
| 2227 | |
Kirill A. Shutemov | c78c66d | 2016-07-26 15:26:02 -0700 | [diff] [blame] | 2228 | static __init unsigned long __maxindex(unsigned int height) |
| 2229 | { |
| 2230 | unsigned int width = height * RADIX_TREE_MAP_SHIFT; |
| 2231 | int shift = RADIX_TREE_INDEX_BITS - width; |
| 2232 | |
| 2233 | if (shift < 0) |
| 2234 | return ~0UL; |
| 2235 | if (shift >= BITS_PER_LONG) |
| 2236 | return 0UL; |
| 2237 | return ~0UL >> shift; |
| 2238 | } |
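/*
 * Worked example, assuming RADIX_TREE_MAP_SHIFT == 6 on a 64-bit
 * machine: height 1 gives width 6, shift 58, maxindex 63; height 2
 * gives width 12, shift 52, maxindex 4095.  Height 0 gives shift 64,
 * which is >= BITS_PER_LONG, so maxindex 0: a root-only tree covers
 * just index 0.
 */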
| 2239 | |
| 2240 | static __init void radix_tree_init_maxnodes(void) |
| 2241 | { |
| 2242 | unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1]; |
| 2243 | unsigned int i, j; |
| 2244 | |
| 2245 | for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++) |
| 2246 | height_to_maxindex[i] = __maxindex(i); |
| 2247 | for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) { |
| 2248 | for (j = i; j > 0; j--) |
| 2249 | height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1; |
| 2250 | } |
| 2251 | } |
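/*
 * Continuing the worked example with RADIX_TREE_MAP_SHIFT == 6:
 * height_to_maxnodes[1] = 1 (just the root node), while
 * height_to_maxnodes[2] = 65, i.e. one root node plus the 64 children
 * needed to cover indices 0..4095.
 */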
| 2252 | |
Sebastian Andrzej Siewior | d544abd5 | 2016-11-03 15:50:01 +0100 | [diff] [blame] | 2253 | static int radix_tree_cpu_dead(unsigned int cpu) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2254 | { |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 2255 | struct radix_tree_preload *rtp; |
| 2256 | struct radix_tree_node *node; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2257 | |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 2258 | /* Free per-cpu pool of preloaded nodes */ |
Sebastian Andrzej Siewior | d544abd5 | 2016-11-03 15:50:01 +0100 | [diff] [blame] | 2259 | rtp = &per_cpu(radix_tree_preloads, cpu); |
| 2260 | while (rtp->nr) { |
| 2261 | node = rtp->nodes; |
Matthew Wilcox | 1293d5c | 2017-01-16 16:41:29 -0500 | [diff] [blame^] | 2262 | rtp->nodes = node->parent; |
Sebastian Andrzej Siewior | d544abd5 | 2016-11-03 15:50:01 +0100 | [diff] [blame] | 2263 | kmem_cache_free(radix_tree_node_cachep, node); |
| 2264 | rtp->nr--; |
Matthew Wilcox | 2fcd900 | 2016-05-20 17:03:04 -0700 | [diff] [blame] | 2265 | } |
Matthew Wilcox | 7ad3d4d | 2016-12-16 11:55:56 -0500 | [diff] [blame] | 2266 | kfree(per_cpu(ida_bitmap, cpu)); |
| 2267 | per_cpu(ida_bitmap, cpu) = NULL; |
Sebastian Andrzej Siewior | d544abd5 | 2016-11-03 15:50:01 +0100 | [diff] [blame] | 2268 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2269 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2270 | |
| 2271 | void __init radix_tree_init(void) |
| 2272 | { |
Sebastian Andrzej Siewior | d544abd5 | 2016-11-03 15:50:01 +0100 | [diff] [blame] | 2273 | int ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2274 | radix_tree_node_cachep = kmem_cache_create("radix_tree_node", |
| 2275 | sizeof(struct radix_tree_node), 0, |
Christoph Lameter | 488514d | 2008-04-28 02:12:05 -0700 | [diff] [blame] | 2276 | SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, |
| 2277 | radix_tree_node_ctor); |
Kirill A. Shutemov | c78c66d | 2016-07-26 15:26:02 -0700 | [diff] [blame] | 2278 | radix_tree_init_maxnodes(); |
Sebastian Andrzej Siewior | d544abd5 | 2016-11-03 15:50:01 +0100 | [diff] [blame] | 2279 | ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead", |
| 2280 | NULL, radix_tree_cpu_dead); |
| 2281 | WARN_ON(ret < 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2282 | } |