/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 01 - internal entry
 * 10 - exceptional entry
 * 11 - locked exceptional entry
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that you cannot store NULL in the tree as a value for the index.
 */
#define RADIX_TREE_ENTRY_MASK		3UL
#define RADIX_TREE_INTERNAL_NODE	1UL

/*
 * Most users of the radix tree store pointers but shmem/tmpfs stores swap
 * entries in the same tree.  They are marked as exceptional entries to
 * distinguish them from pointers to struct page.
 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
 */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2
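
/*
 * A minimal sketch of how a user might pack a small integer into an
 * exceptional entry and unpack it again (the names 'val' and 'entry'
 * are hypothetical, not part of this API):
 *
 *	unsigned long val = 42;
 *	void *entry = (void *)((val << RADIX_TREE_EXCEPTIONAL_SHIFT) |
 *			       RADIX_TREE_EXCEPTIONAL_ENTRY);
 *
 *	if (radix_tree_exceptional_entry(entry))
 *		val = (unsigned long)entry >> RADIX_TREE_EXCEPTIONAL_SHIFT;
 *
 * This mirrors the way shmem encodes swap entries; the value must be small
 * enough to survive the two-bit shift.
 */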

static inline bool radix_tree_is_internal_node(void *ptr)
{
	return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
				RADIX_TREE_INTERNAL_NODE;
}

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAX_TAGS 3

#ifndef RADIX_TREE_MAP_SHIFT
#define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
#endif

#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_TAG_LONGS	\
	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

/* Internally used bits of node->count */
#define RADIX_TREE_COUNT_SHIFT	(RADIX_TREE_MAP_SHIFT + 1)
#define RADIX_TREE_COUNT_MASK	((1UL << RADIX_TREE_COUNT_SHIFT) - 1)

struct radix_tree_node {
	unsigned char	shift;	/* Bits remaining in each slot */
	unsigned char	offset;	/* Slot offset in parent */
	unsigned int	count;
	union {
		struct {
			/* Used when ascending tree */
			struct radix_tree_node *parent;
			/* For tree user */
			void *private_data;
		};
		/* Used when freeing node */
		struct rcu_head	rcu_head;
	};
	/* For tree user */
	struct list_head private_list;
	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};

/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
struct radix_tree_root {
	gfp_t			gfp_mask;
	struct radix_tree_node	__rcu *rnode;
};

#define RADIX_TREE_INIT(mask)	{					\
	.gfp_mask = (mask),						\
	.rnode = NULL,							\
}

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(mask)

#define INIT_RADIX_TREE(root, mask)					\
do {									\
	(root)->gfp_mask = (mask);					\
	(root)->rnode = NULL;						\
} while (0)

static inline bool radix_tree_empty(struct radix_tree_root *root)
{
	return root->rnode == NULL;
}

/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronization (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes, is entirely up to the API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_slot
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 8 functions (all except radix_tree_tagged) can be called
 * locklessly, using RCU.  The caller must ensure calls to these functions
 * are made within rcu_read_lock() regions.  Other readers (lock-free or
 * otherwise) and modifications may be running concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items.  So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the radix tree *and* a synchronize_rcu() grace
 * period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */
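
/*
 * A minimal sketch of the usual convention (the tree, lock and item names
 * are hypothetical): writers serialize on a lock of their choosing, while
 * lock-free readers rely only on rcu_read_lock().  See radix_tree_preload()
 * below for allocating node memory safely before taking a spinlock.
 *
 *	spin_lock(&my_lock);				(writer)
 *	radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *
 *	rcu_read_lock();				(lock-free reader)
 *	item = radix_tree_lookup(&my_tree, index);
 *	rcu_read_unlock();
 */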

/**
 * radix_tree_deref_slot	- dereference a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference.  Not required if write lock is
 * held (i.e. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 */
static inline void *radix_tree_deref_slot(void **pslot)
{
	return rcu_dereference(*pslot);
}

/**
 * radix_tree_deref_slot_protected	- dereference a slot without RCU lock but with tree lock held
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * Similar to radix_tree_deref_slot but only used during migration when a
 * page's mapping is being moved.  The caller does not hold the RCU read lock
 * but it must hold the tree lock to prevent parallel updates.
 */
static inline void *radix_tree_deref_slot_protected(void **pslot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry	- check radix_tree_deref_slot
 * @arg:	pointer returned by radix_tree_deref_slot
 * Returns:	0 if retry is not required, non-0 if a retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely(radix_tree_is_internal_node(arg));
}
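
/*
 * A minimal sketch of a lock-free lookup under RCU (tree and index names
 * hypothetical); when radix_tree_deref_retry() reports an internal entry,
 * the slot has moved and the lookup must be restarted:
 *
 *	void **slot;
 *	void *item;
 *
 *	rcu_read_lock();
 * repeat:
 *	item = NULL;
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot) {
 *		item = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(item))
 *			goto repeat;
 *	}
 *	rcu_read_unlock();
 */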

/**
 * radix_tree_exceptional_entry	- radix_tree_deref_slot gave exceptional entry?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if exceptional entry.
 */
static inline int radix_tree_exceptional_entry(void *arg)
{
	/* Not unlikely because radix_tree_exception often tested first */
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

/**
 * radix_tree_exception	- radix_tree_deref_slot returned either exception?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}

/**
 * radix_tree_replace_slot	- replace item in a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
static inline void radix_tree_replace_slot(void **pslot, void *item)
{
	BUG_ON(radix_tree_is_internal_node(item));
	rcu_assign_pointer(*pslot, item);
}
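
/*
 * A minimal usage sketch (tree, lock and item names hypothetical); the
 * write-side lock must cover both the slot lookup and the replacement:
 *
 *	spin_lock(&my_lock);
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot)
 *		radix_tree_replace_slot(slot, new_item);
 *	spin_unlock(&my_lock);
 */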

int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp);
int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
			unsigned order, void *);
static inline int radix_tree_insert(struct radix_tree_root *root,
			unsigned long index, void *entry)
{
	return __radix_tree_insert(root, index, 0, entry);
}
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
bool __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
struct radix_tree_node *radix_tree_replace_clear_tags(
				struct radix_tree_root *root,
				unsigned long index, void *entry);
unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
			void **results, unsigned long first_index,
			unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int fromtag, unsigned int totag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);

static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}
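
/*
 * A minimal sketch of the preload pattern (tree, lock and item names
 * hypothetical).  radix_tree_preload() returns 0 with preemption disabled
 * on success, so the insertion can then be performed under a spinlock
 * without risking an allocation failure, and radix_tree_preload_end()
 * re-enables preemption afterwards:
 *
 *	err = radix_tree_preload(GFP_KERNEL);
 *	if (!err) {
 *		spin_lock(&my_lock);
 *		err = radix_tree_insert(&my_tree, index, item);
 *		spin_unlock(&my_lock);
 *		radix_tree_preload_end();
 *	}
 */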

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 * @shift:	shift for the node that holds our slots
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	unsigned int	shift;
#endif
};

static inline unsigned int iter_shift(struct radix_tree_iter *iter)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	return iter->shift;
#else
	return 0;
#endif
}

#define RADIX_TREE_ITER_TAG_MASK	0x00FF	/* tag index in lower byte */
#define RADIX_TREE_ITER_TAGGED		0x0100	/* lookup tagged slots */
#define RADIX_TREE_ITER_CONTIG		0x0200	/* stop at first hole */

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
	 * in the case of a successful tagged chunk lookup.  If the lookup was
	 * unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * Also it fills @iter with data about chunk: position in the tree (index),
 * its end (next_index), and constructs a bit mask for tagged iterating (tags).
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	return NULL;
}
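
/*
 * A minimal sketch of retrying inside an RCU-protected iteration (tree name
 * hypothetical): returning NULL from radix_tree_iter_retry() makes the
 * for_each macro call radix_tree_next_chunk() again at the same index:
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *entry = radix_tree_deref_slot(slot);
 *		if (radix_tree_deref_retry(entry)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		...
 *	}
 */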

static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
	return iter->index + (slots << iter_shift(iter));
}

/**
 * radix_tree_iter_next - resume iterating when the chunk may be invalid
 * @iter:	iterator state
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * to continue the iteration from the next index.
 */
static inline __must_check
void **radix_tree_iter_next(struct radix_tree_iter *iter)
{
	iter->next_index = __radix_tree_iter_add(iter, 1);
	iter->tags = 0;
	return NULL;
}
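
/*
 * A minimal sketch of dropping a lock mid-iteration (tree and lock names
 * hypothetical); the chunk may be stale after the lock is retaken, so the
 * iteration is resumed from the next index instead:
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		...
 *		if (need_resched()) {
 *			slot = radix_tree_iter_next(&iter);
 *			spin_unlock(&my_lock);
 *			cond_resched();
 *			spin_lock(&my_lock);
 *		}
 *	}
 */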

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return (iter->next_index - iter->index) >> iter_shift(iter);
}

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 */
static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		void *canon = slot;

		iter->tags >>= 1;
		if (unlikely(!iter->tags))
			return NULL;
		while (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
					radix_tree_is_internal_node(slot[1])) {
			if (entry_to_node(slot[1]) == canon) {
				iter->tags >>= 1;
				iter->index = __radix_tree_iter_add(iter, 1);
				slot++;
				continue;
			}
			iter->next_index = __radix_tree_iter_add(iter, 1);
			return NULL;
		}
		if (likely(iter->tags & 1ul)) {
			iter->index = __radix_tree_iter_add(iter, 1);
			return slot + 1;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset;
			iter->index = __radix_tree_iter_add(iter, offset + 1);
			return slot + offset + 1;
		}
	} else {
		long count = radix_tree_chunk_size(iter);
		void *canon = slot;

		while (--count > 0) {
			slot++;
			iter->index = __radix_tree_iter_add(iter, 1);

			if (IS_ENABLED(CONFIG_RADIX_TREE_MULTIORDER) &&
			    radix_tree_is_internal_node(*slot)) {
				if (entry_to_node(*slot) == canon)
					continue;
				iter->next_index = iter->index;
				break;
			}

			if (likely(*slot))
				return slot;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;
}

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))
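
/*
 * A minimal usage sketch (tree name hypothetical); under the caller's
 * read-side locking, this visits every present entry in index order:
 *
 *	void **slot;
 *	struct radix_tree_iter iter;
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
 *		pr_info("index %lu: entry %p\n", iter.index, *slot);
 */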

/**
 * radix_tree_for_each_contig - iterate over contiguous slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_contig(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_CONTIG)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_CONTIG))

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED))
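
/*
 * A minimal tagged-iteration sketch (tree name and MY_TAG are hypothetical;
 * valid tag indices are 0 .. RADIX_TREE_MAX_TAGS - 1).  Only slots with the
 * tag set are visited:
 *
 *	radix_tree_for_each_tagged(slot, &my_tree, &iter, 0, MY_TAG)
 *		pr_info("tagged index %lu\n", iter.index);
 */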

#endif /* _LINUX_RADIX_TREE_H */