// SPDX-License-Identifier: GPL-2.0-only
/*
 * klist.c - Routines for manipulating klists.
 *
 * Copyright (C) 2005 Patrick Mochel
 *
 * This klist interface provides a couple of structures that wrap around
 * struct list_head to provide explicit list "head" (struct klist) and list
 * "node" (struct klist_node) objects. For struct klist, a spinlock is
 * included that protects access to the actual list itself. struct
 * klist_node provides a pointer to the klist that owns it and a kref
 * reference count that indicates the number of current users of that node
 * in the list.
 *
 * The entire point is to provide an interface for iterating over a list
 * that is safe and allows for modification of the list during the
 * iteration (e.g. insertion and removal), including modification of the
 * current node on the list.
 *
 * It works using a 3rd object type - struct klist_iter - that is declared
 * and initialized before an iteration. klist_next() is used to acquire the
 * next element in the list. It returns NULL if there are no more items.
 * Internally, that routine takes the klist's lock, decrements the
 * reference count of the previous klist_node and increments the count of
 * the next klist_node. It then drops the lock and returns.
 *
 * There are primitives for adding and removing nodes to/from a klist.
 * When deleting, klist_del() will simply decrement the reference count.
 * Only when the count goes to 0 is the node removed from the list.
 * klist_remove() will try to delete the node from the list and block until
 * it is actually removed. This is useful for objects (like devices) that
 * have been removed from the system and must be freed (but must wait until
 * all accessors have finished).
 */
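
/*
 * A minimal usage sketch (illustration only, not part of this file),
 * assuming a hypothetical struct my_obj that embeds a klist_node as
 * "knode" and a klist called my_klist holding such objects:
 *
 *	struct my_obj {
 *		struct klist_node	knode;
 *		int			data;
 *	};
 *
 *	static struct klist my_klist;
 *
 *	static void walk_objects(void)
 *	{
 *		struct klist_iter iter;
 *		struct klist_node *kn;
 *
 *		klist_iter_init(&my_klist, &iter);
 *		while ((kn = klist_next(&iter)) != NULL) {
 *			struct my_obj *obj =
 *				container_of(kn, struct my_obj, knode);
 *			pr_info("obj data: %d\n", obj->data);
 *		}
 *		klist_iter_exit(&iter);
 *	}
 *
 * The iterator pins the current node with a reference, so the list (and
 * even the current node) may be modified concurrently while walking it.
 */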

#include <linux/klist.h>
#include <linux/export.h>
#include <linux/sched.h>

/*
 * Use the lowest bit of n_klist to mark deleted nodes and exclude
 * dead ones from iteration.
 */
#define KNODE_DEAD		1LU
#define KNODE_KLIST_MASK	~KNODE_DEAD

static struct klist *knode_klist(struct klist_node *knode)
{
	return (struct klist *)
		((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
}

static bool knode_dead(struct klist_node *knode)
{
	return (unsigned long)knode->n_klist & KNODE_DEAD;
}

static void knode_set_klist(struct klist_node *knode, struct klist *klist)
{
	knode->n_klist = klist;
	/* no knode deserves to start its life dead */
	WARN_ON(knode_dead(knode));
}

static void knode_kill(struct klist_node *knode)
{
	/* and no knode should die twice ever either, see we're very humane */
	WARN_ON(knode_dead(knode));
	*(unsigned long *)&knode->n_klist |= KNODE_DEAD;
}

/**
 * klist_init - Initialize a klist structure.
 * @k: The klist we're initializing.
 * @get: The get function for the embedding object (NULL if none)
 * @put: The put function for the embedding object (NULL if none)
 *
 * Initialises the klist structure. If the klist_node structures are
 * going to be embedded in refcounted objects (necessary for safe
 * deletion) then the get/put arguments are used to initialise
 * functions that take and release references on the embedding
 * objects.
 */
void klist_init(struct klist *k, void (*get)(struct klist_node *),
		void (*put)(struct klist_node *))
{
	INIT_LIST_HEAD(&k->k_list);
	spin_lock_init(&k->k_lock);
	k->get = get;
	k->put = put;
}
EXPORT_SYMBOL_GPL(klist_init);
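
/*
 * A sketch of how @get/@put are typically wired up (assumed example,
 * not part of this file): the klist_node is embedded in a kref-counted
 * object, and the hooks convert node references into object references
 * so a node can never outlive its container.  struct my_obj,
 * my_obj_release() and my_klist are hypothetical names.
 *
 *	struct my_obj {
 *		struct kref		ref;
 *		struct klist_node	knode;
 *	};
 *
 *	static void my_klist_get(struct klist_node *kn)
 *	{
 *		kref_get(&container_of(kn, struct my_obj, knode)->ref);
 *	}
 *
 *	static void my_klist_put(struct klist_node *kn)
 *	{
 *		kref_put(&container_of(kn, struct my_obj, knode)->ref,
 *			 my_obj_release);
 *	}
 *
 *	klist_init(&my_klist, my_klist_get, my_klist_put);
 */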

static void add_head(struct klist *k, struct klist_node *n)
{
	spin_lock(&k->k_lock);
	list_add(&n->n_node, &k->k_list);
	spin_unlock(&k->k_lock);
}

static void add_tail(struct klist *k, struct klist_node *n)
{
	spin_lock(&k->k_lock);
	list_add_tail(&n->n_node, &k->k_list);
	spin_unlock(&k->k_lock);
}

static void klist_node_init(struct klist *k, struct klist_node *n)
{
	INIT_LIST_HEAD(&n->n_node);
	kref_init(&n->n_ref);
	knode_set_klist(n, k);
	if (k->get)
		k->get(n);
}

/**
 * klist_add_head - Initialize a klist_node and add it to front.
 * @n: node we're adding.
 * @k: klist it's going on.
 */
void klist_add_head(struct klist_node *n, struct klist *k)
{
	klist_node_init(k, n);
	add_head(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_head);

/**
 * klist_add_tail - Initialize a klist_node and add it to back.
 * @n: node we're adding.
 * @k: klist it's going on.
 */
void klist_add_tail(struct klist_node *n, struct klist *k)
{
	klist_node_init(k, n);
	add_tail(k, n);
}
EXPORT_SYMBOL_GPL(klist_add_tail);

/**
 * klist_add_behind - Init a klist_node and add it after an existing node
 * @n: node we're adding.
 * @pos: node to put @n after
 */
void klist_add_behind(struct klist_node *n, struct klist_node *pos)
{
	struct klist *k = knode_klist(pos);

	klist_node_init(k, n);
	spin_lock(&k->k_lock);
	list_add(&n->n_node, &pos->n_node);
	spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_behind);

/**
 * klist_add_before - Init a klist_node and add it before an existing node
 * @n: node we're adding.
 * @pos: node to put @n before
 */
void klist_add_before(struct klist_node *n, struct klist_node *pos)
{
	struct klist *k = knode_klist(pos);

	klist_node_init(k, n);
	spin_lock(&k->k_lock);
	list_add_tail(&n->n_node, &pos->n_node);
	spin_unlock(&k->k_lock);
}
EXPORT_SYMBOL_GPL(klist_add_before);

struct klist_waiter {
	struct list_head list;
	struct klist_node *node;
	struct task_struct *process;
	int woken;
};

static DEFINE_SPINLOCK(klist_remove_lock);
static LIST_HEAD(klist_remove_waiters);

static void klist_release(struct kref *kref)
{
	struct klist_waiter *waiter, *tmp;
	struct klist_node *n = container_of(kref, struct klist_node, n_ref);

	WARN_ON(!knode_dead(n));
	list_del(&n->n_node);
	spin_lock(&klist_remove_lock);
	list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) {
		if (waiter->node != n)
			continue;

		list_del(&waiter->list);
		waiter->woken = 1;
		mb();
		wake_up_process(waiter->process);
	}
	spin_unlock(&klist_remove_lock);
	knode_set_klist(n, NULL);
}

static int klist_dec_and_del(struct klist_node *n)
{
	return kref_put(&n->n_ref, klist_release);
}

static void klist_put(struct klist_node *n, bool kill)
{
	struct klist *k = knode_klist(n);
	void (*put)(struct klist_node *) = k->put;

	spin_lock(&k->k_lock);
	if (kill)
		knode_kill(n);
	if (!klist_dec_and_del(n))
		put = NULL;
	spin_unlock(&k->k_lock);
	if (put)
		put(n);
}

/**
 * klist_del - Decrement the reference count of node and try to remove.
 * @n: node we're deleting.
 */
void klist_del(struct klist_node *n)
{
	klist_put(n, true);
}
EXPORT_SYMBOL_GPL(klist_del);

/**
 * klist_remove - Decrement the refcount of node and wait for it to go away.
 * @n: node we're removing.
 */
void klist_remove(struct klist_node *n)
{
	struct klist_waiter waiter;

	waiter.node = n;
	waiter.process = current;
	waiter.woken = 0;
	spin_lock(&klist_remove_lock);
	list_add(&waiter.list, &klist_remove_waiters);
	spin_unlock(&klist_remove_lock);

	klist_del(n);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (waiter.woken)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(klist_remove);
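
/*
 * Teardown sketch (assumed example, hypothetical struct my_obj as above):
 * when the embedding object goes away, klist_remove() does not return
 * until every current user has dropped its reference to the node, so
 * freeing the object immediately afterwards is safe.
 *
 *	static void my_obj_destroy(struct my_obj *obj)
 *	{
 *		klist_remove(&obj->knode);
 *		kfree(obj);
 *	}
 */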

/**
 * klist_node_attached - Say whether a node is bound to a list or not.
 * @n: Node that we're testing.
 */
int klist_node_attached(struct klist_node *n)
{
	return (n->n_klist != NULL);
}
EXPORT_SYMBOL_GPL(klist_node_attached);

/**
 * klist_iter_init_node - Initialize a klist_iter structure.
 * @k: klist we're iterating.
 * @i: klist_iter we're filling.
 * @n: node to start with.
 *
 * Similar to klist_iter_init(), but starts the action off with @n,
 * instead of with the list head.
 */
void klist_iter_init_node(struct klist *k, struct klist_iter *i,
			  struct klist_node *n)
{
	i->i_klist = k;
	i->i_cur = NULL;
	if (n && kref_get_unless_zero(&n->n_ref))
		i->i_cur = n;
}
EXPORT_SYMBOL_GPL(klist_iter_init_node);

/**
 * klist_iter_init - Initialize a klist_iter structure.
 * @k: klist we're iterating.
 * @i: klist_iter structure we're filling.
 *
 * Similar to klist_iter_init_node(), but start with the list head.
 */
void klist_iter_init(struct klist *k, struct klist_iter *i)
{
	klist_iter_init_node(k, i, NULL);
}
EXPORT_SYMBOL_GPL(klist_iter_init);

/**
 * klist_iter_exit - Finish a list iteration.
 * @i: Iterator structure.
 *
 * Must be called when done iterating over list, as it decrements the
 * refcount of the current node. Necessary in case iteration exited before
 * the end of the list was reached, and always good form.
 */
void klist_iter_exit(struct klist_iter *i)
{
	if (i->i_cur) {
		klist_put(i->i_cur, false);
		i->i_cur = NULL;
	}
}
EXPORT_SYMBOL_GPL(klist_iter_exit);

static struct klist_node *to_klist_node(struct list_head *n)
{
	return container_of(n, struct klist_node, n_node);
}

/**
 * klist_prev - Ante up prev node in list.
 * @i: Iterator structure.
 *
 * First grab list lock. Decrement the reference count of the previous
 * node, if there was one. Grab the prev node, increment its reference
 * count, drop the lock, and return that prev node.
 */
struct klist_node *klist_prev(struct klist_iter *i)
{
	void (*put)(struct klist_node *) = i->i_klist->put;
	struct klist_node *last = i->i_cur;
	struct klist_node *prev;
	unsigned long flags;

	spin_lock_irqsave(&i->i_klist->k_lock, flags);

	if (last) {
		prev = to_klist_node(last->n_node.prev);
		if (!klist_dec_and_del(last))
			put = NULL;
	} else
		prev = to_klist_node(i->i_klist->k_list.prev);

	i->i_cur = NULL;
	while (prev != to_klist_node(&i->i_klist->k_list)) {
		if (likely(!knode_dead(prev))) {
			kref_get(&prev->n_ref);
			i->i_cur = prev;
			break;
		}
		prev = to_klist_node(prev->n_node.prev);
	}

	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);

	if (put && last)
		put(last);
	return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_prev);

/**
 * klist_next - Ante up next node in list.
 * @i: Iterator structure.
 *
 * First grab list lock. Decrement the reference count of the previous
 * node, if there was one. Grab the next node, increment its reference
 * count, drop the lock, and return that next node.
 */
struct klist_node *klist_next(struct klist_iter *i)
{
	void (*put)(struct klist_node *) = i->i_klist->put;
	struct klist_node *last = i->i_cur;
	struct klist_node *next;
	unsigned long flags;

	spin_lock_irqsave(&i->i_klist->k_lock, flags);

	if (last) {
		next = to_klist_node(last->n_node.next);
		if (!klist_dec_and_del(last))
			put = NULL;
	} else
		next = to_klist_node(i->i_klist->k_list.next);

	i->i_cur = NULL;
	while (next != to_klist_node(&i->i_klist->k_list)) {
		if (likely(!knode_dead(next))) {
			kref_get(&next->n_ref);
			i->i_cur = next;
			break;
		}
		next = to_klist_node(next->n_node.next);
	}

	spin_unlock_irqrestore(&i->i_klist->k_lock, flags);

	if (put && last)
		put(last);
	return i->i_cur;
}
EXPORT_SYMBOL_GPL(klist_next);