// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters, and we don't
 * try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shut down,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the percpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow.)
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we
 * know the ref can't hit 0 before the user drops the initial ref, so as long
 * as we convert to non-percpu (atomic) mode before the initial ref is dropped
 * everything works.
 *
 * Converting to non-percpu mode is done with an RCU-based scheme in
 * percpu_ref_kill(). Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */

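/*
 * Typical usage, as an illustrative sketch (the struct and callback names
 * below are hypothetical, not part of this file): embed a percpu_ref in an
 * object, hold the initial reference for the object's lifetime, and drop it
 * with percpu_ref_kill() at shutdown so that the release callback runs once
 * the last reference is put.
 *
 *	struct ex_obj {
 *		struct percpu_ref ref;
 *		...
 *	};
 *
 *	static void ex_obj_release(struct percpu_ref *ref)
 *	{
 *		struct ex_obj *obj = container_of(ref, struct ex_obj, ref);
 *
 *		kfree(obj);
 *	}
 *
 *	setup - takes the initial ref, starts in percpu mode:
 *		percpu_ref_init(&obj->ref, ex_obj_release, 0, GFP_KERNEL);
 *
 *	fast paths - purely local increments/decrements:
 *		percpu_ref_get(&obj->ref);
 *		percpu_ref_put(&obj->ref);
 *
 *	shutdown - drop the initial ref; ex_obj_release() runs when the
 *	count reaches 0:
 *		percpu_ref_kill(&obj->ref);
 */
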
#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref. @ref starts out in percpu mode with a refcount of 1 unless
 * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD. These flags
 * change the start state to atomic with the latter setting the initial refcount
 * to 0. See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
	ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
		ref->allow_reinit = true;
	} else {
		start_count += PERCPU_COUNT_BIAS;
	}

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&ref->count, start_count);

	ref->release = release;
	ref->confirm_switch = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);

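/*
 * An illustrative sketch of the flag handling above (the caller below is
 * hypothetical): PERCPU_REF_INIT_ATOMIC starts the ref in atomic mode with
 * a count of 1, PERCPU_REF_INIT_DEAD starts it atomic, dead and with a
 * count of 0, and either flag implies that reinit is allowed, so the percpu
 * counters are kept around for a later percpu_ref_reinit()/resurrect().
 *
 *	int ret;
 *
 *	ret = percpu_ref_init(&obj->ref, ex_obj_release,
 *			      PERCPU_REF_INIT_DEAD, GFP_KERNEL);
 *	if (ret)
 *		return ret;	(-ENOMEM if the percpu allocation failed)
 *	...
 *	percpu_ref_resurrect(&obj->ref);	(count 1, percpu mode)
 */
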
/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or in the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		/* non-NULL confirm_switch indicates switching in progress */
		WARN_ON_ONCE(ref->confirm_switch);
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);

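/*
 * An illustrative sketch of the init failure path mentioned above (the
 * embedding object, helper and labels are hypothetical): if percpu_ref_init()
 * succeeded but a later setup step of the embedding object fails,
 * percpu_ref_exit() releases the percpu counters before the object itself
 * is freed.
 *
 *	ret = percpu_ref_init(&obj->ref, ex_obj_release, 0, GFP_KERNEL);
 *	if (ret)
 *		goto err_free_obj;
 *
 *	ret = ex_obj_setup_rest(obj);
 *	if (ret)
 *		goto err_exit_ref;
 *	return 0;
 *
 *  err_exit_ref:
 *	percpu_ref_exit(&obj->ref);
 *  err_free_obj:
 *	kfree(obj);
 *	return ret;
 */
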
static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);

	ref->confirm_switch(ref);
	ref->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	if (!ref->allow_reinit)
		percpu_ref_exit(ref);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %ld percpu %ld",
		 atomic_long_read(&ref->count), (long)count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
		  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
		  ref->release, atomic_long_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}

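/*
 * A worked example of the bias arithmetic above (numbers illustrative,
 * 64-bit longs assumed): a ref initialized in percpu mode starts with
 * &ref->count == PERCPU_COUNT_BIAS + 1. Suppose the percpu counters have
 * seen 3 gets and 2 puts in total, so their sum is 1 (individual CPUs may
 * hold values like +5 and -4, or may even have wrapped; only the sum
 * matters). The single atomic_long_add() above then yields
 *
 *	(PERCPU_COUNT_BIAS + 1) + (1 - PERCPU_COUNT_BIAS) == 2
 *
 * i.e. the initial ref plus one outstanding ref - exactly what a plain
 * atomic counter would have held. Because the bias is subtracted in the
 * same operation that adds the sum, &ref->count never passes through 0
 * during the switch.
 */
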
static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * Non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress. Use the noop one if unspecified.
	 */
	ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	if (WARN_ON_ONCE(!ref->allow_reinit))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired
	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
	 * zeroing is visible to all percpu accesses which can see the
	 * following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	lockdep_assert_held(&percpu_ref_switch_lock);

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion. If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 */
	wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
			    percpu_ref_switch_lock);

	if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode. All its percpu counts will
 * be collected to the main atomic counter. On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked. This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations. Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);

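/*
 * An illustrative sketch of using the confirmation callback (the object,
 * completion member and function names are hypothetical): the callback may
 * run from RCU callback context, so it must not block; completing a
 * completion and waiting for it in the caller is one way to turn the
 * confirmation into a synchronous switch.
 *
 *	static void ex_confirm_atomic(struct percpu_ref *ref)
 *	{
 *		struct ex_obj *obj = container_of(ref, struct ex_obj, ref);
 *
 *		complete(&obj->switch_done);
 *	}
 *
 *	init_completion(&obj->switch_done);
 *	percpu_ref_switch_to_atomic(&obj->ref, ex_confirm_atomic);
 *	wait_for_completion(&obj->switch_done);
 *	from here on all gets/puts hit the single atomic counter
 */
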
/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete. Caller must ensure that no other thread
 * will switch back to percpu mode.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
	percpu_ref_switch_to_atomic(ref, NULL);
	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);

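/*
 * An illustrative sketch (hypothetical object): one pattern is to switch to
 * atomic mode synchronously, do work that needs the exact, centralized
 * count, and then switch back to percpu mode for fast-path operation.
 *
 *	percpu_ref_switch_to_atomic_sync(&obj->ref);
 *	the count is now maintained in the single atomic_long_t and checks
 *	such as percpu_ref_is_zero() reflect its exact value
 *	...
 *	percpu_ref_switch_to_percpu(&obj->ref);
 */
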
/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode. This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations. This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode. If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->force_atomic = false;
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail. See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * There are no implied RCU grace periods between kill and release.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
		  "%s called more than once on %ps!", __func__, ref->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);
	percpu_ref_put(ref);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);

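/*
 * An illustrative sketch of a kill-and-drain pattern (the object, waitqueue
 * and callback names are hypothetical): @confirm_kill signals that no new
 * percpu_ref_tryget_live() user can appear, while the release callback wakes
 * the same waitqueue so the caller can wait for the count to reach zero.
 *
 *	static void ex_res_release(struct percpu_ref *ref)
 *	{
 *		struct ex_res *res = container_of(ref, struct ex_res, ref);
 *
 *		wake_up_all(&res->drain_wq);
 *	}
 *
 *	static void ex_res_confirm_kill(struct percpu_ref *ref)
 *	{
 *		no new tryget_live() users beyond this point; existing
 *		references are still in flight and must be drained
 *	}
 *
 *	percpu_ref_kill_and_confirm(&res->ref, ex_res_confirm_kill);
 *	wait_event(res->drain_wq, percpu_ref_is_zero(&res->ref));
 *
 * After wait_event() returns, the last reference has been dropped and
 * ex_res_release() has run; the caller can then percpu_ref_exit() the ref
 * and free the object.
 */
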
/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);

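/*
 * An illustrative sketch (hypothetical object): reinit requires that reinit
 * was allowed at init time (PERCPU_REF_ALLOW_REINIT, or one of the
 * PERCPU_REF_INIT_ATOMIC/DEAD flags which imply it) and that the ref has
 * been killed and has dropped to zero, e.g. across a freeze/unfreeze style
 * cycle.
 *
 *	percpu_ref_kill(&obj->ref);
 *	... wait until the release callback has run and the count is zero ...
 *	percpu_ref_reinit(&obj->ref);
 *
 * After percpu_ref_reinit() the count is 1 again and, unless the ref was
 * explicitly switched to atomic mode, it operates in percpu mode.
 */
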
/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called. @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
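
/*
 * An illustrative sketch (hypothetical object): unlike percpu_ref_reinit(),
 * percpu_ref_resurrect() does not require the count to have reached zero -
 * only that the ref has been killed - so references obtained with
 * percpu_ref_tryget() may still be outstanding when it is called.
 *
 *	percpu_ref_kill(&obj->ref);
 *	... shutdown is aborted while users may still hold references ...
 *	percpu_ref_resurrect(&obj->ref);
 *
 * percpu_ref_resurrect() undoes the kill: the DEAD flag is cleared and the
 * initial reference dropped by percpu_ref_kill() is re-acquired.
 */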