/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you drop the initial refcount by calling
 * percpu_ref_kill() rather than percpu_ref_put().
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, marks the ref as shutting down so that
 * percpu_ref_put() will check for the ref hitting 0, and then drops the
 * initial ref on your behalf.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * removes the kioctx from the process's table of kioctxs and kills the
 * percpu_ref, dropping the initial ref. After that, there can't be any new
 * users of the kioctx (from lookup_ioctx()) and the kioctx is released once
 * the remaining references are put.
 *
 * Note that the free path, free_ioctx(), needs to go through explicit call_rcu()
 * to synchronize with RCU protected lookup_ioctx(). percpu_ref operations don't
 * imply RCU grace periods of any kind and if a user wants to combine percpu_ref
 * with RCU protection, it must be done explicitly.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure that percpu_ref_kill() is only called
 * once - percpu_ref_kill() doesn't provide that synchronization for you, so
 * the code needs some other mechanism to synchronize teardown.
 */
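
/*
 * A minimal sketch of that lifecycle (struct foo and foo_release() are
 * hypothetical, not the fs/aio.c code; error handling omitted):
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		kfree(foo);
 *	}
 *
 * Setup takes the implicit initial ref:
 *
 *	ret = percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
 *
 * Shutdown drops the initial ref; foo_release() runs once every other ref
 * has also been put:
 *
 *	percpu_ref_kill(&foo->ref);
 */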

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

	__PERCPU_REF_FLAG_BITS	= 2,
};

/* @flags for percpu_ref_init() */
enum {
	/*
	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
	 * operation using percpu_ref_switch_to_percpu().  If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 * Implies ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
	 * percpu_ref_reinit() before use.  Implies INIT_ATOMIC and
	 * ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,

	/*
	 * Allow switching from atomic mode to percpu mode.
	 */
	PERCPU_REF_ALLOW_REINIT	= 1 << 2,
};
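
/*
 * For example (hedged sketch; q and q_release() are hypothetical), a ref
 * that shouldn't hand out percpu-fast references until setup is complete
 * can start in atomic mode and be switched over once ready:
 *
 *	ret = percpu_ref_init(&q->ref, q_release, PERCPU_REF_INIT_ATOMIC,
 *			      GFP_KERNEL);
 *	...
 *	percpu_ref_switch_to_percpu(&q->ref);
 */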

struct percpu_ref {
	atomic_long_t		count;
	/*
	 * The low bit of the pointer indicates whether the ref is in atomic
	 * mode; if set, get/put will manipulate the atomic_t directly.
	 */
	unsigned long		percpu_count_ptr;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	bool			force_atomic:1;
	bool			allow_reinit:1;
	struct rcu_head		rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, unsigned int flags,
				 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Switches @ref into atomic mode before gathering up the percpu counters
 * and dropping the initial ref.
 *
 * There are no implied RCU grace periods between kill and release.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}
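
/*
 * A sketch of shutdown with confirmation (struct foo, foo_confirm_kill()
 * and the completion are hypothetical).  The confirm_kill callback runs
 * once the switch to atomic mode has completed, after which
 * percpu_ref_tryget_live() is guaranteed to fail:
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 */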

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't simply return the pointer for the caller to test against
 * NULL, because doing so would force the compiler to generate two
 * conditional branches, as it can't assume that @ref->percpu_count is not
 * NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
				   unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr;

	/*
	 * The value of @ref->percpu_count_ptr is tested for
	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
	 * used as a pointer.  If the compiler generates a separate fetch
	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
	 * between, contaminating the pointer value, meaning that
	 * READ_ONCE() is required when fetching it.
	 *
	 * The smp_read_barrier_depends() implied by READ_ONCE() pairs
	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

	/*
	 * Theoretically, the following could test just ATOMIC; however,
	 * then we'd have to mask off DEAD separately as DEAD may be
	 * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
	 * implies ATOMIC anyway.  Test them together.
	 */
	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
		atomic_long_add(nr, &ref->count);

	rcu_read_unlock();
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	percpu_ref_get_many(ref, 1);
}
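
/*
 * Each percpu_ref_get() must eventually be balanced by a matching
 * percpu_ref_put().  A trivial sketch (foo is hypothetical):
 *
 *	percpu_ref_get(&foo->ref);
 *	... use foo ...
 *	percpu_ref_put(&foo->ref);
 */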

/**
 * percpu_ref_tryget_many - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 * @nr: number of references to get
 *
 * Increment a percpu refcount by @nr unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
					  unsigned long nr)
{
	unsigned long __percpu *percpu_count;
	bool ret;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_add(*percpu_count, nr);
		ret = true;
	} else {
		ret = atomic_long_add_unless(&ref->count, nr, 0);
	}

	rcu_read_unlock();

	return ret;
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	return percpu_ref_tryget_many(ref, 1);
}

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such a guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret = false;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock();

	return ret;
}
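
/*
 * The typical use is a lookup under RCU, in the style of lookup_ioctx()
 * mentioned at the top of this file.  A hedged sketch (foo_idr and the
 * lookup are hypothetical):
 *
 *	rcu_read_lock();
 *	foo = idr_find(&foo_idr, id);
 *	if (foo && !percpu_ref_tryget_live(&foo->ref))
 *		foo = NULL;
 *	rcu_read_unlock();
 *
 * On success, @foo is guaranteed to stay around until the matching
 * percpu_ref_put().
 */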

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
		ref->release(ref);

	rcu_read_unlock();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	percpu_ref_put_many(ref, 1);
}

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;
	return !atomic_long_read(&ref->count);
}
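
/*
 * This is mostly useful for sanity checking during teardown, e.g. (sketch;
 * foo is hypothetical):
 *
 *	WARN_ON(!percpu_ref_is_zero(&foo->ref));
 *	percpu_ref_exit(&foo->ref);
 */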

#endif