#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters. In this mode
 * we don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shut down,
 * the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the pcpu_count vars will be equal to what it would have been if all the gets
 * and puts were done to a single integer, even if some of the percpu integers
 * overflow or underflow.)
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non-percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t
 * can't hit 0 before we've added up all the percpu refs.
 */
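/*
 * A toy illustration of the wrapping claim (hypothetical numbers, shrunk to
 * 2-bit counters so the overflow is easy to see): if CPU0 services 5 gets,
 * its counter wraps to 5 % 4 = 1; if CPU1 services 3 puts, its counter wraps
 * to -3 % 4 = 1. Their sum mod 4 is 2 - exactly the net 5 - 3 = 2 references
 * that a single non-wrapping integer would have recorded.
 */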

#define PCPU_COUNT_BIAS		(1U << 31)

static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 *
 * Initializes the refcount in single atomic counter mode with a refcount of 1;
 * analogous to atomic_set(ref, 1).
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
{
	atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);

	ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
	if (!ref->pcpu_count_ptr)
		return -ENOMEM;

	ref->release = release;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
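
/*
 * A minimal usage sketch (hypothetical caller: struct foo, foo_create() and
 * foo_release() are illustrative and not part of this file). The refcount
 * starts at 1; @release runs once percpu_ref_kill() has dropped that initial
 * ref and every remaining ref has been put:
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(ref);
 *		kfree(foo);
 *	}
 *
 *	static struct foo *foo_create(void)
 *	{
 *		struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *		if (foo && percpu_ref_init(&foo->ref, foo_release)) {
 *			kfree(foo);
 *			return NULL;
 *		}
 *		return foo;
 *	}
 */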

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);

	if (pcpu_count) {
		free_percpu(pcpu_count);
		ref->pcpu_count_ptr = PCPU_REF_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
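
/*
 * A sketch of the init-failure-path usage mentioned above (bar_create(),
 * bar_release() and bar_setup_other_parts() are hypothetical): here
 * percpu_ref_init() succeeded, so the percpu counters must be freed with
 * percpu_ref_exit() even though the ref was never used:
 *
 *	static struct bar *bar_create(void)
 *	{
 *		struct bar *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
 *
 *		if (!bar)
 *			return NULL;
 *		if (percpu_ref_init(&bar->ref, bar_release))
 *			goto err_free;
 *		if (bar_setup_other_parts(bar))
 *			goto err_exit_ref;
 *		return bar;
 *
 *	err_exit_ref:
 *		percpu_ref_exit(&bar->ref);
 *	err_free:
 *		kfree(bar);
 *		return NULL;
 *	}
 */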

static void percpu_ref_kill_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
	unsigned count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(pcpu_count, cpu);

	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
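	/*
	 * A worked example with hypothetical numbers: ref->count starts at
	 * 1 + PCPU_COUNT_BIAS at init. Suppose users currently hold two extra
	 * refs, so the percpu counters sum to 2 (mod 2^32). The atomic_add()
	 * below adds 2 - PCPU_COUNT_BIAS, leaving ref->count at 3 - the
	 * initial ref plus the two outstanding ones, with the bias gone.
	 */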

	atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)",
		  atomic_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out kill confirmation */
	if (ref->confirm_kill)
		ref->confirm_kill(ref);

	/*
	 * Now we're in single atomic_t mode with a consistent refcount, so it's
	 * safe to drop our initial ref:
	 */
	percpu_ref_put(ref);
}

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs - all further
 * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget()
 * for more details.
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full RCU grace period has passed, but this is an
 * implementation detail and callers must not depend on it.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
		  "percpu_ref_kill() called more than once!\n");

	ref->pcpu_count_ptr |= PCPU_REF_DEAD;
	ref->confirm_kill = confirm_kill;

	call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
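
/*
 * A minimal sketch of a confirmed kill (struct foo, foo_confirm_kill() and
 * the kill_done completion member are hypothetical; the percpu_ref and
 * completion APIs used below are real):
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->kill_done);
 *	}
 *
 *	init_completion(&foo->kill_done);
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->kill_done);
 *
 * After wait_for_completion() returns, every percpu_ref_tryget() on
 * &foo->ref is guaranteed to fail.
 */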