// SPDX-License-Identifier: GPL-2.0
/*
 * Out-of-line refcount functions.
 */

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

#define REFCOUNT_WARN(str)	WARN_ONCE(1, "refcount_t: " str ".\n")

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
{
	refcount_set(r, REFCOUNT_SATURATED);

	switch (t) {
	case REFCOUNT_ADD_NOT_ZERO_OVF:
		REFCOUNT_WARN("saturated; leaking memory");
		break;
	case REFCOUNT_ADD_OVF:
		REFCOUNT_WARN("saturated; leaking memory");
		break;
	case REFCOUNT_ADD_UAF:
		REFCOUNT_WARN("addition on 0; use-after-free");
		break;
	case REFCOUNT_SUB_UAF:
		REFCOUNT_WARN("underflow; use-after-free");
		break;
	case REFCOUNT_DEC_LEAK:
		REFCOUNT_WARN("decrement hit 0; leaking memory");
		break;
	default:
		REFCOUNT_WARN("unknown saturation event!?");
	}
}
EXPORT_SYMBOL(refcount_warn_saturate);

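/*
 * Example (illustrative sketch, not called from kernel code): how the
 * saturation path surfaces to a caller. The helper below is hypothetical;
 * it only demonstrates that an overflowing increment pins the counter at
 * REFCOUNT_SATURATED and warns once, instead of wrapping to a small value
 * that could later hit zero and trigger a use-after-free.
 *
 *	static void demo_overflow(refcount_t *r)
 *	{
 *		refcount_set(r, REFCOUNT_MAX);
 *		refcount_inc(r);	// saturates: "saturated; leaking memory"
 *		WARN_ON(refcount_read(r) != REFCOUNT_SATURATED);
 *	}
 */
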
/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * rather than a generic cmpxchg, because the latter would allow implementing
 * unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);

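/*
 * Example (hedged sketch): refcount_dec_if_one() as a try-delete operator.
 * The structure and helpers (struct cache_entry, its list linkage) are
 * hypothetical, purely to show the pattern: delete only if we hold the
 * last reference, otherwise back off and let the remaining holders keep
 * the object alive.
 *
 *	static bool cache_entry_try_delete(struct cache_entry *e)
 *	{
 *		if (!refcount_dec_if_one(&e->refs))
 *			return false;	// other references remain; not deleted
 *
 *		// we performed the 1 -> 0 transition: exclusive ownership
 *		list_del(&e->node);
 *		kfree(e);
 *		return true;
 *	}
 */
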
/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);

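/*
 * Example (hedged sketch): the canonical use of refcount_dec_and_mutex_lock()
 * is dropping a reference to an object that lives on a mutex-protected list.
 * All names below (struct widget, widget_list_lock) are hypothetical; the
 * point is that the mutex is only taken when this caller may be performing
 * the final put, and is returned held so the object can be unlinked before
 * it is freed. refcount_dec_and_lock() below follows the same pattern with
 * a spinlock.
 *
 *	static void widget_put(struct widget *w)
 *	{
 *		if (!refcount_dec_and_mutex_lock(&w->refs, &widget_list_lock))
 *			return;		// not the last reference; lock not held
 *
 *		// refcount hit 0 and widget_list_lock is held
 *		list_del(&w->node);
 *		mutex_unlock(&widget_list_lock);
 *		kfree(w);
 *	}
 */
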
/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);

/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
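
/*
 * Example (hedged sketch): refcount_dec_and_lock_irqsave() with the saved
 * IRQ flags threaded through. The names (struct sample, sample_lock) are
 * hypothetical; note that @flags must stay live between the call and the
 * matching spin_unlock_irqrestore().
 *
 *	static void sample_put(struct sample *s)
 *	{
 *		unsigned long flags;
 *
 *		if (!refcount_dec_and_lock_irqsave(&s->refs, &sample_lock, &flags))
 *			return;		// not the last reference; lock not held
 *
 *		// refcount hit 0; sample_lock held with interrupts disabled
 *		list_del(&s->node);
 *		spin_unlock_irqrestore(&sample_lock, flags);
 *		kfree(s);
 *	}
 */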