// SPDX-License-Identifier: GPL-2.0
/*
 * Out-of-line refcount functions.
 */

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

#define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ".\n")

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
{
	refcount_set(r, REFCOUNT_SATURATED);

	switch (t) {
	case REFCOUNT_ADD_NOT_ZERO_OVF:
		REFCOUNT_WARN("saturated; leaking memory");
		break;
	case REFCOUNT_ADD_OVF:
		REFCOUNT_WARN("saturated; leaking memory");
		break;
	case REFCOUNT_ADD_UAF:
		REFCOUNT_WARN("addition on 0; use-after-free");
		break;
	case REFCOUNT_SUB_UAF:
		REFCOUNT_WARN("underflow; use-after-free");
		break;
	case REFCOUNT_DEC_LEAK:
		REFCOUNT_WARN("decrement hit 0; leaking memory");
		break;
	default:
		REFCOUNT_WARN("unknown saturation event!?");
	}
}
EXPORT_SYMBOL(refcount_warn_saturate);

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * and not cmpxchg in generic, because that would allow implementing unsafe
 * operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
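
/*
 * Usage sketch for refcount_dec_if_one() (illustrative only; 'struct foo'
 * and foo_free() below are hypothetical, not part of this file).  A typical
 * try-delete: tear the object down only if we hold the last reference,
 * otherwise leave the count untouched and let the remaining users decide.
 *
 *	static bool foo_try_free(struct foo *foo)
 *	{
 *		if (!refcount_dec_if_one(&foo->ref))
 *			return false;	// someone else still holds a reference
 *		foo_free(foo);		// we performed the 1 -> 0 transition
 *		return true;
 *	}
 */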

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);
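
/*
 * Usage sketch for refcount_dec_not_one() (illustrative only; foo_put_slow()
 * is hypothetical).  The common pattern is the lockless fast path of a
 * "dec and lock" sequence: drop the reference cheaply unless it is the last
 * one, and only enter a slow path that can serialize the final release when
 * false is returned.  The refcount_dec_and_*lock() helpers below are built
 * exactly this way.
 *
 *	static void foo_put(struct foo *foo)
 *	{
 *		if (refcount_dec_not_one(&foo->ref))
 *			return;			// fast path: not the last reference
 *		foo_put_slow(foo);		// slow path: handle the 1 -> 0 case
 *	}
 */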

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
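
/*
 * Usage sketch for refcount_dec_and_mutex_lock() (illustrative only; the
 * 'foo' structure, foo_table_lock and the list linkage are hypothetical).
 * On a successful 1 -> 0 transition the mutex is returned held, so the
 * caller can unlink the object from shared state before freeing it:
 *
 *	static void foo_put(struct foo *foo)
 *	{
 *		if (!refcount_dec_and_mutex_lock(&foo->ref, &foo_table_lock))
 *			return;			// not the last reference
 *		list_del(&foo->node);		// unlink under the mutex
 *		mutex_unlock(&foo_table_lock);
 *		kfree(foo);
 *	}
 */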

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);
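
/*
 * Usage sketch for refcount_dec_and_lock() (illustrative only; the hash
 * table, foo_hash_lock and foo->node are hypothetical).  Same shape as the
 * mutex variant above, but with a spinlock protecting the lookup structure:
 *
 *	static void foo_put(struct foo *foo)
 *	{
 *		if (!refcount_dec_and_lock(&foo->ref, &foo_hash_lock))
 *			return;			// not the last reference
 *		hlist_del(&foo->node);		// unlink under the spinlock
 *		spin_unlock(&foo_hash_lock);
 *		kfree(foo);
 *	}
 */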

/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
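
/*
 * Usage sketch for refcount_dec_and_lock_irqsave() (illustrative only; the
 * 'foo' structure and foo_list_lock are hypothetical).  The saved flags are
 * only meaningful when the function returns true, and must then be passed
 * back to spin_unlock_irqrestore() once the caller is done with the lock:
 *
 *	static void foo_put(struct foo *foo)
 *	{
 *		unsigned long flags;
 *
 *		if (!refcount_dec_and_lock_irqsave(&foo->ref, &foo_list_lock,
 *						   &flags))
 *			return;			// not the last reference
 *		list_del(&foo->node);		// unlink with interrupts disabled
 *		spin_unlock_irqrestore(&foo_list_lock, flags);
 *		kfree(foo);
 *	}
 */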