/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them. In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */
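
/*
 * Illustrative sketch only (not the in-tree fast path, which lives in
 * lib/lockref.c): the lockless increment described above can be done by
 * retrying a cmpxchg over the combined lock+count word and falling back
 * to the spinlock once the lock is observed held. 'lockref' below is a
 * hypothetical pointer argument, and the "lock is free" test is
 * simplified:
 *
 *	struct lockref old, new;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (!spin_is_locked(&old.lock)) {
 *		new.lock_count = old.lock_count;
 *		new.count++;
 *		if (try_cmpxchg64_relaxed(&lockref->lock_count,
 *					  &old.lock_count, new.lock_count))
 *			return;
 *		(on failure old.lock_count was refreshed, so just retry)
 *	}
 *	spin_lock(&lockref->lock);	(contended slow path)
 *	lockref->count++;
 *	spin_unlock(&lockref->lock);
 */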

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};

extern void lockref_get(struct lockref *);
extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);

extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);

/* Must be called under spinlock for reliable results */
static inline int __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
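
/*
 * Sketch of the locking this check needs (illustrative only; 'obj' and its
 * embedded 'ref' lockref member are hypothetical names):
 *
 *	spin_lock(&obj->ref.lock);
 *	if (__lockref_is_dead(&obj->ref)) {
 *		spin_unlock(&obj->ref.lock);
 *		(object is going away - do not take a new reference)
 *	} else {
 *		obj->ref.count++;
 *		spin_unlock(&obj->ref.lock);
 *	}
 */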

#endif /* __LINUX_LOCKREF_H */