#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>

struct lockref {
	union {
#ifdef CONFIG_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			unsigned int count;
		};
	};
};
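
/*
 * Illustrative sketch only (the real fast path lives in lib/lockref.c):
 * with CONFIG_CMPXCHG_LOCKREF, a "get if non-zero" can snapshot both
 * words through the union, check that the embedded spinlock is
 * unlocked, and retry a 64-bit cmpxchg over lock_count until it either
 * succeeds or the caller must fall back to taking the lock.  Roughly:
 *
 *	struct lockref old, new;
 *
 *	old.lock_count = ACCESS_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock) &&
 *	       old.count > 0) {
 *		new = old;
 *		new.count++;
 *		// one cmpxchg covers the lock and the count together
 *		if (cmpxchg64(&lockref->lock_count, old.lock_count,
 *			      new.lock_count) == old.lock_count)
 *			return 1;	// got a reference locklessly
 *		old.lock_count = ACCESS_ONCE(lockref->lock_count);
 *	}
 *	return 0;	// locked or zero: take the slow path
 */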

extern void lockref_get(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);
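
/*
 * Typical caller pattern (a hypothetical example with a made-up "obj";
 * the dcache is the real user): try the lockless operation first and
 * only deal with the spinlock when the fast path cannot decide on its
 * own.  For a put:
 *
 *	if (lockref_put_or_lock(&obj->ref))
 *		return;			// dropped one of several refs
 *	// slow path: obj->ref.lock is now held and the count is 1,
 *	// i.e. this was the last reference and teardown may be needed
 *	obj->ref.count = 0;
 *	spin_unlock(&obj->ref.lock);
 *	free_obj(obj);			// hypothetical teardown helper
 */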

#endif /* __LINUX_LOCKREF_H */