// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
 *		spin_lock(&lock);
 *		return 1;
 *	}
 *	return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic": with the non-atomic version above, another CPU holding
 * the lock could look the object up and take a new reference in the
 * window between the decrement and the spin_lock(), and would then
 * be left using an object that the caller goes on to free.
 */
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/*
	 * Otherwise do it the slow way: take the lock first, so the final
	 * decrement to zero is atomic with respect to anyone holding the lock.
	 */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
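
/*
 * Usage sketch (hypothetical caller, not part of this file): the
 * classic pattern is to drop the last reference and unlink the object
 * from a lock-protected structure in one critical section, so that a
 * concurrent lookup under the lock can never revive a dying object:
 *
 *	void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_lock(&obj->refcount, &obj_list_lock))
 *			return;
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */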

/*
 * Variant of _atomic_dec_and_lock() whose slow path takes the lock with
 * interrupts disabled, for spinlocks that may also be acquired from
 * interrupt context.
 */
int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
				 unsigned long *flags)
{
	/* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock_irqsave(lock, *flags);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock_irqrestore(lock, *flags);
	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
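
/*
 * Usage sketch for the irqsave variant (again a hypothetical caller):
 * the same pattern, for the case where the lock can also be taken from
 * interrupt context and so must be held with interrupts disabled. Note
 * that the atomic_dec_and_lock_irqsave() wrapper macro takes flags by
 * name, not by address:
 *
 *	void my_obj_put_irq(struct my_obj *obj)
 *	{
 *		unsigned long flags;
 *
 *		if (!atomic_dec_and_lock_irqsave(&obj->refcount,
 *						 &obj_list_lock, flags))
 *			return;
 *		list_del(&obj->node);
 *		spin_unlock_irqrestore(&obj_list_lock, flags);
 *		kfree(obj);
 *	}
 */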