/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions used on UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>


/*
 * Must define these before including other files; inline functions need them.
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)                                       \
do {                                                                    \
        static struct lock_class_key __key;                             \
                                                                        \
        __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);      \
} while (0)

#else
# define raw_spin_lock_init(lock)                               \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
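
/*
 * Illustrative sketch (not part of this header; the structure and helper
 * are hypothetical): a raw_spinlock_t is embedded next to the data it
 * protects and initialized once before use. The macro's static key means
 * each raw_spin_lock_init() call site gets its own lockdep class:
 *
 *      struct chip_state {
 *              raw_spinlock_t lock;    // protects 'mask'
 *              u32 mask;
 *      };
 *
 *      static void chip_state_setup(struct chip_state *s)
 *      {
 *              raw_spin_lock_init(&s->lock);
 *              s->mask = 0;
 *      }
 */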

#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                          CPU1
 *
 *        WRITE_ONCE(X, 1);             WRITE_ONCE(Y, 1);
 *        spin_lock(S);                 smp_mb();
 *        smp_mb__after_spinlock();     r1 = READ_ONCE(X);
 *        r0 = READ_ONCE(Y);
 *        spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *  { X = 0;  Y = 0; }
 *
 *  CPU0                  CPU1                            CPU2
 *
 *  spin_lock(S);         spin_lock(S);                   r1 = READ_ONCE(Y);
 *  WRITE_ONCE(X, 1);     smp_mb__after_spinlock();       smp_rmb();
 *  spin_unlock(S);       r0 = READ_ONCE(X);              r2 = READ_ONCE(X);
 *                        WRITE_ONCE(Y, 1);
 *                        spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()        do { } while (0)
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
        mmiowb_spin_lock();
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)       arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
        mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&(lock)->raw_lock);

        if (ret)
                mmiowb_spin_lock();

        return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        mmiowb_spin_unlock();
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
         do {                                                           \
                 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
                 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
         } while (0)
#else
/*
 * Always evaluate the 'subclass' argument, so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                flags = _raw_spin_lock_irqsave(lock);           \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)         \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_unlock_irqrestore(lock, flags);\
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
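
/*
 * Illustrative sketch (hypothetical structure and field names): the
 * canonical save/restore pattern with the raw API. It is safe in contexts
 * where interrupts may or may not already be disabled, because the prior
 * interrupt state is restored rather than unconditionally re-enabled:
 *
 *      static u64 stats_read(struct stats *st)
 *      {
 *              unsigned long flags;
 *              u64 val;
 *
 *              raw_spin_lock_irqsave(&st->lock, flags);
 *              val = st->counter;
 *              raw_spin_unlock_irqrestore(&st->lock, flags);
 *
 *              return val;
 *      }
 */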

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)                                   \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init(spinlock_check(lock),              \
                             #lock, &__key, LD_WAIT_CONFIG);    \
} while (0)

#else

# define spin_lock_init(_lock)                          \
do {                                                    \
        spinlock_check(_lock);                          \
        *(_lock) = __SPIN_LOCK_UNLOCKED(_lock);         \
} while (0)

#endif
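
/*
 * Illustrative sketch (hypothetical 'struct foo'): the usual pattern is
 * to embed the spinlock_t next to the data it protects and initialize it
 * before the object becomes visible to any other context:
 *
 *      struct foo {
 *              spinlock_t lock;        // protects 'list'
 *              struct list_head list;
 *      };
 *
 *      static void foo_init(struct foo *f)
 *      {
 *              spin_lock_init(&f->lock);
 *              INIT_LIST_HEAD(&f->list);
 *      }
 */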

static __always_inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}
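
/*
 * Illustrative sketch: spin_trylock() suits opportunistic work that can
 * simply be skipped under contention ('foo_flush' is a made-up helper):
 *
 *      if (spin_trylock(&f->lock)) {
 *              foo_flush(f);
 *              spin_unlock(&f->lock);
 *      }
 */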

#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)
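
/*
 * Illustrative sketch (hypothetical objects and 'transfer' operation):
 * spin_lock_nested() tells lockdep that taking two locks of the same
 * class is intentional, provided a stable locking order rules out
 * deadlock:
 *
 *      spin_lock(&src->lock);
 *      spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *      transfer(src, dst);
 *      spin_unlock(&dst->lock);
 *      spin_unlock(&src->lock);
 */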

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags);  \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied by other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}
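
/*
 * Illustrative sketch: given the caveats above, spin_is_locked() is best
 * confined to debug-only sanity checks ('foo_update_locked' is a
 * hypothetical helper that requires the lock to be held):
 *
 *      static void foo_update_locked(struct foo *f)
 *      {
 *              WARN_ON_ONCE(!spin_is_locked(&f->lock));
 *              f->generation++;
 *      }
 *
 * Beware that on CONFIG_SMP=n non-debug builds the check above would
 * always trigger (the return value is always 0), which is one reason
 * lockdep_assert_held() is usually the better annotation.
 */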

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
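
/*
 * Illustrative sketch (hypothetical object, list and lock names): the
 * classic use is dropping the last reference while atomically taking the
 * lock that protects the lookup structure, so no one can find the object
 * between the final decrement and its removal:
 *
 *      static void foo_put(struct foo *f)
 *      {
 *              if (atomic_dec_and_lock(&f->refcount, &foo_list_lock)) {
 *                      list_del(&f->list);
 *                      spin_unlock(&foo_list_lock);
 *                      kfree(f);
 *              }
 *      }
 */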

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                        unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
                __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                             size_t max_size, unsigned int cpu_mult,
                             gfp_t gfp, const char *name,
                             struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
        ({                                                                   \
                static struct lock_class_key key;                            \
                int ret;                                                     \
                                                                             \
                ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
                                               cpu_mult, gfp, #locks, &key); \
                ret;                                                         \
        })

void free_bucket_spinlocks(spinlock_t *locks);
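
/*
 * Illustrative sketch (hypothetical hash table; assumes a cpu_mult of 0
 * sizes the array at max_size, here a power of two): the bucket spinlock
 * helpers hand back a lock array and a mask, so a hash value maps to its
 * lock cheaply:
 *
 *      static spinlock_t *bucket_locks;
 *      static unsigned int bucket_mask;
 *
 *      int table_init(void)
 *      {
 *              return alloc_bucket_spinlocks(&bucket_locks, &bucket_mask,
 *                                            1024, 0, GFP_KERNEL);
 *      }
 *
 *      static spinlock_t *bucket_lock(u32 hash)
 *      {
 *              return &bucket_locks[hash & bucket_mask];
 *      }
 */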

#endif /* __LINUX_SPINLOCK_H */