/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)			\
do {							\
	static struct lock_class_key __key;		\
							\
	__raw_spin_lock_init((lock), #lock, &__key);	\
} while (0)

#else
# define raw_spin_lock_init(lock)			\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
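
/*
 * Illustrative sketch (not part of this header; "struct foo" is a
 * made-up example): each textual expansion of raw_spin_lock_init()
 * above gets its own static lock_class_key, so lockdep can distinguish
 * locks initialized at different call sites even when they share a
 * structure type:
 *
 *	struct foo {
 *		raw_spinlock_t lock;
 *	};
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		raw_spin_lock_init(&f->lock);	<- one lock class per site
 *	}
 */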

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0				CPU1
 *
 *	  WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	  spin_lock(S);			smp_mb();
 *	  smp_mb__after_spinlock();	r1 = READ_ONCE(X);
 *	  r0 = READ_ONCE(Y);
 *	  spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0		CPU1				CPU2
 *
 *	  spin_lock(S);	spin_lock(S);			r1 = READ_ONCE(Y);
 *	  WRITE_ONCE(X, 1);	smp_mb__after_spinlock();	smp_rmb();
 *	  spin_unlock(S);	r0 = READ_ONCE(X);		r2 = READ_ONCE(X);
 *			WRITE_ONCE(Y, 1);
 *			spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif
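
/*
 * Illustrative sketch of the intended use ("cond" is a made-up flag;
 * the in-tree users are __schedule() and try_to_wake_up(), as noted
 * above): the barrier upgrades the lock's ACQUIRE semantics to a full
 * barrier, so stores before the lock acquisition are ordered against
 * the later load of the flag:
 *
 *	spin_lock(&s);
 *	smp_mb__after_spinlock();
 *	if (!READ_ONCE(cond))
 *		... go to sleep / bail out ...
 */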

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
	mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&(lock)->raw_lock);

	if (ret)
		mmiowb_spin_lock();

	return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	mmiowb_spin_unlock();
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif
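
/*
 * Layering sketch (informational; simplified for the nondebug SMP
 * configuration): the spin_*() wrappers defined later in this file
 * funnel down to the architecture code roughly as follows:
 *
 *	spin_lock(lock)
 *	  raw_spin_lock(&lock->rlock)
 *	    _raw_spin_lock(lock)			[spinlock_api_smp.h]
 *	      do_raw_spin_lock(lock)			[above]
 *	        arch_spin_lock(&lock->raw_lock)		[asm/spinlock.h]
 */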

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION is set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
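
/*
 * Illustrative sketch, with made-up names: two locks of the same lock
 * class taken in a fixed order (e.g. for a transfer between two objects
 * of the same type); the nested annotation tells lockdep this is an
 * intentional ordering, not a self-deadlock:
 *
 *	raw_spin_lock(&src->lock);
 *	raw_spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *	... move data from src to dst ...
 *	raw_spin_unlock(&dst->lock);
 *	raw_spin_unlock(&src->lock);
 */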

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
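
/*
 * Illustrative use of the trylock variants above (made-up lock and
 * fallback; a sketch, not taken from in-tree code):
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&dev->lock, flags)) {
 *		... fast path, lock held with local IRQs disabled ...
 *		raw_spin_unlock_irqrestore(&dev->lock, flags);
 *	} else {
 *		... contended: defer the work or retry later ...
 *	}
 */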

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)
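
/*
 * Illustrative sketch ("struct foo" is a made-up example): dynamic
 * initialization of an embedded lock with spin_lock_init(); statically
 * allocated locks can use DEFINE_SPINLOCK() from spinlock_types.h
 * instead:
 *
 *	struct foo {
 *		spinlock_t lock;
 *	};
 *
 *	f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (f)
 *		spin_lock_init(&f->lock);
 */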

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
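
/*
 * Illustrative sketch of the canonical IRQ-safe pattern built from the
 * wrappers above (my_lock is a made-up example):
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, local IRQs disabled ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */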

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
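
/*
 * Illustrative sketch (made-up helper): documenting a locking
 * precondition with the assertion above. Under lockdep,
 * lockdep_assert_held() is a stronger alternative, since it also
 * checks that the lock is held by the current context rather than
 * merely locked by someone:
 *
 *	static void foo_update_stats(struct foo *f)
 *	{
 *		assert_spin_locked(&f->lock);
 *		...
 *	}
 */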

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
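
/*
 * Illustrative sketch of the classic release pattern (made-up object
 * and list names): the list lock is taken only when the final
 * reference is dropped, so the unlink and the free happen atomically
 * with respect to concurrent lookups:
 *
 *	if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */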

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			     size_t max_size, unsigned int cpu_mult,
			     gfp_t gfp, const char *name,
			     struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})
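
/*
 * Illustrative sketch, assuming a made-up hash table that wants one
 * lock per bucket, capped at 1024 locks (passing cpu_mult == 0 requests
 * max_size locks outright instead of scaling the count with the number
 * of CPUs):
 *
 *	spinlock_t *locks;
 *	unsigned int mask;
 *
 *	if (alloc_bucket_spinlocks(&locks, &mask, 1024, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	spin_lock(&locks[hash & mask]);
 *	... protect one bucket ...
 *	spin_unlock(&locks[hash & mask]);
 *	...
 *	free_bucket_spinlocks(locks);
 */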

void free_bucket_spinlocks(spinlock_t *locks);

#endif /* __LINUX_SPINLOCK_H */