/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * Here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *  (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *  (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>

/*
 * Must define these before including other files; inline functions need them.
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)                                       \
do {                                                                    \
        static struct lock_class_key __key;                             \
                                                                        \
        __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);      \
} while (0)

#else
# define raw_spin_lock_init(lock)                                       \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

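/*
 * Example (illustrative sketch, not part of this header's API): a raw
 * spinlock can be defined statically or initialized at runtime before
 * first use. "watchdog_lock" and "struct my_dev" are hypothetical names.
 *
 *      static DEFINE_RAW_SPINLOCK(watchdog_lock);
 *
 *      static void my_dev_setup(struct my_dev *dev)
 *      {
 *              raw_spin_lock_init(&dev->hw_lock);
 *      }
 */
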
#define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)     (((void)(lock), 0))
#endif /* arch_spin_is_contended */

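/*
 * Example (illustrative sketch): raw_spin_is_contended() lets a long
 * lock holder voluntarily drop and re-take the lock when someone else is
 * spinning on it. The loop below is hypothetical; scheduler code wraps
 * the same idea in spin_needbreak()/cond_resched_lock().
 *
 *      raw_spin_lock(&lock);
 *      while (have_work()) {
 *              do_one_item();
 *              if (raw_spin_is_contended(&lock)) {
 *                      raw_spin_unlock(&lock);
 *                      cpu_relax();
 *                      raw_spin_lock(&lock);
 *              }
 *      }
 *      raw_spin_unlock(&lock);
 */
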
/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                          CPU1
 *
 *        WRITE_ONCE(X, 1);             WRITE_ONCE(Y, 1);
 *        spin_lock(S);                 smp_mb();
 *        smp_mb__after_spinlock();     r1 = READ_ONCE(X);
 *        r0 = READ_ONCE(Y);
 *        spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *        { X = 0;  Y = 0; }
 *
 *        CPU0                  CPU1                            CPU2
 *
 *        spin_lock(S);         spin_lock(S);                   r1 = READ_ONCE(Y);
 *        WRITE_ONCE(X, 1);     smp_mb__after_spinlock();       smp_rmb();
 *        spin_unlock(S);       r0 = READ_ONCE(X);              r2 = READ_ONCE(X);
 *                              WRITE_ONCE(Y, 1);
 *                              spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()        do { } while (0)
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock(&lock->raw_lock);
        mmiowb_spin_lock();
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)       arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
        __acquire(lock);
        arch_spin_lock_flags(&lock->raw_lock, *flags);
        mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&(lock)->raw_lock);

        if (ret)
                mmiowb_spin_lock();

        return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
        mmiowb_spin_unlock();
        arch_spin_unlock(&lock->raw_lock);
        __release(lock);
}
#endif

/*
 * Define the various spin_lock methods. Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION is set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)     _raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
        _raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)                       \
        do {                                                            \
                typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
                _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);  \
        } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)           \
        _raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
#endif

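/*
 * Example (illustrative sketch): when two locks of the same lock class
 * must be held at once, lockdep needs to be told about the intentional
 * nesting via the _nested variant, or the second acquisition would look
 * like a recursive deadlock. "struct foo" and the address-based ordering
 * rule are hypothetical.
 *
 *      static void foo_double_lock(struct foo *a, struct foo *b)
 *      {
 *              if (a > b)
 *                      swap(a, b);
 *              raw_spin_lock(&a->lock);
 *              raw_spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *      }
 */
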

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
        } while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
        do {                                                            \
                typecheck(unsigned long, flags);                        \
                flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
        } while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                flags = _raw_spin_lock_irqsave(lock);           \
        } while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)              \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
        } while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
        raw_spin_lock_irqsave(lock, flags)

#endif

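/*
 * Example (illustrative sketch): the flags argument must be an unsigned
 * long in the caller's frame, which the typecheck() above enforces. A
 * hypothetical helper reachable both with and without IRQs disabled:
 *
 *      static void evq_add(struct evq *q, struct event *e)
 *      {
 *              unsigned long flags;
 *
 *              raw_spin_lock_irqsave(&q->lock, flags);
 *              list_add_tail(&e->node, &q->list);
 *              raw_spin_unlock_irqrestore(&q->lock, flags);
 *      }
 */
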
#define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)                 \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
#define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
        return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)                                   \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __raw_spin_lock_init(spinlock_check(lock),              \
                             #lock, &__key, LD_WAIT_CONFIG);    \
} while (0)

#else

# define spin_lock_init(_lock)                  \
do {                                            \
        spinlock_check(_lock);                  \
        *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \
} while (0)

#endif

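/*
 * Example (illustrative sketch): an embedded spinlock in a dynamically
 * allocated object must be initialized before first use; statically
 * allocated locks can use DEFINE_SPINLOCK() instead. Names are
 * hypothetical.
 *
 *      static DEFINE_SPINLOCK(widget_list_lock);
 *
 *      struct widget *widget_alloc(gfp_t gfp)
 *      {
 *              struct widget *w = kzalloc(sizeof(*w), gfp);
 *
 *              if (w)
 *                      spin_lock_init(&w->lock);
 *              return w;
 *      }
 */
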
static __always_inline void spin_lock(spinlock_t *lock)
{
        raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
        raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
        return raw_spin_trylock(&lock->rlock);
}

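/*
 * Example (illustrative sketch): data shared between process context and
 * softirq (BH) context must be taken with the _bh variants from process
 * context, otherwise a softirq interrupting the critical section on the
 * same CPU could deadlock. The stats structure is hypothetical.
 *
 *      static void stats_add(struct stats *s, u64 val)
 *      {
 *              spin_lock_bh(&s->lock);
 *              s->total += val;
 *              spin_unlock_bh(&s->lock);
 *      }
 */
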
#define spin_lock_nested(lock, subclass)                        \
do {                                                            \
        raw_spin_lock_nested(spinlock_check(lock), subclass);   \
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)                            \
do {                                                                    \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
        raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)                          \
do {                                                            \
        raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)                 \
do {                                                                    \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
        raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
        raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
        raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
        return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
        return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)                       \
({                                                              \
        raw_spin_trylock_irqsave(spinlock_check(lock), flags);  \
})

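/*
 * Example (illustrative sketch): the trylock variants allow opportunistic
 * work from paths that must not spin, with a fallback when the lock is
 * busy. The device structure, dev_flush_pending() and the work item are
 * hypothetical.
 *
 *      unsigned long flags;
 *
 *      if (spin_trylock_irqsave(&dev->lock, flags)) {
 *              dev_flush_pending(dev);
 *              spin_unlock_irqrestore(&dev->lock, flags);
 *      } else {
 *              schedule_work(&dev->flush_work);
 *      }
 */
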
/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied by other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
        return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
        return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)        assert_raw_spin_locked(&(lock)->rlock)

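/*
 * Example (illustrative sketch): a helper whose caller must already hold
 * the lock can check that requirement with assert_spin_locked(); under
 * lockdep, lockdep_assert_held() is generally preferred because it also
 * verifies that the *current* context is the holder. Names are
 * hypothetical.
 *
 *      static void __queue_remove(struct queue *q, struct entry *e)
 *      {
 *              assert_spin_locked(&q->lock);
 *              list_del(&e->node);
 *      }
 */
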
/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1. If the result is 0, returns true and locks
 * @lock. Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
        __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

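/*
 * Example (illustrative sketch): the classic use is dropping the last
 * reference to an object that also sits on a locked list - the list lock
 * is only taken when the count actually reaches zero. Names are
 * hypothetical.
 *
 *      void obj_put(struct obj *obj)
 *      {
 *              if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
 *                      list_del(&obj->node);
 *                      spin_unlock(&obj_list_lock);
 *                      kfree(obj);
 *              }
 *      }
 */
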
extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                        unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
        __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                             size_t max_size, unsigned int cpu_mult,
                             gfp_t gfp, const char *name,
                             struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
        ({                                                                   \
                static struct lock_class_key key;                            \
                int ret;                                                     \
                                                                             \
                ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
                                               cpu_mult, gfp, #locks, &key); \
                ret;                                                         \
        })

void free_bucket_spinlocks(spinlock_t *locks);

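/*
 * Example (illustrative sketch): a hash table can share a small array of
 * bucket locks instead of embedding one lock per bucket; lock_mask is
 * filled in by the allocator and applied to the bucket hash. Names are
 * hypothetical.
 *
 *      static spinlock_t *bucket_locks;
 *      static unsigned int bucket_lock_mask;
 *
 *      static int table_init(void)
 *      {
 *              return alloc_bucket_spinlocks(&bucket_locks, &bucket_lock_mask,
 *                                            1024, 0, GFP_KERNEL);
 *      }
 *
 *      static spinlock_t *bucket_lock(u32 hash)
 *      {
 *              return &bucket_locks[hash & bucket_lock_mask];
 *      }
 */
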
#endif /* __LINUX_SPINLOCK_H */