/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)			\
do {							\
	static struct lock_class_key __key;		\
							\
	__raw_spin_lock_init((lock), #lock, &__key);	\
} while (0)

#else
# define raw_spin_lock_init(lock)			\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

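/*
 * Usage sketch (illustrative, not part of this header): a raw spinlock is
 * either defined statically via DEFINE_RAW_SPINLOCK() from spinlock_types.h
 * or initialized at runtime; "my_lock" and "my_dev" are hypothetical names:
 *
 *	static DEFINE_RAW_SPINLOCK(my_lock);
 *
 * or, for a lock embedded in a dynamically allocated object:
 *
 *	raw_spin_lock_init(&my_dev->lock);
 */
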
#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0				CPU1
 *
 *	  WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	  spin_lock(S);			smp_mb();
 *	  smp_mb__after_spinlock();	r1 = READ_ONCE(X);
 *	  r0 = READ_ONCE(Y);
 *	  spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0			CPU1				CPU2
 *
 *	  spin_lock(S);		spin_lock(S);			r1 = READ_ONCE(Y);
 *	  WRITE_ONCE(X, 1);	smp_mb__after_spinlock();	smp_rmb();
 *	  spin_unlock(S);	r0 = READ_ONCE(X);		r2 = READ_ONCE(X);
 *				WRITE_ONCE(Y, 1);
 *				spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif

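/*
 * Usage sketch (illustrative; modelled loosely on try_to_wake_up(), with
 * stand-in names rather than the scheduler's actual code): the barrier
 * upgrades the lock's ACQUIRE so the later load of ->state is ordered
 * against memory accesses program-order earlier than the lock:
 *
 *	raw_spin_lock_irqsave(&p->pi_lock, flags);
 *	smp_mb__after_spinlock();
 *	if (!(READ_ONCE(p->state) & state))
 *		goto unlock;
 */
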
#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
	mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&(lock)->raw_lock);

	if (ret)
		mmiowb_spin_lock();

	return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	mmiowb_spin_unlock();
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION is set. The
 * various methods are defined as nops when they are not required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0; }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

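/*
 * Usage sketch (illustrative; "my_lock" is a hypothetical raw_spinlock_t):
 * the trylock variants return 1 on success and 0 on failure, so the caller
 * only unlocks on the success path:
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&my_lock, flags)) {
 *		... critical section ...
 *		raw_spin_unlock_irqrestore(&my_lock, flags);
 *	}
 */
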
/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

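/*
 * Usage sketch (illustrative; "struct foo" is hypothetical): a spinlock_t
 * embedded in an object is initialized once, then taken and released
 * around the data it protects:
 *
 *	struct foo {
 *		spinlock_t lock;
 *		int count;
 *	};
 *
 *	spin_lock_init(&f->lock);
 *	...
 *	spin_lock(&f->lock);
 *	f->count++;
 *	spin_unlock(&f->lock);
 */
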
static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

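/*
 * Usage sketch (illustrative; "my_lock" is hypothetical): the irqsave
 * variants are for locks that can also be taken from hard interrupt
 * context; they save and restore the interrupt state rather than
 * unconditionally re-enabling interrupts on unlock:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, local interrupts disabled ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */
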
static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied by other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

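/*
 * Usage sketch (illustrative; "struct foo" and the helper are hypothetical):
 * a function that must only be called with the lock held can document and,
 * on debug builds, check that requirement:
 *
 *	static void foo_update_locked(struct foo *f)
 *	{
 *		assert_spin_locked(&f->lock);
 *		...
 *	}
 */
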
/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

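/*
 * Usage sketch (illustrative; "obj", "obj_list_lock" and the fields are
 * hypothetical): the classic pattern is dropping the last reference to an
 * object that sits on a lock-protected list:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->list);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */
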
extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			     size_t max_size, unsigned int cpu_mult,
			     gfp_t gfp, const char *name,
			     struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})

void free_bucket_spinlocks(spinlock_t *locks);

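/*
 * Usage sketch (illustrative; the parameter values and variable names are
 * made up): allocate an array of bucket locks for a hash table, pick one
 * by hashed index, and free the array on teardown.  The macro evaluates to
 * 0 on success and a negative errno on failure:
 *
 *	spinlock_t *bucket_locks;
 *	unsigned int lock_mask;
 *
 *	if (alloc_bucket_spinlocks(&bucket_locks, &lock_mask, 1024, 0,
 *				   GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&bucket_locks[hash & lock_mask]);
 *	...
 *	spin_unlock(&bucket_locks[hash & lock_mask]);
 *	free_bucket_spinlocks(bucket_locks);
 */
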
#endif /* __LINUX_SPINLOCK_H */