blob: b562f92893727c6885b1b75081783eb48770875f [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * Copyright (2004) Linus Torvalds
4 *
5 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
6 *
Ingo Molnarfb1c8f92005-09-10 00:25:56 -07007 * Copyright (2004, 2005) Ingo Molnar
8 *
9 * This file contains the spinlock/rwlock implementations for the
10 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
Andi Kleen0cb91a22006-09-26 10:52:28 +020011 *
12 * Note that some architectures have special knowledge about the
13 * stack frames of these functions in their profile_pc. If you
14 * change anything significant here that could change the stack
15 * frame contact the architecture maintainers.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/linkage.h>
19#include <linux/preempt.h>
20#include <linux/spinlock.h>
21#include <linux/interrupt.h>
Ingo Molnar8a25d5d2006-07-03 00:24:54 -070022#include <linux/debug_locks.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040023#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024
#ifdef CONFIG_MMIOWB
#ifndef arch_mmiowb_state
/*
 * Generic per-CPU mmiowb tracking state, used unless the architecture
 * supplies its own state via arch_mmiowb_state. Exported per-CPU so
 * modules can reference it through the mmiowb accessors.
 */
DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
#endif
#endif
31
/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * spinlock : include/linux/spinlock_api_smp.h
 * rwlock : include/linux/rwlock_api_smp.h
 */
#else

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 * Fall back to cpu_relax() when no arch-specific relax hook exists.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)	cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)	cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)	cpu_relax()
#endif

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here is only one user per function
 * which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * Note the spin pattern below: the trylock is attempted with preemption
 * disabled, but on failure preemption is re-enabled (and, for the
 * irqsave variant, interrupts restored) before relaxing and retrying,
 * so the spinner stays preemptible while waiting.
 */
#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc __raw_##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		preempt_enable();					\
									\
		arch_##op##_relax(&lock->raw_lock);			\
	}								\
}									\
									\
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		arch_##op##_relax(&lock->raw_lock);			\
	}								\
									\
	return flags;							\
}									\
									\
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)		\
{									\
	_raw_##op##_lock_irqsave(lock);					\
}									\
									\
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
{									\
	unsigned long flags;						\
									\
	/* */								\
	/* Careful: we must exclude softirqs too, hence the */		\
	/* irq-disabling. We use the generic preemption-aware */	\
	/* function: */							\
	/**/								\
	flags = _raw_##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);

/* rwlock variants are compiled out on PREEMPT_RT (see guard below). */
#ifndef CONFIG_PREEMPT_RT
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
#endif

#endif
134
/*
 * Out-of-line spinlock acquire entry points. Each wrapper is built only
 * when the corresponding CONFIG_INLINE_SPIN_* option is disabled; it
 * simply forwards to the __raw_* implementation and exports the symbol
 * so modules get the out-of-line version.
 */
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
	return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
/* Returns the saved irq flags for the matching unlock_irqrestore. */
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
	__raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif
182
/*
 * Out-of-line spinlock release entry points. Note the inverted guard
 * polarity on the first one: plain _raw_spin_unlock is controlled by
 * CONFIG_UNINLINE_SPIN_UNLOCK (#ifdef), unlike the CONFIG_INLINE_*
 * (#ifndef) guards used everywhere else in this file.
 */
#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
	__raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
/* @flags: the value returned by the matching _raw_spin_lock_irqsave(). */
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	__raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	__raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
214
/*
 * rwlock entry points. The whole rwlock section (through the matching
 * #endif below) is compiled out on PREEMPT_RT, which presumably supplies
 * its own rwlock implementation elsewhere -- confirm against the RT
 * locking code.
 */
#ifndef CONFIG_PREEMPT_RT

/* Out-of-line reader-side acquire wrappers, one per CONFIG_INLINE_* knob. */
#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
	return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
	__raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
/* Returns the saved irq flags for the matching unlock_irqrestore. */
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
	return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
	__raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
	__raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif
256
/* Out-of-line reader-side release wrappers. */
#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
	__raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
/* @flags: the value returned by the matching _raw_read_lock_irqsave(). */
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
	__raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
	__raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif
288
/* Out-of-line writer-side acquire wrappers. */
#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
	return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
	__raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
/* Returns the saved irq flags for the matching unlock_irqrestore. */
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
	return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
	__raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
	__raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif
328
/* Out-of-line writer-side release wrappers. */
#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
	__raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
/* @flags: the value returned by the matching _raw_write_lock_irqsave(). */
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
	__raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
	__raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif

/* End of the rwlock entry points excluded on PREEMPT_RT. */
#endif /* !CONFIG_PREEMPT_RT */
362
/*
 * Lockdep-annotated spinlock acquire variants, only built with
 * CONFIG_DEBUG_LOCK_ALLOC. Each one records the acquisition with
 * lockdep (spin_acquire/spin_acquire_nest) before taking the lock via
 * LOCK_CONTENDED, which distinguishes contended from uncontended
 * acquisition for the lock statistics.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/*
 * Acquire @lock, annotating it for lockdep with @subclass so that
 * acquiring locks of the same class at different subclasses is not
 * flagged as a deadlock.
 */
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);

/*
 * As _raw_spin_lock_nested(), but disables interrupts first and returns
 * the saved flags for the caller's later irqrestore. Note irqs go off
 * before preemption is disabled and before the lockdep annotation.
 */
unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
						       int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
	return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

/*
 * Acquire @lock while telling lockdep (via spin_acquire_nest) that the
 * nesting is serialized by @nest_lock, which the caller must hold.
 */
void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
					 struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);

#endif
396
Steven Rostedt0764d232008-05-12 21:20:44 +0200397notrace int in_lock_functions(unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700398{
399 /* Linker adds these: start and end of __lockfunc functions */
400 extern char __lock_text_start[], __lock_text_end[];
401
402 return addr >= (unsigned long)__lock_text_start
403 && addr < (unsigned long)__lock_text_end;
404}
405EXPORT_SYMBOL(in_lock_functions);