// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame contact the architecture maintainers.
 */
17
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <linux/linkage.h>
19#include <linux/preempt.h>
20#include <linux/spinlock.h>
21#include <linux/interrupt.h>
Ingo Molnar8a25d5d2006-07-03 00:24:54 -070022#include <linux/debug_locks.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040023#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024
/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * spinlock : include/linux/spinlock_api_smp.h
 * rwlock   : include/linux/rwlock_api_smp.h
 */
#else

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 * Fall back to a plain cpu_relax() for the ones that don't provide
 * their own variant.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)	cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)	cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)	cpu_relax()
#endif

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here is only one user per function
 * which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)					\
/* Spin until the trylock succeeds; preemption is only disabled */	\
/* while we actually hold (or attempt) the lock. */			\
void __lockfunc __raw_##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		/* Re-enable preemption between attempts so a */	\
		/* higher-priority task can run while we spin. */	\
		preempt_enable();					\
									\
		arch_##op##_relax(&lock->raw_lock);			\
	}								\
}									\
									\
/* As above, but also disable interrupts while the lock is held; */	\
/* irqs are re-enabled between failed attempts. */			\
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(do_raw_##op##_trylock(lock)))		\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		arch_##op##_relax(&lock->raw_lock);			\
	}								\
									\
	return flags;							\
}									\
									\
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)		\
{									\
	_raw_##op##_lock_irqsave(lock);					\
}									\
									\
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)		\
{									\
	unsigned long flags;						\
									\
	/*								\
	 * Careful: we must exclude softirqs too, hence the		\
	 * irq-disabling. We use the generic preemption-aware		\
	 * function:							\
	 */								\
	flags = _raw_##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);

#endif
124
/*
 * Out-of-line spinlock entry points. Each is compiled only when the
 * corresponding CONFIG_INLINE_SPIN_* option is off (resp. when
 * CONFIG_UNINLINE_SPIN_UNLOCK is on); otherwise callers get the
 * inline from include/linux/spinlock_api_smp.h directly.
 */
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
	return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
	__raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
	__raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

/* Note: inverted sense — this one is built when UNINLINE is set. */
#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
	__raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
	__raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	__raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	__raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
204
/*
 * Out-of-line read-lock entry points, built only when the matching
 * CONFIG_INLINE_READ_* option is off; each just calls the inline
 * __raw_* implementation from include/linux/rwlock_api_smp.h.
 */
#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
	return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
	__raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
	return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
	__raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
	__raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
	__raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
	__raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
	__raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif
276
/*
 * Out-of-line write-lock entry points, built only when the matching
 * CONFIG_INLINE_WRITE_* option is off; each just calls the inline
 * __raw_* implementation from include/linux/rwlock_api_smp.h.
 */
#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
	return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
	__raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
	return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
	__raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
	__raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
	__raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	__raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
	__raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
	__raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
348
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/*
 * Lockdep-annotated spinlock acquisition: pass "subclass" through to
 * the spin_acquire() annotation, then take the lock via
 * LOCK_CONTENDED() so contention is recorded.
 */
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
	/* Preemption must be off before the lockdep annotation. */
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);

/*
 * As _raw_spin_lock_nested(), but also disables interrupts and
 * returns the saved flags for the matching irqrestore unlock.
 */
unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
						   int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	/* The _FLAGS variant lets the arch lock op see the saved flags. */
	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
				do_raw_spin_lock_flags, &flags);
	return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

/*
 * Acquire "lock" while annotating it as nested inside "nest_lock"
 * via spin_acquire_nest(), for lockdep's benefit.
 */
void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
				     struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);

#endif
383
Steven Rostedt0764d232008-05-12 21:20:44 +0200384notrace int in_lock_functions(unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385{
386 /* Linker adds these: start and end of __lockfunc functions */
387 extern char __lock_text_start[], __lock_text_end[];
388
389 return addr >= (unsigned long)__lock_text_start
390 && addr < (unsigned long)__lock_text_end;
391}
392EXPORT_SYMBOL(in_lock_functions);