#ifndef __LINUX_SPINLOCK_UP_H
#define __LINUX_SPINLOCK_UP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

#include <asm/processor.h>	/* for cpu_relax() */
#include <asm/barrier.h>

/*
 * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * In the debug case, 1 means unlocked, 0 means locked. (the values
 * are inverted, to catch initialization bugs)
 *
 * No atomicity anywhere, we are on UP. However, we still need
 * the compiler barriers, because we do not want the compiler to
 * move potentially faulting instructions (notably user accesses)
 * into the locked sequence, resulting in non-atomic execution.
 */
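/*
 * An illustrative sketch (not part of this header, variable names are
 * hypothetical): a caller such as
 *
 *	arch_spin_lock(&lock);
 *	err = get_user(val, uptr);
 *	arch_spin_unlock(&lock);
 *
 * relies on the get_user() access staying between the two plain stores
 * to ->slock.  The barrier() calls below are what keep the compiler
 * from hoisting or sinking that access out of the locked sequence; on
 * UP no CPU-level ordering is required.
 */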

#ifdef CONFIG_DEBUG_SPINLOCK
#define arch_spin_is_locked(x)	((x)->slock == 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	lock->slock = 0;
	barrier();
}

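/*
 * With the inverted debug encoding above, a non-zero return means the
 * lock was free (slock == 1) and has now been taken; a zero-initialized
 * (i.e. never properly initialized) lock reads as already held.
 */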
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	char oldval = lock->slock;

	lock->slock = 0;
	barrier();

	return oldval > 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	barrier();
	lock->slock = 1;
}

/*
 * Read-write spinlocks. No debug version.
 */
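/*
 * On UP nothing else can hold these locks, so the trylock variants can
 * unconditionally return 1; the (void)(lock) casts just keep the lock
 * argument evaluated so it does not trigger "unused" warnings.
 */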
#define arch_read_lock(lock)	do { barrier(); (void)(lock); } while (0)
#define arch_write_lock(lock)	do { barrier(); (void)(lock); } while (0)
#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
#define arch_read_unlock(lock)	do { barrier(); (void)(lock); } while (0)
#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)

#else /* DEBUG_SPINLOCK */
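/*
 * With spinlock debugging disabled, every operation collapses to a
 * compiler barrier plus an evaluation of the otherwise-unused lock
 * argument.
 */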
#define arch_spin_is_locked(lock)	((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_lock(lock)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */

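/* A UP spinlock can never be contended, whichever variant above is built. */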
#define arch_spin_is_contended(lock)	(((void)(lock), 0))

#endif /* __LINUX_SPINLOCK_UP_H */