blob: b4548690ade9916e1d94e844641a505fad43bea4 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
Ralf Baechlef65e4fa2006-09-28 01:45:21 +01006 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 */
9#ifndef _ASM_SPINLOCK_H
10#define _ASM_SPINLOCK_H
11
Ralf Baechle2a31b032008-08-28 15:17:49 +010012#include <linux/compiler.h>
13
Ralf Baechle0004a9d2006-10-31 03:45:07 +000014#include <asm/barrier.h>
Maciej W. Rozyckib0984c42014-11-15 22:08:48 +000015#include <asm/compiler.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <asm/war.h>
17
18/*
19 * Your basic SMP spinlocks, allowing only a single CPU anywhere
Ralf Baechle2a31b032008-08-28 15:17:49 +010020 *
Ralf Baechle70342282013-01-22 12:59:30 +010021 * Simple spin lock operations. There are two variants, one clears IRQ's
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 * on the local processor, one does not.
23 *
Ralf Baechle2a31b032008-08-28 15:17:49 +010024 * These are fair FIFO ticket locks
25 *
26 * (the type definitions are in asm/spinlock_types.h)
Linus Torvalds1da177e2005-04-16 15:20:36 -070027 */
28
Ralf Baechle2a31b032008-08-28 15:17:49 +010029
30/*
31 * Ticket locks are conceptually two parts, one indicating the current head of
32 * the queue, and the other indicating the current tail. The lock is acquired
33 * by atomically noting the tail and incrementing it by one (thus adding
34 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
36 */
37
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010038static inline int arch_spin_is_locked(arch_spinlock_t *lock)
Ralf Baechle2a31b032008-08-28 15:17:49 +010039{
David Daney500c2e12010-02-04 11:31:49 -080040 u32 counters = ACCESS_ONCE(lock->lock);
Ralf Baechle2a31b032008-08-28 15:17:49 +010041
David Daney500c2e12010-02-04 11:31:49 -080042 return ((counters >> 16) ^ counters) & 0xffff;
Ralf Baechle2a31b032008-08-28 15:17:49 +010043}
44
/* Ticket locks ignore the flags argument; IRQ state is left to the caller. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/* Busy-wait (without acquiring) until the lock is observed unlocked. */
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }
Ralf Baechle2a31b032008-08-28 15:17:49 +010048
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010049static inline int arch_spin_is_contended(arch_spinlock_t *lock)
Ralf Baechle2a31b032008-08-28 15:17:49 +010050{
David Daney500c2e12010-02-04 11:31:49 -080051 u32 counters = ACCESS_ONCE(lock->lock);
Ralf Baechle2a31b032008-08-28 15:17:49 +010052
David Daney500c2e12010-02-04 11:31:49 -080053 return (((counters >> 16) - counters) & 0xffff) > 1;
Ralf Baechle2a31b032008-08-28 15:17:49 +010054}
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010055#define arch_spin_is_contended arch_spin_is_contended
Ralf Baechle2a31b032008-08-28 15:17:49 +010056
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010057static inline void arch_spin_lock(arch_spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -070058{
Ralf Baechle2a31b032008-08-28 15:17:49 +010059 int my_ticket;
60 int tmp;
David Daney500c2e12010-02-04 11:31:49 -080061 int inc = 0x10000;
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
63 if (R10000_LLSC_WAR) {
Ralf Baechle2a31b032008-08-28 15:17:49 +010064 __asm__ __volatile__ (
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010065 " .set push # arch_spin_lock \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +010066 " .set noreorder \n"
67 " \n"
68 "1: ll %[ticket], %[ticket_ptr] \n"
David Daney500c2e12010-02-04 11:31:49 -080069 " addu %[my_ticket], %[ticket], %[inc] \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +010070 " sc %[my_ticket], %[ticket_ptr] \n"
71 " beqzl %[my_ticket], 1b \n"
Linus Torvalds1da177e2005-04-16 15:20:36 -070072 " nop \n"
David Daney500c2e12010-02-04 11:31:49 -080073 " srl %[my_ticket], %[ticket], 16 \n"
74 " andi %[ticket], %[ticket], 0xffff \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +010075 " bne %[ticket], %[my_ticket], 4f \n"
76 " subu %[ticket], %[my_ticket], %[ticket] \n"
77 "2: \n"
Ralf Baechlef65e4fa2006-09-28 01:45:21 +010078 " .subsection 2 \n"
David Daney500c2e12010-02-04 11:31:49 -080079 "4: andi %[ticket], %[ticket], 0xffff \n"
David Daney0e6826c2009-03-27 10:07:02 -070080 " sll %[ticket], 5 \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +010081 " \n"
82 "6: bnez %[ticket], 6b \n"
83 " subu %[ticket], 1 \n"
84 " \n"
David Daney500c2e12010-02-04 11:31:49 -080085 " lhu %[ticket], %[serving_now_ptr] \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +010086 " beq %[ticket], %[my_ticket], 2b \n"
87 " subu %[ticket], %[my_ticket], %[ticket] \n"
David Daney0e6826c2009-03-27 10:07:02 -070088 " b 4b \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +010089 " subu %[ticket], %[ticket], 1 \n"
Ralf Baechlef65e4fa2006-09-28 01:45:21 +010090 " .previous \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +010091 " .set pop \n"
Markos Chandras94bfb752015-01-26 12:44:11 +000092 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
David Daney500c2e12010-02-04 11:31:49 -080093 [serving_now_ptr] "+m" (lock->h.serving_now),
Ralf Baechle2a31b032008-08-28 15:17:49 +010094 [ticket] "=&r" (tmp),
David Daney500c2e12010-02-04 11:31:49 -080095 [my_ticket] "=&r" (my_ticket)
96 : [inc] "r" (inc));
Ralf Baechle2a31b032008-08-28 15:17:49 +010097 } else {
98 __asm__ __volatile__ (
Thomas Gleixner0199c4e2009-12-02 20:01:25 +010099 " .set push # arch_spin_lock \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +0100100 " .set noreorder \n"
101 " \n"
David Daney500c2e12010-02-04 11:31:49 -0800102 "1: ll %[ticket], %[ticket_ptr] \n"
103 " addu %[my_ticket], %[ticket], %[inc] \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +0100104 " sc %[my_ticket], %[ticket_ptr] \n"
David Daney500c2e12010-02-04 11:31:49 -0800105 " beqz %[my_ticket], 1b \n"
106 " srl %[my_ticket], %[ticket], 16 \n"
107 " andi %[ticket], %[ticket], 0xffff \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +0100108 " bne %[ticket], %[my_ticket], 4f \n"
109 " subu %[ticket], %[my_ticket], %[ticket] \n"
110 "2: \n"
111 " .subsection 2 \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +0100112 "4: andi %[ticket], %[ticket], 0x1fff \n"
David Daney0e6826c2009-03-27 10:07:02 -0700113 " sll %[ticket], 5 \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +0100114 " \n"
115 "6: bnez %[ticket], 6b \n"
116 " subu %[ticket], 1 \n"
117 " \n"
David Daney500c2e12010-02-04 11:31:49 -0800118 " lhu %[ticket], %[serving_now_ptr] \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +0100119 " beq %[ticket], %[my_ticket], 2b \n"
120 " subu %[ticket], %[my_ticket], %[ticket] \n"
David Daney0e6826c2009-03-27 10:07:02 -0700121 " b 4b \n"
Ralf Baechle2a31b032008-08-28 15:17:49 +0100122 " subu %[ticket], %[ticket], 1 \n"
123 " .previous \n"
124 " .set pop \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000125 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
David Daney500c2e12010-02-04 11:31:49 -0800126 [serving_now_ptr] "+m" (lock->h.serving_now),
Ralf Baechle2a31b032008-08-28 15:17:49 +0100127 [ticket] "=&r" (tmp),
David Daney500c2e12010-02-04 11:31:49 -0800128 [my_ticket] "=&r" (my_ticket)
129 : [inc] "r" (inc));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130 }
Ralf Baechle0004a9d2006-10-31 03:45:07 +0000131
Ralf Baechle17099b12007-07-14 13:24:05 +0100132 smp_llsc_mb();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700133}
134
Thomas Gleixner0199c4e2009-12-02 20:01:25 +0100135static inline void arch_spin_unlock(arch_spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700136{
David Daney500c2e12010-02-04 11:31:49 -0800137 unsigned int serving_now = lock->h.serving_now + 1;
138 wmb();
139 lock->h.serving_now = (u16)serving_now;
140 nudge_writes();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700141}
142
/*
 * arch_spin_trylock - attempt to acquire the ticket lock without queuing.
 * @lock: the spinlock in question.
 *
 * Succeeds only when the next-ticket half (upper 16 bits) equals the
 * now-serving half (lower 16 bits), i.e. nobody holds or waits for the
 * lock; in that case the next-ticket half is bumped by 0x10000.
 * Returns nonzero on success, 0 if the lock was busy.
 */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;	/* +1 in the next-ticket (upper 16 bit) half */

	if (R10000_LLSC_WAR) {
		/* Branch-likely (beqzl) form for the R10000 ll/sc workaround. */
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	/* Acquire barrier on the success path (harmless on failure). */
	smp_llsc_mb();

	return tmp;
}
202
203/*
204 * Read-write spinlocks, allowing multiple readers but only one writer.
205 *
206 * NOTE! it is quite common to have readers in interrupts but no interrupt
207 * writers. For those circumstances we can "mix" irq-safe locks - any writer
208 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
209 * read-locks.
210 */
211
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * Readers only increment; a writer sets the sign bit (lui 0x8000 in
 * arch_write_lock), so a non-negative value means no writer holds it.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * Writers need the lock word to be exactly zero: no readers, no writer.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)
Ralf Baechlee3c48072005-02-03 13:34:45 +0000223
/*
 * arch_read_lock - acquire the rwlock for reading.
 * @rw: the rwlock in question.
 *
 * Spins while the lock word is negative (writer present: sign bit set),
 * then atomically increments the reader count via ll/sc.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		/* Branch-likely (beqzl) form for the R10000 ll/sc workaround. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		/* Retry the whole ll/sc sequence from C when sc fails. */
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_lock	\n"
			"	bltz	%1, 1b				\n"
			"	 addu	%1, 1				\n"
			"2:	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	/* Acquire barrier: critical section must not float above the lock. */
	smp_llsc_mb();
}
256
Thomas Gleixnere5931942009-12-03 20:08:46 +0100257static inline void arch_read_unlock(arch_rwlock_t *rw)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258{
259 unsigned int tmp;
260
David Daneyf252ffd2010-01-08 17:17:43 -0800261 smp_mb__before_llsc();
Ralf Baechle0004a9d2006-10-31 03:45:07 +0000262
Linus Torvalds1da177e2005-04-16 15:20:36 -0700263 if (R10000_LLSC_WAR) {
264 __asm__ __volatile__(
Thomas Gleixnere5931942009-12-03 20:08:46 +0100265 "1: ll %1, %2 # arch_read_unlock \n"
Markos Chandras57537622014-11-24 14:11:39 +0000266 " addiu %1, 1 \n"
Linus Torvalds1da177e2005-04-16 15:20:36 -0700267 " sc %1, %0 \n"
268 " beqzl %1, 1b \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000269 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
270 : GCC_OFF_SMALL_ASM() (rw->lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271 : "memory");
272 } else {
Ralf Baechlee01961c2013-04-11 00:16:53 +0200273 do {
274 __asm__ __volatile__(
275 "1: ll %1, %2 # arch_read_unlock \n"
Markos Chandras57537622014-11-24 14:11:39 +0000276 " addiu %1, -1 \n"
Ralf Baechlee01961c2013-04-11 00:16:53 +0200277 " sc %1, %0 \n"
Markos Chandras94bfb752015-01-26 12:44:11 +0000278 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
279 : GCC_OFF_SMALL_ASM() (rw->lock)
Ralf Baechlee01961c2013-04-11 00:16:53 +0200280 : "memory");
281 } while (unlikely(!tmp));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282 }
283}
284
/*
 * arch_write_lock - acquire the rwlock for writing.
 * @rw: the rwlock in question.
 *
 * Spins while the lock word is nonzero (readers or a writer present),
 * then atomically stores 0x80000000 (lui 0x8000: sign bit set) to mark
 * exclusive writer ownership.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		/* Branch-likely (beqzl) form for the R10000 ll/sc workaround. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		/* Retry the whole ll/sc sequence from C when sc fails. */
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_write_lock	\n"
			"	bnez	%1, 1b				\n"
			"	 lui	%1, 0x8000			\n"
			"2:	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	/* Acquire barrier: critical section must not float above the lock. */
	smp_llsc_mb();
}
317
/*
 * arch_write_unlock - release the rwlock held for writing.
 * @rw: the rwlock in question.
 *
 * A plain store of zero suffices: the writer owns the lock exclusively,
 * so no atomic sequence is needed.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	/* Release barrier: critical-section accesses complete first. */
	smp_mb();

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}
329
/*
 * arch_read_trylock - attempt to acquire the rwlock for reading.
 * @rw: the rwlock in question.
 *
 * Fails immediately (ret = 0) if the lock word is negative (writer
 * present); otherwise increments the reader count via ll/sc and
 * returns 1.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		/* Branch-likely (beqzl) form for the R10000 ll/sc workaround. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	}

	return ret;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
/*
 * arch_write_trylock - attempt to acquire the rwlock for writing.
 * @rw: the rwlock in question.
 *
 * Fails immediately (ret = 0) if the lock word is nonzero (readers or
 * a writer present); otherwise stores 0x80000000 (lui 0x8000) via
 * ll/sc to claim exclusive ownership and returns 1.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		/* Branch-likely (beqzl) form for the R10000 ll/sc workaround. */
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		/* Retry from C only when sc itself failed (tmp == 0). */
		do {
			__asm__ __volatile__(
			"	ll	%1, %3	# arch_write_trylock	\n"
			"	li	%2, 0				\n"
			"	bnez	%1, 2f				\n"
			"	 lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
			"2:						\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
			  "=&r" (ret)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));

		/* Acquire barrier on this path; the WAR path used __WEAK_LLSC_MB. */
		smp_llsc_mb();
	}

	return ret;
}
417
/* rwlocks ignore the flags argument; IRQ state is left to the caller. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* What to do while spinning on a contended lock: just ease off the bus. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
Martin Schwidefskyef6edc92006-09-30 23:27:43 -0700424
Linus Torvalds1da177e2005-04-16 15:20:36 -0700425#endif /* _ASM_SPINLOCK_H */