/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */


/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */

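/*
 * In this implementation the two parts share one 32-bit word: the high
 * half is the next ticket to hand out, the low half (h.serving_now) is
 * the ticket currently being served.  A minimal sketch of the same
 * algorithm in portable C, assuming a hypothetical 16-bit atomic
 * fetch-and-add (the functions below do the equivalent with ll/sc):
 *
 *	u16 mine = atomic_fetch_add(&lock->ticket, 1);	// take a ticket
 *	while (READ_ONCE(lock->serving_now) != mine)
 *		cpu_relax();				// wait for our turn
 *	...critical section...
 *	WRITE_ONCE(lock->serving_now, lock->serving_now + 1);	// release
 */
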
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return ((counters >> 16) ^ counters) & 0xffff;
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }

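/*
 * The lock is contended when more than one ticket is outstanding, i.e.
 * somebody is queued behind the current holder.
 */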
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return (((counters >> 16) - counters) & 0xffff) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

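/*
 * Take a ticket with an ll/sc sequence (adding 0x10000 bumps the high
 * half).  If it is not our turn, the out-of-line code in .subsection 2
 * spins with a delay proportional to our distance from the head of the
 * queue before re-reading serving_now.
 */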
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int tmp;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 1b			\n"
		"	 srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0x1fff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1		\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();
}

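/*
 * Release simply hands the lock to the next ticket.  wmb() orders the
 * critical section before the serving_now update; nudge_writes() helps
 * the store become visible to spinning waiters promptly.
 */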
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int serving_now = lock->h.serving_now + 1;
	wmb();
	lock->h.serving_now = (u16)serving_now;
	nudge_writes();
}

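/*
 * A single ll/sc attempt: succeed only if no ticket is outstanding
 * (my_ticket == now_serving), returning 1 on success and 0 otherwise.
 */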
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+m" (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();

	return tmp;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

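/*
 * The lock word encodes the reader count; a writer sets the sign bit
 * (0x80000000).  So the word is 0 when free, positive while readers
 * hold the lock, and negative while a writer holds it.
 */
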
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_lock	\n"
			"	bltz	%1, 1b				\n"
			"	 addu	%1, 1				\n"
			"2:	sc	%1, %0				\n"
			: "=m" (rw->lock), "=&r" (tmp)
			: "m" (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}

/*
 * Note the use of sub, not subu, which will make the kernel die with an
 * overflow exception if we ever try to unlock an rwlock that is already
 * unlocked or is being held by a writer.
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# arch_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_unlock	\n"
			"	sub	%1, 1				\n"
			"	sc	%1, %0				\n"
			: "=m" (rw->lock), "=&r" (tmp)
			: "m" (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}
}

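/*
 * A writer acquires by atomically replacing a zero lock word with
 * 0x80000000, spinning while any reader or writer holds the lock.
 */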
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_write_lock	\n"
			"	bnez	%1, 1b				\n"
			"	 lui	%1, 0x8000			\n"
			"2:	sc	%1, %0				\n"
			: "=m" (rw->lock), "=&r" (tmp)
			: "m" (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}

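/*
 * Writer release needs no ll/sc: a plain store of 0 hands the lock
 * back, and smp_mb() orders the critical section before the store.
 */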
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

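/*
 * One attempt at a read lock: bail out immediately if a writer holds
 * the lock (negative word), otherwise try to bump the reader count.
 */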
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

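/*
 * One attempt at the write lock: fail if the lock word is non-zero,
 * otherwise try to set the writer bit once.
 */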
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"	ll	%1, %3	# arch_write_trylock	\n"
			"	li	%2, 0				\n"
			"	bnez	%1, 2f				\n"
			"	 lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
			"2:						\n"
			: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
			: "m" (rw->lock)
			: "memory");
		} while (unlikely(!tmp));

		smp_llsc_mb();
	}

	return ret;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */