/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/war.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the
 * head becomes equal to the initial value of the tail.
 */

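/*
 * The 32-bit lock word is split into two 16-bit halves: the low half is
 * h.serving_now, the ticket currently being served, and the high half is
 * the next ticket to hand out (which is why the lock path adds
 * inc == 0x10000).  Ignoring atomicity and ordering, the lock fast path
 * below is roughly
 *
 *	my_ticket = lock->h.ticket++;		// the ll/sc sequence
 *	while (lock->h.serving_now != my_ticket)
 *		;				// spin, with delay-loop backoff
 *
 * and arch_spin_is_locked()/arch_spin_is_contended() just compare the two
 * halves of a single snapshot of the lock word.
 */
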
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return ((counters >> 16) ^ counters) & 0xffff;
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	u32 counters = ACCESS_ONCE(lock->lock);

	return (((counters >> 16) - counters) & 0xffff) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int tmp;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqzl	%[my_ticket], 1b			\n"
		"	 nop						\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_lock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	addu	%[my_ticket], %[ticket], %[inc]		\n"
		"	sc	%[my_ticket], %[ticket_ptr]		\n"
		"	beqz	%[my_ticket], 1b			\n"
		"	 srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[ticket], %[ticket], 0xffff		\n"
		"	bne	%[ticket], %[my_ticket], 4f		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"2:							\n"
		"	.subsection 2					\n"
		"4:	andi	%[ticket], %[ticket], 0xffff		\n"
		"	sll	%[ticket], 5				\n"
		"							\n"
		"6:	bnez	%[ticket], 6b				\n"
		"	 subu	%[ticket], 1				\n"
		"							\n"
		"	lhu	%[ticket], %[serving_now_ptr]		\n"
		"	beq	%[ticket], %[my_ticket], 2b		\n"
		"	 subu	%[ticket], %[my_ticket], %[ticket]	\n"
		"	b	4b					\n"
		"	 subu	%[ticket], %[ticket], 1			\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();
}

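/*
 * Only the lock holder ever writes serving_now and it occupies its own
 * 16-bit half of the lock word, so unlocking is a plain half-word store
 * of the incremented value after a wmb(); no ll/sc sequence is needed.
 */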
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int serving_now = lock->h.serving_now + 1;
	wmb();
	lock->h.serving_now = (u16)serving_now;
	nudge_writes();
}

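/*
 * Grab a ticket only if the lock is currently free (serving_now equals
 * the next ticket to hand out); returns 1 on success and 0 if the lock
 * is held by someone else.
 */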
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp, tmp2, tmp3;
	int inc = 0x10000;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqzl	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	} else {
		__asm__ __volatile__ (
		"	.set push		# arch_spin_trylock	\n"
		"	.set noreorder					\n"
		"							\n"
		"1:	ll	%[ticket], %[ticket_ptr]		\n"
		"	srl	%[my_ticket], %[ticket], 16		\n"
		"	andi	%[now_serving], %[ticket], 0xffff	\n"
		"	bne	%[my_ticket], %[now_serving], 3f	\n"
		"	 addu	%[ticket], %[ticket], %[inc]		\n"
		"	sc	%[ticket], %[ticket_ptr]		\n"
		"	beqz	%[ticket], 1b				\n"
		"	 li	%[ticket], 1				\n"
		"2:							\n"
		"	.subsection 2					\n"
		"3:	b	2b					\n"
		"	 li	%[ticket], 0				\n"
		"	.previous					\n"
		"	.set pop					\n"
		: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
		: [inc] "r" (inc));
	}

	smp_llsc_mb();

	return tmp;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

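/*
 * The rwlock word holds the reader count in its low bits; a writer claims
 * the lock by setting bit 31 (the "lui %1, 0x8000" below).  A negative
 * value therefore means a writer is in, zero means the lock is free, and
 * a positive value is the number of active readers.
 */
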
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_lock	\n"
			"	bltz	%1, 1b				\n"
			"	 addu	%1, 1				\n"
			"2:	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	smp_mb__before_llsc();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# arch_read_unlock	\n"
		"	addiu	%1, -1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_read_unlock	\n"
			"	addiu	%1, -1				\n"
			"	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"1:	ll	%1, %2	# arch_write_lock	\n"
			"	bnez	%1, 1b				\n"
			"	 lui	%1, 0x8000			\n"
			"2:	sc	%1, %0				\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}

	smp_llsc_mb();
}

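/*
 * The writer owns the whole lock word, so releasing it is a plain store
 * of zero once smp_mb() has ordered the critical section before the
 * release.
 */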
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# arch_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	}

	return ret;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# arch_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_LLSC_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
		: GCC_OFF_SMALL_ASM() (rw->lock)
		: "memory");
	} else {
		do {
			__asm__ __volatile__(
			"	ll	%1, %3	# arch_write_trylock	\n"
			"	li	%2, 0				\n"
			"	bnez	%1, 2f				\n"
			"	 lui	%1, 0x8000			\n"
			"	sc	%1, %0				\n"
			"	li	%2, 1				\n"
			"2:						\n"
			: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
			  "=&r" (ret)
			: GCC_OFF_SMALL_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));

		smp_llsc_mb();
	}

	return ret;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */