/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>
#include <linux/irqflags.h>
#include <asm/compiler.h>
#include <asm/war.h>

/*
 * Using a branch-likely instruction to check the result of an sc instruction
 * works around a bug present in R10000 CPUs prior to revision 3.0 that could
 * cause ll-sc sequences to execute non-atomically.
 */
#if R10000_LLSC_WAR
# define __scbeqz "beqzl"
#else
# define __scbeqz "beqz"
#endif

/*
 * These functions don't exist, so if they are called you'll either:
 *
 * - Get an error at compile-time due to __compiletime_error, if supported by
 *   your compiler.
 *
 * or:
 *
 * - Get an error at link-time due to the call to the missing function.
 */
extern unsigned long __cmpxchg_called_with_bad_pointer(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern unsigned long __cmpxchg64_unsupported(void)
	__compiletime_error("cmpxchg64 not available; cpu_has_64bits may be false");
extern unsigned long __xchg_called_with_bad_pointer(void)
	__compiletime_error("Bad argument size for xchg");

#define __xchg_asm(ld, st, m, val)					\
({									\
	__typeof(*(m)) __ret;						\
									\
	if (kernel_uses_llsc) {						\
		loongson_llsc_mb();					\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	noat				\n"	\
		"	.set	push				\n"	\
		"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"	\
		"1:	" ld "	%0, %2		# __xchg_asm	\n"	\
		"	.set	pop				\n"	\
		"	move	$1, %z3				\n"	\
		"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"	\
		"	" st "	$1, %1				\n"	\
		"\t" __scbeqz "	$1, 1b				\n"	\
		"	.set	pop				\n"	\
		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)			\
		: __LLSC_CLOBBER);					\
	} else {							\
		unsigned long __flags;					\
									\
		raw_local_irq_save(__flags);				\
		__ret = *m;						\
		*m = val;						\
		raw_local_irq_restore(__flags);				\
	}								\
									\
	__ret;								\
})

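/*
 * Illustrative sketch, not used by the kernel: ignoring atomicity, the ll/sc
 * loop emitted by __xchg_asm() above behaves like
 *
 *	__ret = *m;
 *	*m = val;
 *
 * The ll + sc/__scbeqz retry loop makes that read-modify-write atomic with
 * respect to other CPUs, while the !kernel_uses_llsc fallback only has to
 * disable interrupts to be atomic on the local CPU.
 */
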
extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
				  unsigned int size);

static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
				   int size)
{
	switch (size) {
	case 1:
	case 2:
		return __xchg_small(ptr, x, size);

	case 4:
		return __xchg_asm("ll", "sc", (volatile u32 *)ptr, x);

	case 8:
		if (!IS_ENABLED(CONFIG_64BIT))
			return __xchg_called_with_bad_pointer();

		return __xchg_asm("lld", "scd", (volatile u64 *)ptr, x);

	default:
		return __xchg_called_with_bad_pointer();
	}
}

#define xchg(ptr, x)							\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	smp_mb__before_llsc();						\
									\
	__res = (__typeof__(*(ptr)))					\
		__xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));	\
									\
	smp_llsc_mb();							\
									\
	__res;								\
})

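/*
 * Usage sketch, illustrative only ('pending' is a hypothetical variable):
 *
 *	unsigned int old = xchg(&pending, 0);
 *
 * xchg() atomically stores the new value and returns the previous one, with
 * full barriers (smp_mb__before_llsc()/smp_llsc_mb()) around the operation.
 * 8 and 16 bit pointers are handled by __xchg_small(), 32 bit by ll/sc (or
 * the irq-disabled fallback) and 64 bit only when CONFIG_64BIT is enabled.
 */
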
#define __cmpxchg_asm(ld, st, m, old, new)				\
({									\
	__typeof(*(m)) __ret;						\
									\
	if (kernel_uses_llsc) {						\
		loongson_llsc_mb();					\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	noat				\n"	\
		"	.set	push				\n"	\
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
		"1:	" ld "	%0, %2		# __cmpxchg_asm	\n"	\
		"	bne	%0, %z3, 2f			\n"	\
		"	.set	pop				\n"	\
		"	move	$1, %z4				\n"	\
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
		"	" st "	$1, %1				\n"	\
		"\t" __scbeqz "	$1, 1b				\n"	\
		"	.set	pop				\n"	\
		"2:						\n"	\
		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)	\
		: __LLSC_CLOBBER);					\
		loongson_llsc_mb();					\
	} else {							\
		unsigned long __flags;					\
									\
		raw_local_irq_save(__flags);				\
		__ret = *m;						\
		if (__ret == old)					\
			*m = new;					\
		raw_local_irq_restore(__flags);				\
	}								\
									\
	__ret;								\
})

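/*
 * Illustrative sketch, not used by the kernel: ignoring atomicity, the ll/sc
 * loop in __cmpxchg_asm() above implements the classic compare-and-swap
 * operation
 *
 *	__ret = *m;
 *	if (__ret == old)
 *		*m = new;
 *
 * which is exactly what the irq-disabled fallback path does when
 * kernel_uses_llsc is false.
 */
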
extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
				     unsigned long new, unsigned int size);

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, unsigned int size)
{
	switch (size) {
	case 1:
	case 2:
		return __cmpxchg_small(ptr, old, new, size);

	case 4:
		return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
				     (u32)old, new);

	case 8:
		/* lld/scd are only available for MIPS64 */
		if (!IS_ENABLED(CONFIG_64BIT))
			return __cmpxchg_called_with_bad_pointer();

		return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
				     (u64)old, new);

	default:
		return __cmpxchg_called_with_bad_pointer();
	}
}

#define cmpxchg_local(ptr, old, new)					\
	((__typeof__(*(ptr)))						\
		__cmpxchg((ptr),					\
			  (unsigned long)(__typeof__(*(ptr)))(old),	\
			  (unsigned long)(__typeof__(*(ptr)))(new),	\
			  sizeof(*(ptr))))

#define cmpxchg(ptr, old, new)						\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	smp_mb__before_llsc();						\
	__res = cmpxchg_local((ptr), (old), (new));			\
	smp_llsc_mb();							\
									\
	__res;								\
})

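/*
 * Usage sketch, illustrative only ('counter' is a hypothetical variable):
 *
 *	u32 cur, next;
 *
 *	do {
 *		cur = READ_ONCE(counter);
 *		next = cur + 1;
 *	} while (cmpxchg(&counter, cur, next) != cur);
 *
 * cmpxchg() only stores 'new' if *ptr still contains 'old', and returns the
 * value that was actually observed. cmpxchg_local() above is the same
 * operation without the SMP barriers.
 */
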
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })

#define cmpxchg64(ptr, o, n)						\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
  })
#else

# include <asm-generic/cmpxchg-local.h>
# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

# ifdef CONFIG_SMP

static inline unsigned long __cmpxchg64(volatile void *ptr,
					unsigned long long old,
					unsigned long long new)
{
	unsigned long long tmp, ret;
	unsigned long flags;

	/*
	 * The assembly below has to combine 32 bit values into a 64 bit
	 * register, and split 64 bit values from one register into two. If we
	 * were to take an interrupt in the middle of this we'd only save the
	 * least significant 32 bits of each register & probably clobber the
	 * most significant 32 bits of the 64 bit values we're using. In order
	 * to avoid this we must disable interrupts.
	 */
	local_irq_save(flags);

	loongson_llsc_mb();
	asm volatile(
	"	.set	push				\n"
	"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"
	/* Load 64 bits from ptr */
	"1:	lld	%L0, %3		# __cmpxchg64	\n"
	/*
	 * Split the 64 bit value we loaded into the 2 registers that hold the
	 * ret variable.
	 */
	"	dsra	%M0, %L0, 32			\n"
	"	sll	%L0, %L0, 0			\n"
	/*
	 * Compare ret against old, breaking out of the loop if they don't
	 * match.
	 */
	"	bne	%M0, %M4, 2f			\n"
	"	bne	%L0, %L4, 2f			\n"
	/*
	 * Combine the 32 bit halves from the 2 registers that hold the new
	 * variable into a single 64 bit register.
	 */
#  if MIPS_ISA_REV >= 2
	"	move	%L1, %L5			\n"
	"	dins	%L1, %M5, 32, 32		\n"
#  else
	"	dsll	%L1, %L5, 32			\n"
	"	dsrl	%L1, %L1, 32			\n"
	"	.set	noat				\n"
	"	dsll	$at, %M5, 32			\n"
	"	or	%L1, %L1, $at			\n"
	"	.set	at				\n"
#  endif
	/* Attempt to store new at ptr */
	"	scd	%L1, %2				\n"
	/* If we failed, loop! */
	"\t" __scbeqz "	%L1, 1b				\n"
	"	.set	pop				\n"
	"2:						\n"
	: "=&r"(ret),
	  "=&r"(tmp),
	  "=" GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr)
	: GCC_OFF_SMALL_ASM() (*(unsigned long long *)ptr),
	  "r" (old),
	  "r" (new)
	: "memory");
	loongson_llsc_mb();

	local_irq_restore(flags);
	return ret;
}
286
287# define cmpxchg64(ptr, o, n) ({ \
288 unsigned long long __old = (__typeof__(*(ptr)))(o); \
289 unsigned long long __new = (__typeof__(*(ptr)))(n); \
290 __typeof__(*(ptr)) __res; \
291 \
292 /* \
293 * We can only use cmpxchg64 if we know that the CPU supports \
294 * 64-bits, ie. lld & scd. Our call to __cmpxchg64_unsupported \
295 * will cause a build error unless cpu_has_64bits is a \
296 * compile-time constant 1. \
297 */ \
Peter Zijlstradfc8d8d2019-06-13 15:43:18 +0200298 if (cpu_has_64bits && kernel_uses_llsc) { \
299 smp_mb__before_llsc(); \
Paul Burtonc7e2d712019-02-06 14:38:56 -0800300 __res = __cmpxchg64((ptr), __old, __new); \
Peter Zijlstradfc8d8d2019-06-13 15:43:18 +0200301 smp_llsc_mb(); \
302 } else { \
Paul Burtonc7e2d712019-02-06 14:38:56 -0800303 __res = __cmpxchg64_unsupported(); \
Peter Zijlstradfc8d8d2019-06-13 15:43:18 +0200304 } \
Paul Burtonc7e2d712019-02-06 14:38:56 -0800305 \
306 __res; \
307})
308
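/*
 * Usage sketch, illustrative only ('seq64' is a hypothetical 64-bit variable
 * on a 32-bit SMP kernel):
 *
 *	u64 cur, next;
 *
 *	do {
 *		cur = READ_ONCE(seq64);
 *		next = cur + 1;
 *	} while (cmpxchg64(&seq64, cur, next) != cur);
 *
 * As the comment in the macro above notes, this only builds when
 * cpu_has_64bits is a compile-time constant 1; otherwise the call to
 * __cmpxchg64_unsupported() causes a compile- or link-time error.
 */
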
# else /* !CONFIG_SMP */
#  define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
# endif /* !CONFIG_SMP */
#endif /* !CONFIG_64BIT */

#undef __scbeqz

#endif /* __ASM_CMPXCHG_H */