/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

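/*
 * Every helper in this file is written as an ARM64_LSE_ATOMIC_INSN(llsc, lse)
 * alternative: CPUs without the LSE atomics branch-and-link to the out-of-line
 * LL/SC implementation (via __LL_SC_CALL), while LSE-capable CPUs are patched
 * at boot to execute the inline atomic instruction instead.  Because the
 * fallback is a procedure call, every asm block must list __LL_SC_CLOBBERS
 * (the link register plus the scratch registers a branch via PLT may corrupt)
 * even on the LSE path.  Roughly speaking, ATOMIC_OP(add, stadd) below turns
 * atomic_add(i, v) into either "bl __ll_sc_atomic_add" or "stadd w0, [x1]".
 */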
#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)
#define ATOMIC_OP(op, asm_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),		\
"	" #asm_op "	%w[i], %[v]\n")					\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS);						\
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP

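/*
 * LDADD writes the value it read from memory into its second register, so the
 * *_return variants use w30 as a scratch register (it is already clobbered as
 * the link register on the LL/SC call path) and add the increment back to
 * produce the new value.  The "mb" argument selects the ordering suffix
 * (none, a, l or al) and "cl" adds the "memory" clobber for the ordered
 * variants; the leading nop pads the LL/SC side so that both alternatives
 * have the same length.
 */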
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
static inline int atomic_add_return##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_ATOMIC(add_return##name),				\
	/* LSE atomics */						\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

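/*
 * There are no LSE store-AND or store-SUB instructions: atomic_and() inverts
 * the operand and uses STCLR (clear the set bits), and atomic_sub() negates
 * it and uses STADD.
 */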
static inline void atomic_and(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(and),
	/* LSE atomics */
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(sub),
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
static inline int atomic_sub_return##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_ATOMIC(sub_return##name)				\
	"	nop",							\
	/* LSE atomics */						\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN
#undef __LL_SC_ATOMIC

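/*
 * The atomic64_* operations below mirror the 32-bit versions above, using the
 * full x registers and the same out-of-line LL/SC fallback scheme.
 */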
#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)
#define ATOMIC64_OP(op, asm_op)						\
static inline void atomic64_##op(long i, atomic64_t *v)		\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),	\
"	" #asm_op "	%[i], %[v]\n")					\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS);						\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
static inline long atomic64_add_return##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_ATOMIC64(add_return##name),				\
	/* LSE atomics */						\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(and),
	/* LSE atomics */
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(sub),
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
static inline long atomic64_sub_return##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_ATOMIC64(sub_return##name)				\
	"	nop",							\
	/* LSE atomics */						\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

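/*
 * There is no LSE instruction for dec_if_positive, so the LSE side is a CAS
 * loop: load the counter, compute the decremented value and bail out if it
 * would go negative, then CASAL it in.  The two SUBs and the CBNZ compare the
 * value observed by the CAS with the value originally loaded; a mismatch
 * means another CPU got there first, so we retry.  The run of nops keeps the
 * LL/SC alternative the same length as this sequence.
 */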
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(dec_if_positive)
	"	nop\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
	"	nop",
	/* LSE atomics */
	"1:	ldr	x30, %[v]\n"
	"	subs	%[ret], x30, #1\n"
	"	b.lt	2f\n"
	"	casal	x30, %[ret], %[v]\n"
	"	sub	x30, x30, #1\n"
	"	sub	x30, x30, %[ret]\n"
	"	cbnz	x30, 1b\n"
	"2:")
	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
	:
	: __LL_SC_CLOBBERS, "cc", "memory");

	return x0;
}

#undef __LL_SC_ATOMIC64

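/*
 * __cmpxchg_case_* maps directly onto CAS{A}{L}{B,H}: the expected value is
 * moved into w30/x30, the CAS compares and (on a match) swaps, and the value
 * it observed in memory is returned so the caller can tell whether the
 * exchange took place.
 */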
#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
						  unsigned long old,	\
						  unsigned long new)	\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register unsigned long x1 asm ("x1") = old;			\
	register unsigned long x2 asm ("x2") = new;			\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_CMPXCHG(name)						\
	"	nop",							\
	/* LSE atomics */						\
	"	mov	" #w "30, %" #w "[old]\n"			\
	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
	"	mov	%" #w "[ret], " #w "30")			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
	: [old] "r" (x1), [new] "r" (x2)				\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     1,   )
__CMPXCHG_CASE(w, h,     2,   )
__CMPXCHG_CASE(w,  ,     4,   )
__CMPXCHG_CASE(x,  ,     8,   )
__CMPXCHG_CASE(w, b, acq_1,  a, "memory")
__CMPXCHG_CASE(w, h, acq_2,  a, "memory")
__CMPXCHG_CASE(w,  , acq_4,  a, "memory")
__CMPXCHG_CASE(x,  , acq_8,  a, "memory")
__CMPXCHG_CASE(w, b, rel_1,  l, "memory")
__CMPXCHG_CASE(w, h, rel_2,  l, "memory")
__CMPXCHG_CASE(w,  , rel_4,  l, "memory")
__CMPXCHG_CASE(x,  , rel_8,  l, "memory")
__CMPXCHG_CASE(w, b, mb_1, al, "memory")
__CMPXCHG_CASE(w, h, mb_2, al, "memory")
__CMPXCHG_CASE(w,  , mb_4, al, "memory")
__CMPXCHG_CASE(x,  , mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

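/*
 * CASP requires its old and new values in consecutive even/odd register
 * pairs, hence the explicit x0/x1 and x2/x3 bindings below.  Success is
 * detected by EORing the values returned in x0/x1 against the expected ones
 * and ORing the results together, so __cmpxchg_double*() returns 0 on
 * success, matching the LL/SC implementation.
 */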
#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

#define __CMPXCHG_DBL(name, mb, cl...)					\
static inline long __cmpxchg_double##name(unsigned long old1,		\
					  unsigned long old2,		\
					  unsigned long new1,		\
					  unsigned long new2,		\
					  volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	"	nop\n"							\
	"	nop\n"							\
	__LL_SC_CMPXCHG_DBL(name),					\
	/* LSE atomics */						\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]")			\
	: [old1] "+r" (x0), [old2] "+r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */