/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/llsc.h>
#include <asm/sync.h>
#include <asm/war.h>

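/*
 * ATOMIC_OPS(pfx, type) generates the trivial accessors for an atomic type:
 * arch_##pfx##_read(), arch_##pfx##_set(), arch_##pfx##_cmpxchg() and
 * arch_##pfx##_xchg(). Read and set are plain READ_ONCE()/WRITE_ONCE()
 * accesses; cmpxchg and xchg defer to the generic arch_cmpxchg()/arch_xchg()
 * from <asm/cmpxchg.h>.
 *
 * Illustrative sketch of the generated functions (callers normally go
 * through the atomic_*() wrappers in <linux/atomic.h> rather than calling
 * the arch_*() variants directly):
 *
 *	atomic_t ref = ATOMIC_INIT(1);
 *	int old;
 *
 *	old = arch_atomic_read(&ref);		(old == 1)
 *	arch_atomic_set(&ref, 2);
 *	old = arch_atomic_cmpxchg(&ref, 2, 3);	(old == 2, ref is now 3)
 *	old = arch_atomic_xchg(&ref, 0);	(old == 3, ref is now 0)
 */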
#define ATOMIC_OPS(pfx, type)						\
static __always_inline type arch_##pfx##_read(const pfx##_t *v)	\
{									\
	return READ_ONCE(v->counter);					\
}									\
									\
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)	\
{									\
	WRITE_ONCE(v->counter, i);					\
}									\
									\
static __always_inline type						\
arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)			\
{									\
	return arch_cmpxchg(&v->counter, o, n);				\
}									\
									\
static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)	\
{									\
	return arch_xchg(&v->counter, n);				\
}

ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i) { (i) }
ATOMIC_OPS(atomic64, s64)
#endif

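/*
 * ATOMIC_OP() generates the non-returning operations (e.g. arch_atomic_add()).
 * On LL/SC-capable CPUs this is a classic load-linked/store-conditional
 * retry loop: ll loads the counter, asm_op applies the operation, and sc
 * attempts the store, writing 0 back into the temp register if another CPU
 * intervened, in which case __SC_BEQZ branches back to retry. The leading
 * __SYNC(full, loongson3_war) emits a barrier only on kernels built with the
 * Loongson3 LL/SC workaround. CPUs without LL/SC fall back to a plain
 * read-modify-write with interrupts disabled, which suffices on those
 * (uniprocessor) systems.
 */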
#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)		\
{									\
	type temp;							\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return;							\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%0, %1		# " #pfx "_" #op "	\n"	\
	"	" #asm_op " %0, %2				\n"	\
	"	" #sc "	%0, %1					\n"	\
	"\t" __SC_BEQZ "%0, 1b					\n"	\
	"	.set	pop					\n"	\
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)		\
	: "Ir" (i) : __LLSC_CLOBBER);					\
}

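/*
 * ATOMIC_OP_RETURN() generates the *_return_relaxed() variants, which return
 * the new value of the counter. Note the second asm_op after the branch: sc
 * consumes the computed value in %0 (replacing it with the success flag), so
 * the result is recomputed from the value loaded by ll once the store has
 * succeeded.
 */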
#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)		\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_" #op "_return\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __SC_BEQZ "%0, 1b					\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}

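/*
 * ATOMIC_FETCH_OP() generates the *_fetch_op_relaxed() variants, which
 * return the value the counter held *before* the operation. The value loaded
 * by ll survives in %1 across the loop, so after a successful sc it is
 * simply copied into the result register with move.
 */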
#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)			\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_fetch_" #op "\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __SC_BEQZ "%0, 1b					\n"	\
	"	.set	pop					\n"	\
	"	move	%0, %1					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}

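/*
 * Instantiate add/sub in all three flavours for atomic_t and, on 64-bit
 * kernels, atomic64_t. Only the _relaxed forms are provided here; the
 * generic fallback code pulled in via <linux/atomic.h> builds the acquire,
 * release and fully ordered variants on top of them.
 */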
#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#endif
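
/*
 * A typical use of the fetch variants is test-and-set style flag handling.
 * Illustrative sketch only (MY_FLAG_BUSY is a made-up flag; callers use the
 * atomic_*() wrappers from <linux/atomic.h>, which are built on top of the
 * _relaxed implementations above):
 *
 *	#define MY_FLAG_BUSY	BIT(0)
 *
 *	if (atomic_fetch_or(MY_FLAG_BUSY, &flags) & MY_FLAG_BUSY)
 *		return -EBUSY;		(flag was already set)
 */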

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * arch_atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{									\
	type temp, result;						\
									\
	smp_mb__before_atomic();					\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result -= i;						\
		if (result >= 0)					\
			v->counter = result;				\
		raw_local_irq_restore(flags);				\
		smp_mb__after_atomic();					\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# atomic_sub_if_positive\n"	\
	"	.set	pop					\n"	\
	"	" #op "	%0, %1, %3				\n"	\
	"	move	%1, %0					\n"	\
	"	bltz	%0, 2f					\n"	\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" #sc "	%1, %2					\n"	\
	"	" __SC_BEQZ "%1, 1b				\n"	\
	"2:	" __SYNC(full, loongson3_war) "			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i)							\
	: __LLSC_CLOBBER);						\
									\
	/*								\
	 * In the Loongson3 workaround case we already have a		\
	 * completion barrier at 2: above, which is needed due to the	\
	 * bltz that can branch to code outside of the LL/SC loop. As	\
	 * such, we don't need to emit another barrier here.		\
	 */								\
	if (__SYNC_loongson3_war == 0)					\
		smp_mb__after_atomic();					\
									\
	return result;							\
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)
#endif
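
/*
 * arch_atomic_dec_if_positive() returns the old value minus one and only
 * stores the decrement when the result is non-negative, which makes it a
 * building block for semaphore-like constructs. Illustrative sketch (sema
 * is a made-up counter, not code from this file):
 *
 *	atomic_t sema = ATOMIC_INIT(4);
 *
 *	if (arch_atomic_dec_if_positive(&sema) < 0)
 *		...	(no slot free: counter was <= 0 and is unchanged)
 */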

#undef ATOMIC_SIP_OP

#endif /* _ASM_ATOMIC_H */