/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/sync.h>

#define ATOMIC_OPS(pfx, type)						\
static __always_inline type arch_##pfx##_read(const pfx##_t *v)	\
{									\
	return READ_ONCE(v->counter);					\
}									\
									\
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)	\
{									\
	WRITE_ONCE(v->counter, i);					\
}									\
									\
static __always_inline type						\
arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)			\
{									\
	return arch_cmpxchg(&v->counter, o, n);				\
}									\
									\
static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)	\
{									\
	return arch_xchg(&v->counter, n);				\
}

ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)	{ (i) }
ATOMIC_OPS(atomic64, s64)
#endif

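/*
 * Illustrative expansion (editorial sketch, not part of the original
 * header): ATOMIC_OPS(atomic, int) above generates the four trivial
 * accessors, e.g.
 *
 *	static __always_inline int arch_atomic_read(const atomic_t *v)
 *	{
 *		return READ_ONCE(v->counter);
 *	}
 *
 * Loads and stores of a naturally aligned word are already atomic on
 * MIPS, so read/set need no LL/SC sequence; cmpxchg/xchg defer to
 * <asm/cmpxchg.h>.
 */
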
#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)		\
{									\
	type temp;							\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return;							\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%0, %1		# " #pfx "_" #op "	\n"	\
	"	" #asm_op " %0, %2				\n"	\
	"	" #sc "	%0, %1					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)		\
	: "Ir" (i) : __LLSC_CLOBBER);					\
}

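/*
 * Sketch of the LL/SC loop that ATOMIC_OP(atomic, add, int, +=, addu,
 * ll, sc) emits (illustrative only; __SYNC expands per workaround
 * configuration and SC_BEQZ is typically beqz, or beqzl where the
 * R10000 LL/SC workaround applies):
 *
 *	1:	ll	%0, v->counter	# load-linked: read and watch the line
 *		addu	%0, i		# apply the operation to the loaded value
 *		sc	%0, v->counter	# store-conditional: fails if raced
 *		beqz	%0, 1b		# %0 == 0 on failure, so retry
 *
 * sc only writes back if no other agent touched the location since the
 * matching ll, which is what makes the read-modify-write atomic.
 */
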
#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)		\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_" #op "_return\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)			\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_fetch_" #op "\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	.set	pop					\n"	\
	"	move	%0, %1					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

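/*
 * Only _relaxed variants are defined above; the generic layer in
 * <linux/atomic.h> derives the acquire/release/fully-ordered forms
 * from them. A rough sketch of that derivation (editorial, not the
 * exact generic code):
 *
 *	static __always_inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		int ret;
 *		__atomic_pre_full_fence();
 *		ret = arch_atomic_add_return_relaxed(i, v);
 *		__atomic_post_full_fence();
 *		return ret;
 *	}
 */
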
#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#endif

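/*
 * Usage sketch (hypothetical caller, not from this file): the fetch_*
 * forms return the value held *before* the operation, which makes
 * lock-free flag claiming a one-liner:
 *
 *	#define MY_FLAG_BUSY	0x1	// hypothetical flag bit
 *
 *	static inline bool my_try_claim(atomic_t *state)
 *	{
 *		// true only for the caller that actually set the bit
 *		return !(arch_atomic_fetch_or_relaxed(MY_FLAG_BUSY, state)
 *			 & MY_FLAG_BUSY);
 *	}
 */
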
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i. A usage sketch
 * follows the ATOMIC_SIP_OP instantiations below.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{									\
	type temp, result;						\
									\
	smp_mb__before_atomic();					\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result -= i;						\
		if (result >= 0)					\
			v->counter = result;				\
		raw_local_irq_restore(flags);				\
		smp_mb__after_atomic();					\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# atomic_sub_if_positive\n"	\
	"	.set	pop					\n"	\
	"	" #op "	%0, %1, %3				\n"	\
	"	move	%1, %0					\n"	\
	"	bltz	%0, 2f					\n"	\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" #sc "	%1, %2					\n"	\
	"	" __stringify(SC_BEQZ) "	%1, 1b		\n"	\
	"2:	" __SYNC(full, loongson3_war) "			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i)							\
	: __LLSC_CLOBBER);						\
									\
	/*								\
	 * In the Loongson3 workaround case we already have a		\
	 * completion barrier at 2: above, which is needed due to the	\
	 * bltz that can branch to code outside of the LL/SC loop. As	\
	 * such, we don't need to emit another barrier here.		\
	 */								\
	if (__SYNC_loongson3_war == 0)					\
		smp_mb__after_atomic();					\
									\
	return result;							\
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
#endif

#undef ATOMIC_SIP_OP
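
/*
 * Usage sketch (hypothetical, editorial): dec_if_positive() returns
 * old - 1 and only stores the result when it is non-negative, so a
 * counter that already reached zero stays at zero and a racing caller
 * sees -1:
 *
 *	static inline void my_put(atomic_t *refs, void (*release)(void))
 *	{
 *		if (arch_atomic_dec_if_positive(refs) == 0)
 *			release();	// we dropped the last reference
 *	}
 */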

#endif /* _ASM_ATOMIC_H */