/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/llsc.h>
#include <asm/war.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
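
/*
 * Usage sketch (illustrative only, not part of this header's API):
 * all accesses to an atomic_t must go through atomic_read() and
 * atomic_set() so the compiler can neither tear nor cache them.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);
 *	if (atomic_read(&nr_users) > 0)
 *		handle_users();		(handle_users is hypothetical)
 *
 * Neither macro implies any memory ordering; pair them with the
 * barriers from <asm/barrier.h> where ordering matters.
 */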

#define ATOMIC_OP(op, c_op, asm_op)					\
static __inline__ void atomic_##op(int i, atomic_t * v)			\
{									\
	if (kernel_uses_llsc) {						\
		int temp;						\
									\
		loongson_llsc_mb();					\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	"MIPS_ISA_LEVEL"		\n"	\
		"1:	ll	%0, %1		# atomic_" #op "\n"	\
		"	" #asm_op " %0, %2			\n"	\
		"	sc	%0, %1				\n"	\
		"\t" __SC_BEQZ "%0, 1b				\n"	\
		"	.set	pop				\n"	\
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	\
		: "Ir" (i) : __LLSC_CLOBBER);				\
	} else {							\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
	}								\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
{									\
	int result;							\
									\
	if (kernel_uses_llsc) {						\
		int temp;						\
									\
		loongson_llsc_mb();					\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	"MIPS_ISA_LEVEL"		\n"	\
		"1:	ll	%1, %2	# atomic_" #op "_return	\n"	\
		"	" #asm_op " %0, %1, %3			\n"	\
		"	sc	%0, %2				\n"	\
		"\t" __SC_BEQZ "%0, 1b				\n"	\
		"	" #asm_op " %0, %1, %3			\n"	\
		"	.set	pop				\n"	\
		: "=&r" (result), "=&r" (temp),				\
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			\
		: "Ir" (i) : __LLSC_CLOBBER);				\
	} else {							\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
	}								\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)	\
{									\
	int result;							\
									\
	if (kernel_uses_llsc) {						\
		int temp;						\
									\
		loongson_llsc_mb();					\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	"MIPS_ISA_LEVEL"		\n"	\
		"1:	ll	%1, %2	# atomic_fetch_" #op "	\n"	\
		"	" #asm_op " %0, %1, %3			\n"	\
		"	sc	%0, %2				\n"	\
		"\t" __SC_BEQZ "%0, 1b				\n"	\
		"	.set	pop				\n"	\
		"	move	%0, %1				\n"	\
		: "=&r" (result), "=&r" (temp),				\
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			\
		: "Ir" (i) : __LLSC_CLOBBER);				\
	} else {							\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
	}								\
									\
	return result;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)
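
/*
 * Each ATOMIC_OPS() invocation above expands to three definitions;
 * ATOMIC_OPS(add, +=, addu), for example, generates (a sketch, asm
 * bodies elided):
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v);
 *	static __inline__ int atomic_add_return_relaxed(int i, atomic_t *v);
 *	static __inline__ int atomic_fetch_add_relaxed(int i, atomic_t *v);
 *
 * On LL/SC-capable CPUs each op retries its ll/sc sequence until the
 * store-conditional succeeds; all other CPUs fall back to a plain
 * read-modify-write with interrupts disabled.
 */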

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
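
/*
 * The bitwise ops above are handy for lock-free flag words. A minimal
 * sketch (the flag value and variable are made up for illustration):
 *
 *	enum { MY_BUSY = 0x1 };
 *	static atomic_t my_flags = ATOMIC_INIT(0);
 *
 *	int old = atomic_fetch_or_relaxed(MY_BUSY, &my_flags);
 *
 * A clear MY_BUSY bit in "old" means this caller set it; dropping the
 * flag again is atomic_and(~MY_BUSY, &my_flags). Note the _relaxed
 * fetch variants impose no ordering on surrounding accesses.
 */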

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	int result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc) {
		int temp;

		loongson_llsc_mb();
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"1:	ll	%1, %2	# atomic_sub_if_positive\n"
		"	.set	pop				\n"
		"	subu	%0, %1, %3			\n"
		"	move	%1, %0				\n"
		"	bltz	%0, 2f				\n"
		"	.set	push				\n"
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"	sc	%1, %2				\n"
		"\t" __SC_BEQZ "%1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i) : __LLSC_CLOBBER);
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
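
/*
 * atomic_cmpxchg() is the building block for compare-and-swap loops.
 * A minimal sketch (the helper and its policy are illustrative only):
 *
 *	static __inline__ int add_unless_negative(atomic_t *v, int a)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			if (old < 0)
 *				return 0;
 *			new = old + a;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *
 *		return 1;
 *	}
 */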

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
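
/*
 * Example (illustrative; "sem_count" is a placeholder): a "try down"
 * on a counting semaphore that must never go below zero.
 *
 *	if (atomic_dec_if_positive(&sem_count) < 0)
 *		return -EBUSY;
 *
 * The return value is the old value minus one, so a negative result
 * means the decrement was refused and the counter is unchanged.
 */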

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)	{ (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v)	READ_ONCE((v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 */
#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
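
/*
 * Illustrative only: atomic64_t mirrors the atomic_t API on an s64
 * counter ("len" and "limit" below are placeholders), e.g.
 *
 *	static atomic64_t total_bytes = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &total_bytes);
 *	if (atomic64_read(&total_bytes) > limit)
 *		throttle();
 *
 * 32-bit kernels do not get these definitions; they fall back to the
 * kernel's generic atomic64 implementation instead.
 */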

#define ATOMIC64_OP(op, c_op, asm_op)					\
static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		\
{									\
	if (kernel_uses_llsc) {						\
		s64 temp;						\
									\
		loongson_llsc_mb();					\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	"MIPS_ISA_LEVEL"		\n"	\
		"1:	lld	%0, %1	# atomic64_" #op "	\n"	\
		"	" #asm_op " %0, %2			\n"	\
		"	scd	%0, %1				\n"	\
		"\t" __SC_BEQZ "%0, 1b				\n"	\
		"	.set	pop				\n"	\
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	\
		: "Ir" (i) : __LLSC_CLOBBER);				\
	} else {							\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
	}								\
}

#define ATOMIC64_OP_RETURN(op, c_op, asm_op)				\
static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{									\
	s64 result;							\
									\
	if (kernel_uses_llsc) {						\
		s64 temp;						\
									\
		loongson_llsc_mb();					\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	"MIPS_ISA_LEVEL"		\n"	\
		"1:	lld	%1, %2	# atomic64_" #op "_return\n"	\
		"	" #asm_op " %0, %1, %3			\n"	\
		"	scd	%0, %2				\n"	\
		"\t" __SC_BEQZ "%0, 1b				\n"	\
		"	" #asm_op " %0, %1, %3			\n"	\
		"	.set	pop				\n"	\
		: "=&r" (result), "=&r" (temp),				\
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			\
		: "Ir" (i) : __LLSC_CLOBBER);				\
	} else {							\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
	}								\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, c_op, asm_op)				\
static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{									\
	s64 result;							\
									\
	if (kernel_uses_llsc) {						\
		s64 temp;						\
									\
		loongson_llsc_mb();					\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	"MIPS_ISA_LEVEL"		\n"	\
		"1:	lld	%1, %2	# atomic64_fetch_" #op "\n"	\
		"	" #asm_op " %0, %1, %3			\n"	\
		"	scd	%0, %2				\n"	\
		"\t" __SC_BEQZ "%0, 1b				\n"	\
		"	move	%0, %1				\n"	\
		"	.set	pop				\n"	\
		: "=&r" (result), "=&r" (temp),				\
		  "+" GCC_OFF_SMALL_ASM() (v->counter)			\
		: "Ir" (i) : __LLSC_CLOBBER);				\
	} else {							\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
	}								\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, c_op, asm_op)					\
	ATOMIC64_OP(op, c_op, asm_op)					\
	ATOMIC64_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC64_FETCH_OP(op, c_op, asm_op)

ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347
Peter Zijlstra4ec45852016-04-18 01:15:25 +0200348#define atomic64_add_return_relaxed atomic64_add_return_relaxed
349#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
350#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
351#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
352
Peter Zijlstraef315632014-03-26 17:56:43 +0100353#undef ATOMIC64_OPS
Peter Zijlstra4edac522016-04-18 01:16:06 +0200354#define ATOMIC64_OPS(op, c_op, asm_op) \
355 ATOMIC64_OP(op, c_op, asm_op) \
356 ATOMIC64_FETCH_OP(op, c_op, asm_op)
357
358ATOMIC64_OPS(and, &=, and)
359ATOMIC64_OPS(or, |=, or)
360ATOMIC64_OPS(xor, ^=, xor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

/*
 * atomic64_sub_if_positive - conditionally subtract integer from atomic
 *			      variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ s64 atomic64_sub_if_positive(s64 i, atomic64_t * v)
{
	s64 result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc) {
		s64 temp;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_LEVEL"		\n"
		"1:	lld	%1, %2	# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3			\n"
		"	move	%1, %0				\n"
		"	bltz	%0, 1f				\n"
		"	scd	%1, %2				\n"
		"\t" __SC_BEQZ "%1, 1b				\n"
		"1:						\n"
		"	.set	pop				\n"
		: "=&r" (result), "=&r" (temp),
		  "+" GCC_OFF_SMALL_ASM() (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */