/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>

#define ATOMIC_INIT(i) { (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v) (*(volatile int *)&(v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) ((v)->counter = (i))

#define ATOMIC_OP(op, c_op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
		int temp; \
 \
		__asm__ __volatile__( \
		" .set arch=r4000 \n" \
		"1: ll %0, %1 # atomic_" #op " \n" \
		" " #asm_op " %0, %2 \n" \
		" sc %0, %1 \n" \
		" beqzl %0, 1b \n" \
		" .set mips0 \n" \
		: "=&r" (temp), "+m" (v->counter) \
		: "Ir" (i)); \
	} else if (kernel_uses_llsc) { \
		int temp; \
 \
		do { \
			__asm__ __volatile__( \
			" .set arch=r4000 \n" \
			" ll %0, %1 # atomic_" #op "\n" \
			" " #asm_op " %0, %2 \n" \
			" sc %0, %1 \n" \
			" .set mips0 \n" \
			: "=&r" (temp), "+m" (v->counter) \
			: "Ir" (i)); \
		} while (unlikely(!temp)); \
	} else { \
		unsigned long flags; \
 \
		raw_local_irq_save(flags); \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
{ \
	int result; \
 \
	smp_mb__before_llsc(); \
 \
	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
		int temp; \
 \
		__asm__ __volatile__( \
		" .set arch=r4000 \n" \
		"1: ll %1, %2 # atomic_" #op "_return \n" \
		" " #asm_op " %0, %1, %3 \n" \
		" sc %0, %2 \n" \
		" beqzl %0, 1b \n" \
		" " #asm_op " %0, %1, %3 \n" \
		" .set mips0 \n" \
		: "=&r" (result), "=&r" (temp), "+m" (v->counter) \
		: "Ir" (i)); \
	} else if (kernel_uses_llsc) { \
		int temp; \
 \
		do { \
			__asm__ __volatile__( \
			" .set arch=r4000 \n" \
			" ll %1, %2 # atomic_" #op "_return \n" \
			" " #asm_op " %0, %1, %3 \n" \
			" sc %0, %2 \n" \
			" .set mips0 \n" \
			: "=&r" (result), "=&r" (temp), "+m" (v->counter) \
			: "Ir" (i)); \
		} while (unlikely(!result)); \
 \
		result = temp; result c_op i; \
	} else { \
		unsigned long flags; \
 \
		raw_local_irq_save(flags); \
		result = v->counter; \
		result c_op i; \
		v->counter = result; \
		raw_local_irq_restore(flags); \
	} \
 \
	smp_llsc_mb(); \
 \
	return result; \
}

#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

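/*
 * Illustrative sketch only, not part of this header: the two ATOMIC_OPS()
 * invocations above generate atomic_add(), atomic_sub(),
 * atomic_add_return() and atomic_sub_return(). A hypothetical caller
 * would use them as plain shared counters:
 *
 *	static atomic_t users = ATOMIC_INIT(0);
 *
 *	atomic_add(2, &users);				// users == 2
 *	atomic_sub(1, &users);				// users == 1
 *	int n = atomic_add_return(3, &users);		// n == 4
 *	int m = atomic_sub_return(4, &users);		// m == 0
 *
 * atomic_add()/atomic_sub() imply no memory barrier; the *_return forms
 * are fully ordered by the smp_mb__before_llsc()/smp_llsc_mb() pair.
 */
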
/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	int result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		int temp;

		__asm__ __volatile__(
		" .set arch=r4000 \n"
		"1: ll %1, %2 # atomic_sub_if_positive\n"
		" subu %0, %1, %3 \n"
		" bltz %0, 1f \n"
		" sc %0, %2 \n"
		" .set noreorder \n"
		" beqzl %0, 1b \n"
		" subu %0, %1, %3 \n"
		" .set reorder \n"
		"1: \n"
		" .set mips0 \n"
		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (kernel_uses_llsc) {
		int temp;

		__asm__ __volatile__(
		" .set arch=r4000 \n"
		"1: ll %1, %2 # atomic_sub_if_positive\n"
		" subu %0, %1, %3 \n"
		" bltz %0, 1f \n"
		" sc %0, %2 \n"
		" .set noreorder \n"
		" beqz %0, 1b \n"
		" subu %0, %1, %3 \n"
		" .set reorder \n"
		"1: \n"
		" .set mips0 \n"
		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

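/*
 * Illustrative sketch only: atomic_sub_if_positive() is the building
 * block for atomic_dec_if_positive() below. A hypothetical token bucket
 * could use it to claim n tokens only while enough remain:
 *
 *	static atomic_t tokens = ATOMIC_INIT(16);
 *
 *	if (atomic_sub_if_positive(n, &tokens) < 0)
 *		return -EBUSY;	// too few tokens, nothing was subtracted
 *	// n tokens now belong to this caller
 */
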
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))

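/*
 * Illustrative sketch only: a typical open-coded atomic_cmpxchg() retry
 * loop, of the same shape as __atomic_add_unless() below. This
 * hypothetical helper atomically doubles a counter and returns the new
 * value:
 *
 *	static inline int atomic_double(atomic_t *v)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = old * 2;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *
 *		return new;
 *	}
 */
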
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

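/*
 * Illustrative sketch only: the classic use of __atomic_add_unless() is
 * taking a reference that must never revive a dead (zero) refcount,
 * which is what the generic atomic_inc_not_zero() wrapper does:
 *
 *	if (__atomic_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// object already dead, no reference taken
 */
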
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))

/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

/*
 * atomic_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1, (v))

/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1, (v))

/*
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)

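/*
 * Illustrative sketch only, tying the helpers above together in a
 * hypothetical get/put pair for a refcounted object:
 *
 *	static void obj_get(struct obj *o)
 *	{
 *		atomic_inc(&o->refs);
 *	}
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refs))
 *			kfree(o);	// last reference dropped
 *	}
 */
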
#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v) (*(volatile long *)&(v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic64_set(v, i) ((v)->counter = (i))

#define ATOMIC64_OP(op, c_op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
{ \
	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
		long temp; \
 \
		__asm__ __volatile__( \
		" .set arch=r4000 \n" \
		"1: lld %0, %1 # atomic64_" #op " \n" \
		" " #asm_op " %0, %2 \n" \
		" scd %0, %1 \n" \
		" beqzl %0, 1b \n" \
		" .set mips0 \n" \
		: "=&r" (temp), "+m" (v->counter) \
		: "Ir" (i)); \
	} else if (kernel_uses_llsc) { \
		long temp; \
 \
		do { \
			__asm__ __volatile__( \
			" .set arch=r4000 \n" \
			" lld %0, %1 # atomic64_" #op "\n" \
			" " #asm_op " %0, %2 \n" \
			" scd %0, %1 \n" \
			" .set mips0 \n" \
			: "=&r" (temp), "+m" (v->counter) \
			: "Ir" (i)); \
		} while (unlikely(!temp)); \
	} else { \
		unsigned long flags; \
 \
		raw_local_irq_save(flags); \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
	} \
}

#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
{ \
	long result; \
 \
	smp_mb__before_llsc(); \
 \
	if (kernel_uses_llsc && R10000_LLSC_WAR) { \
		long temp; \
 \
		__asm__ __volatile__( \
		" .set arch=r4000 \n" \
		"1: lld %1, %2 # atomic64_" #op "_return\n" \
		" " #asm_op " %0, %1, %3 \n" \
		" scd %0, %2 \n" \
		" beqzl %0, 1b \n" \
		" " #asm_op " %0, %1, %3 \n" \
		" .set mips0 \n" \
		: "=&r" (result), "=&r" (temp), "+m" (v->counter) \
		: "Ir" (i)); \
	} else if (kernel_uses_llsc) { \
		long temp; \
 \
		do { \
			__asm__ __volatile__( \
			" .set arch=r4000 \n" \
			" lld %1, %2 # atomic64_" #op "_return\n" \
			" " #asm_op " %0, %1, %3 \n" \
			" scd %0, %2 \n" \
			" .set mips0 \n" \
			: "=&r" (result), "=&r" (temp), "+m" (v->counter) \
			: "Ir" (i)); \
		} while (unlikely(!result)); \
 \
		result = temp; result c_op i; \
	} else { \
		unsigned long flags; \
 \
		raw_local_irq_save(flags); \
		result = v->counter; \
		result c_op i; \
		v->counter = result; \
		raw_local_irq_restore(flags); \
	} \
 \
	smp_llsc_mb(); \
 \
	return result; \
}

#define ATOMIC64_OPS(op, c_op, asm_op) \
	ATOMIC64_OP(op, c_op, asm_op) \
	ATOMIC64_OP_RETURN(op, c_op, asm_op)

ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

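/*
 * Illustrative sketch only: the atomic64_* ops mirror their 32-bit
 * counterparts, e.g. a hypothetical byte counter on a 64-bit kernel:
 *
 *	static atomic64_t bytes = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &bytes);
 *	long total = atomic64_read(&bytes);
 */
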
/*
 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
	long result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		long temp;

		__asm__ __volatile__(
		" .set arch=r4000 \n"
		"1: lld %1, %2 # atomic64_sub_if_positive\n"
		" dsubu %0, %1, %3 \n"
		" bltz %0, 1f \n"
		" scd %0, %2 \n"
		" .set noreorder \n"
		" beqzl %0, 1b \n"
		" dsubu %0, %1, %3 \n"
		" .set reorder \n"
		"1: \n"
		" .set mips0 \n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (kernel_uses_llsc) {
		long temp;

		__asm__ __volatile__(
		" .set arch=r4000 \n"
		"1: lld %1, %2 # atomic64_sub_if_positive\n"
		" dsubu %0, %1, %3 \n"
		" bltz %0, 1f \n"
		" scd %0, %2 \n"
		" .set noreorder \n"
		" beqz %0, 1b \n"
		" dsubu %0, %1, %3 \n"
		" .set reorder \n"
		"1: \n"
		" .set mips0 \n"
		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))

/*
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

/*
 * atomic64_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)

/*
 * atomic64_inc - increment atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1.
 */
#define atomic64_inc(v) atomic64_add(1, (v))

/*
 * atomic64_dec - decrement atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic64_dec(v) atomic64_sub(1, (v))

/*
 * atomic64_add_negative - add and test if negative
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */