/* SPDX-License-Identifier: GPL-2.0 */
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */

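/*
 * Illustrative sketch only (not part of this header's interface; owner,
 * take() and drop() are hypothetical names): a minimal ownership handoff
 * showing where the acquire/release split matters.
 *
 *	static atomic_t owner = ATOMIC_INIT(0);
 *
 *	void take(int id)
 *	{
 *		// Only the successful 0 -> id transition has ACQUIRE
 *		// semantics; a failed cmpxchg_acquire orders nothing.
 *		while (atomic_cmpxchg_acquire(&owner, 0, id) != 0)
 *			cpu_relax();
 *	}
 *
 *	void drop(void)
 *	{
 *		// RELEASE: all accesses before this store are ordered
 *		// before the next owner's acquiring cmpxchg observes it.
 *		atomic_set_release(&owner, 0);
 *	}
 */
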
/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 *
 * If an architecture overrides __atomic_acquire_fence() it will probably
 * want to define smp_mb__after_spinlock().
 */
#ifndef __atomic_acquire_fence
#define __atomic_acquire_fence smp_mb__after_atomic
#endif

#ifndef __atomic_release_fence
#define __atomic_release_fence smp_mb__before_atomic
#endif

#ifndef __atomic_pre_full_fence
#define __atomic_pre_full_fence smp_mb__before_atomic
#endif

#ifndef __atomic_post_full_fence
#define __atomic_post_full_fence smp_mb__after_atomic
#endif

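/*
 * Sketch of an override, assuming a hypothetical architecture with
 * dedicated acquire/release fence instructions (the mnemonics below are
 * not from any real port). Its asm/atomic.h would define, before this
 * point:
 *
 *	#define __atomic_acquire_fence() \
 *		__asm__ __volatile__("fence.acq" ::: "memory")
 *	#define __atomic_release_fence() \
 *		__asm__ __volatile__("fence.rel" ::: "memory")
 *
 * so that the #ifndef defaults above are skipped; per the note above,
 * such an architecture probably also wants smp_mb__after_spinlock().
 */
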
/* Perform the relaxed op, then order all later accesses after it. */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();					\
	__ret;								\
})

/* Order all earlier accesses before the relaxed op. */
#define __atomic_op_release(op, args...)				\
({									\
	__atomic_release_fence();					\
	op##_relaxed(args);						\
})

/* Bracket the relaxed op with full barriers: a fully ordered op. */
#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	__atomic_pre_full_fence();					\
	__ret = op##_relaxed(args);					\
	__atomic_post_full_fence();					\
	__ret;								\
})

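/*
 * The fallback header below uses these helpers to generate any ordering
 * variants an architecture does not provide from its relaxed primitives.
 * As a conceptual sketch (the generated code is not verbatim), given only
 * atomic_fetch_add_relaxed(), the acquire form becomes:
 *
 *	static inline int atomic_fetch_add_acquire(int i, atomic_t *v)
 *	{
 *		return __atomic_op_acquire(atomic_fetch_add, i, v);
 *	}
 *
 * which expands to roughly:
 *
 *	({
 *		int __ret = atomic_fetch_add_relaxed(i, v);
 *		__atomic_acquire_fence();	// smp_mb__after_atomic() by default
 *		__ret;
 *	})
 */
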
#include <linux/atomic-fallback.h>

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */