/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but it serves more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif
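
/*
 * Illustrative sketch of the dma_rmb()/dma_wmb() pairing, adapted from
 * Documentation/memory-barriers.txt; not compiled here. The names desc,
 * DEVICE_OWN and the data fields are hypothetical:
 *
 *	if (desc->status != DEVICE_OWN) {
 *		dma_rmb();			// own it before reading contents
 *		read_data = desc->data;
 *		desc->data = write_data;
 *		dma_wmb();			// flush writes before release
 *		desc->status = DEVICE_OWN;	// hand the descriptor back
 *	}
 *
 * The dma barriers order accesses to DMA-coherent memory shared with a
 * device, which is why they remain real barriers even on UP.
 */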

#ifndef read_barrier_depends
#define read_barrier_depends()		do { } while (0)
#endif

#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifndef __smp_read_barrier_depends
#define __smp_read_barrier_depends()	read_barrier_depends()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	__smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb()	__smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	__smp_wmb()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()	__smp_read_barrier_depends()
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()	do { } while (0)
#endif

#endif	/* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	__smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	__smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
#endif

#endif	/* CONFIG_SMP */
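
/*
 * Usage sketch, not compiled here (obj, its fields and do_something()
 * are hypothetical): the canonical release/acquire message-passing
 * pairing these macros implement.
 *
 *	// CPU 0 (producer)
 *	obj->val = 42;
 *	smp_store_release(&obj->ready, 1);
 *
 *	// CPU 1 (consumer)
 *	if (smp_load_acquire(&obj->ready))
 *		do_something(obj->val);
 *
 * The release orders the store to obj->val before the store to ready;
 * the acquire orders the load of ready before the load of obj->val, so
 * the consumer never observes ready == 1 with a stale obj->val.
 */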

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_read_barrier_depends() __smp_read_barrier_depends()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)
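
/*
 * Sketch of the intended use (simplified, virtio-like ring; ring, idx,
 * avail_idx and buf are hypothetical): a guest publishing a buffer to
 * the host.
 *
 *	ring[idx] = buf;
 *	virt_wmb();			// buffer visible before index update
 *	WRITE_ONCE(*avail_idx, idx + 1);
 *
 * Note these expand to the __smp_*() forms even on !CONFIG_SMP builds,
 * because the host side of the ring may run concurrently on another CPU.
 */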

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides a LOAD->STORE order, the additional RMB
 * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
 * aka. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()	smp_rmb()
#endif
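
/*
 * Example sketch (hypothetical ready flag): a control-dependent spin
 * loop upgraded to ACQUIRE ordering.
 *
 *	while (!READ_ONCE(obj->ready))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 *	// loads below here cannot be speculated past the ready load
 */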

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond_expr.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	typeof(*ptr) VAL;					\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	VAL;							\
})
#endif
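
/*
 * Usage sketch (lock and LOCKED are hypothetical): spin until the value
 * changes, with no ordering implied beyond READ_ONCE(). VAL names the
 * most recently loaded value of *ptr inside the condition.
 *
 *	val = smp_cond_load_relaxed(&lock->val, VAL != LOCKED);
 */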

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	typeof(*ptr) _val;					\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	_val;							\
})
#endif
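
/*
 * Usage sketch (hypothetical obj): wait for a flag with the result
 * ordered like smp_load_acquire(), e.g. before reading data that was
 * published with smp_store_release():
 *
 *	smp_cond_load_acquire(&obj->ready, VAL);	// spin until nonzero
 *	val = obj->data;				// ordered after ready
 */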

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */