/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif

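/*
 * Illustrative sketch (not part of this header): a driver typically uses
 * dma_wmb() to order the stores that fill a DMA descriptor in coherent
 * memory against the store that hands the descriptor to the device, and
 * relies on writel() (or an explicit wmb()) to order coherent-memory
 * stores against the MMIO doorbell write.  The names desc, DESC_HW_OWNED,
 * ring and db_reg below are hypothetical:
 *
 *	desc->addr  = cpu_to_le64(dma_addr);
 *	desc->len   = cpu_to_le32(len);
 *	dma_wmb();
 *	desc->flags = cpu_to_le32(DESC_HW_OWNED);
 *	writel(ring->tail, db_reg);
 */
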
#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	__smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb()	__smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	__smp_wmb()
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#endif	/* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	(typeof(*p))___p1;						\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	__smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	__smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	(typeof(*p))___p1;						\
})
#endif

#endif	/* CONFIG_SMP */

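/*
 * Illustrative sketch (not part of this header): smp_store_release() and
 * smp_load_acquire() pair up for message passing between CPUs.  The
 * variables msg and ready are hypothetical:
 *
 *	CPU 0				CPU 1
 *	WRITE_ONCE(msg, 42);
 *	smp_store_release(&ready, 1);	if (smp_load_acquire(&ready))
 *						r = READ_ONCE(msg);
 *
 * The release orders the store to msg before the store to ready; the
 * acquire orders the load of msg after the load that observed ready == 1,
 * so CPU 1 is guaranteed to see r == 42 when it sees ready == 1.
 */
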
Michael S. Tsirkin6a65d262015-12-27 18:23:01 +0200180/* Barriers for virtual machine guests when talking to an SMP host */
181#define virt_mb() __smp_mb()
182#define virt_rmb() __smp_rmb()
183#define virt_wmb() __smp_wmb()
Michael S. Tsirkin6a65d262015-12-27 18:23:01 +0200184#define virt_store_mb(var, value) __smp_store_mb(var, value)
185#define virt_mb__before_atomic() __smp_mb__before_atomic()
186#define virt_mb__after_atomic() __smp_mb__after_atomic()
187#define virt_store_release(p, v) __smp_store_release(p, v)
188#define virt_load_acquire(p) __smp_load_acquire(p)
189
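/*
 * Illustrative sketch (not part of this header): a virtio-style guest
 * driver uses virt_wmb() so that a ring entry is published before the
 * index update that makes it visible to the (SMP) host, even when the
 * guest itself is built !CONFIG_SMP.  The names vring, fill_desc() and
 * idx are hypothetical:
 *
 *	fill_desc(&vring->desc[idx], buf);
 *	virt_wmb();
 *	vring->avail_idx++;
 */
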
/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides a LOAD->STORE order, the additional RMB
 * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
 * aka. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()		smp_rmb()
#endif

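/*
 * Illustrative sketch (not part of this header): upgrading a control
 * dependency to ACQUIRE.  The variables flag and data are hypothetical:
 *
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 *	val = READ_ONCE(data);		(ordered after the load of flag)
 *
 * This is essentially what smp_cond_load_acquire() below does.
 */
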
/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif

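/*
 * Illustrative usage (not part of this header): spin until a hypothetical
 * pending bit clears in a lock word, with no ordering beyond READ_ONCE().
 * VAL names the freshly loaded value inside the condition expression;
 * lock->val and PENDING_MASK are hypothetical:
 *
 *	val = smp_cond_load_relaxed(&lock->val, !(VAL & PENDING_MASK));
 */
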
/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	__unqual_scalar_typeof(*ptr) _val;			\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	(typeof(*ptr))_val;					\
})
#endif

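/*
 * Illustrative usage (not part of this header): wait for a hypothetical
 * owner to drop obj->locked, then read the object; the ACQUIRE ordering
 * ensures the subsequent accesses happen after the observed store:
 *
 *	smp_cond_load_acquire(&obj->locked, VAL == 0);
 *	consume(obj);			(consume() is hypothetical)
 */
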
/*
 * pmem_wmb() ensures that all stores to persistent storage made by
 * preceding instructions have updated persistent storage before any
 * data access or data transfer caused by subsequent instructions is
 * initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb()	wmb()
#endif

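/*
 * Illustrative sketch (not part of this header), with hypothetical names:
 * after copying data into a persistent-memory mapping with
 * memcpy_flushcache(), pmem_wmb() orders those stores to the persistence
 * domain ahead of the store that publishes them.  (The publishing store
 * would itself still need to be flushed to become persistent.)
 *
 *	memcpy_flushcache(pmem_buf, data, len);
 *	pmem_wmb();
 *	WRITE_ONCE(hdr->valid, 1);
 */
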
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */