/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop() asm volatile ("nop")
#endif

/*
 * Architectures that want generic instrumentation can define __ prefixed
 * variants of all barriers.
 */
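
/*
 * For example (illustrative only, not something this header requires), an
 * architecture's asm/barrier.h might provide a __mb() built from a
 * hypothetical full-barrier instruction:
 *
 *	#define __mb()	asm volatile ("<full-barrier-insn>" ::: "memory")
 *
 * The wrappers below then add KCSAN instrumentation, so mb() expands to
 * kcsan_mb() followed by __mb().
 */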

#ifdef __mb
#define mb() do { kcsan_mb(); __mb(); } while (0)
#endif

#ifdef __rmb
#define rmb() do { kcsan_rmb(); __rmb(); } while (0)
#endif

#ifdef __wmb
#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
#endif

#ifdef __dma_rmb
#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
#endif

#ifdef __dma_wmb
#define dma_wmb() do { kcsan_wmb(); __dma_wmb(); } while (0)
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */
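
/*
 * A minimal sketch (hypothetical device, invented names): make a descriptor
 * visible in memory before the MMIO doorbell write that tells the device to
 * fetch it, the wmb() ordering the descriptor stores against the doorbell:
 *
 *	desc->addr = buf_dma;
 *	desc->len  = buf_len;
 *	wmb();
 *	writel(RING_DOORBELL, dev_mmio + DOORBELL_REG);
 *
 * The dma_rmb()/dma_wmb() variants below order accesses to DMA (coherent)
 * memory only and may be cheaper on some architectures.
 */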

#ifndef mb
#define mb() barrier()
#endif

#ifndef rmb
#define rmb() mb()
#endif

#ifndef wmb
#define wmb() mb()
#endif

#ifndef dma_rmb
#define dma_rmb() rmb()
#endif

#ifndef dma_wmb
#define dma_wmb() wmb()
#endif

#ifndef __smp_mb
#define __smp_mb() mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb() rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb() wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#endif

#ifndef smp_rmb
#define smp_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif

#ifndef smp_wmb
#define smp_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif

#else /* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb() barrier()
#endif

#ifndef smp_rmb
#define smp_rmb() barrier()
#endif

#ifndef smp_wmb
#define smp_wmb() barrier()
#endif

#endif /* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic() __smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic() __smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v) \
do { \
        compiletime_assert_atomic_type(*p); \
        __smp_mb(); \
        WRITE_ONCE(*p, v); \
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p) \
({ \
        __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
        compiletime_assert_atomic_type(*p); \
        __smp_mb(); \
        (typeof(*p))___p1; \
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else /* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) \
do { \
        compiletime_assert_atomic_type(*p); \
        barrier(); \
        WRITE_ONCE(*p, v); \
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) \
({ \
        __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
        compiletime_assert_atomic_type(*p); \
        barrier(); \
        (typeof(*p))___p1; \
})
#endif

#endif /* CONFIG_SMP */
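
/*
 * Illustrative pairing (variable names invented): a producer publishes data
 * with store-release and a consumer reads it with load-acquire; a consumer
 * that observes the new value of @ready is guaranteed to also observe the
 * store to @data that preceded it:
 *
 *	producer:
 *		WRITE_ONCE(data, 42);
 *		smp_store_release(&ready, 1);
 *
 *	consumer:
 *		while (!smp_load_acquire(&ready))
 *			cpu_relax();
 *		val = READ_ONCE(data);
 */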
Michael S. Tsirkina9e42522015-12-27 13:50:07 +0200205
Michael S. Tsirkin6a65d262015-12-27 18:23:01 +0200206/* Barriers for virtual machine guests when talking to an SMP host */
Marco Elverf9486662021-11-30 12:44:22 +0100207#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
208#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
209#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
210#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
211#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
212#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
213#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
Michael S. Tsirkin6a65d262015-12-27 18:23:01 +0200214#define virt_load_acquire(p) __smp_load_acquire(p)
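
/*
 * A rough sketch (virtio-style ring, names invented): the guest makes a new
 * ring entry visible before publishing the updated index to the host:
 *
 *	ring[idx] = buf_desc;
 *	virt_wmb();
 *	WRITE_ONCE(*avail_idx, idx + 1);
 *
 * These expand to the SMP barrier variants even on !CONFIG_SMP kernels,
 * since the host the guest is talking to may well be SMP.
 */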

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides a LOAD->STORE order; the additional RMB
 * provides LOAD->LOAD order, so together they provide LOAD->{LOAD,STORE}
 * order, aka (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep() smp_rmb()
#endif
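
/*
 * Illustrative use (@flag and @data are invented variables): the control
 * dependency from the loop condition orders the load of @flag against later
 * stores; the barrier upgrades that to full (load-)ACQUIRE ordering so the
 * later load of @data is ordered too:
 *
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 *	val = READ_ONCE(data);
 */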

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions, we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond_expr.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
        typeof(ptr) __PTR = (ptr); \
        __unqual_scalar_typeof(*ptr) VAL; \
        for (;;) { \
                VAL = READ_ONCE(*__PTR); \
                if (cond_expr) \
                        break; \
                cpu_relax(); \
        } \
        (typeof(*ptr))VAL; \
})
#endif
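
/*
 * Example usage (hypothetical variable): spin until a counter becomes
 * non-zero, with @VAL naming the freshly loaded value inside the condition:
 *
 *	seen = smp_cond_load_relaxed(&counter, VAL != 0);
 *
 * No ordering beyond READ_ONCE() is implied; use smp_cond_load_acquire()
 * below when ACQUIRE semantics are required.
 */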

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({ \
        __unqual_scalar_typeof(*ptr) _val; \
        _val = smp_cond_load_relaxed(ptr, cond_expr); \
        smp_acquire__after_ctrl_dep(); \
        (typeof(*ptr))_val; \
})
#endif
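
/*
 * Example usage (hypothetical lock word and bit mask): wait for the word to
 * become unlocked and acquire-order everything after the load that saw it
 * clear:
 *
 *	smp_cond_load_acquire(&lock_word, !(VAL & LOCKED_BIT));
 */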

/*
 * pmem_wmb() ensures that all stores to persistent storage made by preceding
 * instructions have updated persistent storage before any data access or
 * data transfer caused by subsequent instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb() wmb()
#endif
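
/*
 * A minimal sketch (assumes a driver/DAX context, names invented): make the
 * freshly written data durable before writing an on-media commit record that
 * refers to it:
 *
 *	memcpy_flushcache(pmem_dst, src, len);
 *	pmem_wmb();
 *	memcpy_flushcache(&sb->commit_seq, &seq, sizeof(seq));
 */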

/*
 * ioremap_wc() maps I/O memory as memory with write-combining attributes. For
 * this kind of memory access, the CPU may wait for prior accesses to be
 * merged with subsequent ones, which in some situations hurts performance.
 * io_stop_wc() can be used to prevent the merging of write-combining memory
 * accesses before this macro with those after it.
 */
#ifndef io_stop_wc
#define io_stop_wc() do { } while (0)
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */