/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but it serves more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Architectures that want generic instrumentation can define __ prefixed
 * variants of all barriers.
 */

#ifdef __mb
#define mb()	do { kcsan_mb(); __mb(); } while (0)
#endif

#ifdef __rmb
#define rmb()	do { kcsan_rmb(); __rmb(); } while (0)
#endif

#ifdef __wmb
#define wmb()	do { kcsan_wmb(); __wmb(); } while (0)
#endif

#ifdef __dma_rmb
#define dma_rmb()	do { kcsan_rmb(); __dma_rmb(); } while (0)
#endif

#ifdef __dma_wmb
#define dma_wmb()	do { kcsan_wmb(); __dma_wmb(); } while (0)
#endif
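
/*
 * Illustrative sketch (not part of this header; the instruction sequence and
 * the architecture are made up for the example): an architecture only has to
 * supply the raw barrier as a __ prefixed macro in its own <asm/barrier.h>,
 * e.g.
 *
 *	#define __mb()	__asm__ __volatile__ ("fence iorw, iorw" ::: "memory")
 *
 * and the wrapper above then turns mb() into
 * "do { kcsan_mb(); __mb(); } while (0)", so KCSAN sees the barrier without
 * the architecture having to know about the instrumentation.
 */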

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif
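
/*
 * Typical use of the dma_*mb() flavours above (illustrative sketch; the
 * "desc" layout and ownership flag are hypothetical): ordering CPU stores to
 * a descriptor shared with a device through coherent DMA memory:
 *
 *	desc->addr = cpu_to_le64(buf);
 *	desc->len  = cpu_to_le32(len);
 *	dma_wmb();				// publish the payload first...
 *	desc->flags = cpu_to_le16(OWNED_BY_DEVICE);	// ...then hand it over
 *
 * with a matching dma_rmb() on the completion side between reading the
 * ownership flag and reading the rest of the descriptor.
 */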

#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	do { kcsan_mb(); __smp_mb(); } while (0)
#endif

#ifndef smp_rmb
#define smp_rmb()	do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif

#ifndef smp_wmb
#define smp_wmb()	do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#endif	/* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	(typeof(*p))___p1;						\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	(typeof(*p))___p1;						\
})
#endif

#endif	/* CONFIG_SMP */
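
/*
 * The canonical use of the store-release/load-acquire pair defined above is
 * message passing between CPUs (illustrative sketch; "data" and "ready" are
 * hypothetical shared variables):
 *
 *	CPU 1					CPU 2
 *	=====					=====
 *	data = 42;				if (smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);			do_work(data);
 *
 * The release guarantees that the store to "data" is visible before the
 * store to "ready"; the acquire guarantees that the load of "data" is
 * ordered after the load that observed "ready" set.
 */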

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#define virt_mb__after_atomic()	do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)
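
/*
 * Illustrative sketch (the ring layout is made up, loosely modelled on a
 * paravirtualised queue): a guest publishing a buffer to the host uses the
 * virt_* flavours because the "other side" is the hypervisor, so these must
 * not degrade to plain barrier() even on a !CONFIG_SMP guest:
 *
 *	ring->slot[head] = entry;			// fill the slot
 *	virt_store_release(&ring->avail_idx, head + 1);	// then expose it
 */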

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides LOAD->STORE ordering; the additional RMB
 * provides LOAD->LOAD ordering. Together they provide LOAD->{LOAD,STORE}
 * ordering, aka (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()	smp_rmb()
#endif
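
/*
 * Illustrative sketch (the "flag" and "payload" variables are hypothetical):
 * a wait loop whose exit depends on a relaxed load already orders subsequent
 * stores via the control dependency; the barrier upgrades that to a full
 * ACQUIRE so subsequent loads are ordered too:
 *
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 *	consume(payload);	// cannot be reordered before the loop exit
 */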

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif
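
/*
 * Example use (illustrative sketch; "lock->val" and _Q_PENDING_MASK are
 * hypothetical here): spin until another CPU clears the pending bits, with
 * no ordering beyond that of READ_ONCE():
 *
 *	val = smp_cond_load_relaxed(&lock->val, !(VAL & _Q_PENDING_MASK));
 *
 * VAL names the most recently loaded value of *ptr, so @cond can refer to it
 * directly; the final value that satisfied the condition is returned.
 */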

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	__unqual_scalar_typeof(*ptr) _val;			\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	(typeof(*ptr))_val;					\
})
#endif
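
/*
 * Example use (illustrative sketch; "node->locked" is hypothetical): wait for
 * a lock handoff flag and acquire everything the previous owner published
 * before setting it:
 *
 *	smp_cond_load_acquire(&node->locked, VAL);
 *
 * which returns only once the flag is non-zero, with ACQUIRE ordering against
 * all later loads and stores.
 */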

/*
 * pmem_wmb() ensures that all stores to persistent storage issued by
 * preceding instructions have actually reached persistent storage before
 * any data access or data transfer caused by subsequent instructions is
 * initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb()	wmb()
#endif
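
/*
 * Illustrative sketch (the "rec" structure is hypothetical; memcpy_flushcache()
 * is assumed to be available): after flushing data out to the persistence
 * domain, pmem_wmb() orders that against the commit record that follows:
 *
 *	memcpy_flushcache(pmem_dst, src, len);	// data reaches pmem
 *	pmem_wmb();				// ...before the commit marker
 *	WRITE_ONCE(rec->committed, 1);
 */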

/*
 * ioremap_wc() maps I/O memory with write-combining attributes. For such
 * accesses, the CPU may delay prior writes so that they can be merged with
 * subsequent ones. In some situations this delay hurts performance.
 * io_stop_wc() prevents write-combining memory accesses before this macro
 * from being merged with those after it.
 */
#ifndef io_stop_wc
#define io_stop_wc() do { } while (0)
#endif
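
/*
 * Illustrative sketch (the register offsets are made up): when a batch of
 * descriptors is pushed through a write-combining ioremap_wc() mapping,
 * io_stop_wc() keeps the final doorbell write from being merged into the
 * same write-combining buffer as the descriptors:
 *
 *	__iowrite64_copy(wc_ring, descs, n_words);
 *	io_stop_wc();
 *	writel(tail, dev_base + DOORBELL_REG);
 */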

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */