/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but it serves more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif
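
/*
 * Illustrative sketch, not part of this header: a typical dma_wmb() use,
 * publishing a DMA descriptor to a device. The descriptor layout and the
 * names (desc, dma_addr, len, DESC_OWN) are hypothetical.
 *
 *	desc->addr  = cpu_to_le64(dma_addr);
 *	desc->len   = cpu_to_le32(len);
 *	dma_wmb();				// payload ordered before ownership
 *	desc->flags = cpu_to_le32(DESC_OWN);	// device may now consume it
 */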

#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	__smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb()	__smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	__smp_wmb()
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#endif	/* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	(typeof(*p))___p1;						\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	__smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	__smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);		\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	(typeof(*p))___p1;						\
})
#endif

#endif	/* CONFIG_SMP */
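
/*
 * Illustrative sketch, not part of this header: message passing with
 * smp_store_release()/smp_load_acquire(). The names (msg, ready,
 * producer, consumer) are hypothetical.
 *
 *	static int msg;
 *	static int ready;
 *
 *	void producer(void)
 *	{
 *		msg = 42;			// plain store to the payload
 *		smp_store_release(&ready, 1);	// orders msg before ready
 *	}
 *
 *	void consumer(void)
 *	{
 *		if (smp_load_acquire(&ready))	// orders ready before msg
 *			BUG_ON(msg != 42);	// acquire pairs with release
 *	}
 */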

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)
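
/*
 * Illustrative sketch, not part of this header: a guest publishing a ring
 * entry to the host, virtio-style. The ring layout and names (ring, head,
 * avail_idx) are hypothetical. The point is that virt_wmb() maps to
 * __smp_wmb() even in a CONFIG_SMP=n guest, because the host side still
 * runs concurrently.
 *
 *	ring->desc[head] = entry;	// fill the descriptor
 *	virt_wmb();			// descriptor visible before the index
 *	ring->avail_idx++;		// the host may now process it
 */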

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides LOAD->STORE ordering; the additional RMB
 * provides LOAD->LOAD ordering, so together they provide LOAD->{LOAD,STORE}
 * ordering, aka (load-)ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()	smp_rmb()
#endif
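
/*
 * Illustrative sketch, not part of this header: upgrading a control
 * dependency to ACQUIRE. The names (flag, data, val) are hypothetical.
 *
 *	while (!READ_ONCE(flag))	// control dependency: LOAD->STORE only
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();	// adds LOAD->LOAD, completing ACQUIRE
 *	val = READ_ONCE(data);		// cannot be speculated ahead of flag
 */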

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond_expr.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	__unqual_scalar_typeof(*ptr) VAL;			\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	(typeof(*ptr))VAL;					\
})
#endif
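
/*
 * Illustrative sketch, not part of this header: spinning until another CPU
 * changes a state word, with no ordering implied. The names (state,
 * STATE_IDLE) are hypothetical; VAL is the pre-named variable above.
 *
 *	int s = smp_cond_load_relaxed(&state, VAL != STATE_IDLE);
 */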

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	__unqual_scalar_typeof(*ptr) _val;			\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	(typeof(*ptr))_val;					\
})
#endif
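
/*
 * Illustrative sketch, not part of this header: waiting for an owner to
 * drop a lock word, with ACQUIRE semantics so that subsequent reads see
 * everything the previous owner published. The names (lock->locked,
 * lock->owner_data) are hypothetical.
 *
 *	smp_cond_load_acquire(&lock->locked, !VAL);	// spin until 0
 *	use(lock->owner_data);		// ordered after the final load
 */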

/*
 * pmem_wmb() ensures that all stores for which modifications are written
 * to persistent storage by preceding instructions have updated persistent
 * storage before any data access or data transfer caused by subsequent
 * instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb()	wmb()
#endif
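
/*
 * Illustrative sketch, not part of this header: making a record durable in
 * pmem before publishing its validity flag. memcpy_flushcache() is the
 * kernel's cache-bypassing copy helper; the record layout (rec->data,
 * rec->valid) is hypothetical.
 *
 *	u8 one = 1;
 *
 *	memcpy_flushcache(rec->data, src, len);		// payload to pmem
 *	pmem_wmb();					// payload durable first
 *	memcpy_flushcache(&rec->valid, &one, 1);	// then the flag
 */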

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */