/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>

#ifndef nop
#define nop() asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif
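
/*
 * Illustrative sketch, not part of this header: the dma_*mb() barriers
 * order CPU accesses to memory shared coherently with a DMA-capable
 * device. The descriptor layout and flag names below are hypothetical.
 *
 *	if (READ_ONCE(desc->status) != DEVICE_OWN) {
 *		dma_rmb();			// own descriptor before reading its data
 *		data = desc->data;
 *		desc->data = new_data;
 *		dma_wmb();			// flush data before handing back ownership
 *		WRITE_ONCE(desc->status, DEVICE_OWN);
 *	}
 */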

#ifndef read_barrier_depends
#define read_barrier_depends()	do { } while (0)
#endif

#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifndef __smp_read_barrier_depends
#define __smp_read_barrier_depends()	read_barrier_depends()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	__smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb()	__smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	__smp_wmb()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()	__smp_read_barrier_depends()
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()	do { } while (0)
#endif

#endif	/* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	__smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	__smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
#endif

#endif	/* CONFIG_SMP */
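
/*
 * Illustrative sketch, not part of this header: the canonical
 * message-passing use of the release/acquire pair defined above;
 * 'data' and 'ready' are hypothetical shared variables.
 *
 *	// producer
 *	data = 42;
 *	smp_store_release(&ready, 1);	// publish data before ready
 *
 *	// consumer
 *	while (!smp_load_acquire(&ready))
 *		cpu_relax();
 *	r = data;			// guaranteed to observe 42
 */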

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_read_barrier_depends() __smp_read_barrier_depends()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)
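
/*
 * Illustrative sketch, not part of this header: even a !CONFIG_SMP guest
 * needs real ordering against an SMP host, which is why the virt_*()
 * macros map to __smp_*() rather than smp_*(). The ring layout below is
 * hypothetical.
 *
 *	ring->slot[idx] = msg;		// fill the slot
 *	virt_wmb();			// order slot data before the index update
 *	ring->produced = idx + 1;	// the host polls this index
 */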

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides LOAD->STORE ordering; the additional RMB
 * provides LOAD->LOAD ordering. Together they provide LOAD->{LOAD,STORE}
 * ordering, i.e. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()	smp_rmb()
#endif
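
/*
 * Illustrative sketch, not part of this header: a spin-wait whose
 * subsequent loads need ACQUIRE ordering; 'flag' and 'data' are
 * hypothetical. This is roughly what smp_cond_load_acquire() expands to.
 *
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();	// upgrade the control dependency
 *	val = data;			// cannot be reordered before the loop exit
 */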

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Because C lacks lambda expressions, the value of *ptr is loaded into a
 * pre-named variable @VAL for use in @cond.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	typeof(*ptr) VAL;					\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	VAL;							\
})
#endif
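
/*
 * Illustrative usage, not part of this header: spin until a hypothetical
 * state field leaves the BUSY state; @VAL names each freshly loaded value.
 *
 *	state = smp_cond_load_relaxed(&obj->state, VAL != BUSY);
 */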

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable, but
 * employs the control dependency of the wait to reduce the barrier required
 * on many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	typeof(*ptr) _val;					\
	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
	smp_acquire__after_ctrl_dep();				\
	_val;							\
})
#endif
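
/*
 * Illustrative usage, not part of this header: wait for a hypothetical
 * 'locked' field to drop to zero, with ACQUIRE ordering so the critical
 * section that follows cannot be reordered before the wait.
 *
 *	smp_cond_load_acquire(&lock->locked, !VAL);
 */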

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */