#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			int ______r;					\
			static struct ftrace_likely_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) lets us ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
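
/*
 * Illustrative use (a sketch, not part of this header's API): annotate
 * the expected direction of a branch so the compiler can keep the hot
 * path straight-line. The allocation check below is a made-up example:
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (unlikely(!buf))
 *		return -ENOMEM;
 */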

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
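
/*
 * A minimal sketch of why barrier_data() exists (it mirrors what
 * memzero_explicit() in lib/string.c does): passing the pointer to the
 * barrier makes the compiler assume the memory behind it is used, so a
 * preceding store cannot be elided as dead, e.g. when scrubbing a key
 * that is about to go out of scope:
 *
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);
 */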

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
#define annotate_reachable() ({						\
	asm("%c0:\n\t"							\
	    ".pushsection .discard.reachable\n\t"			\
	    ".long %c0b - .\n\t"					\
	    ".popsection\n\t" : : "i" (__LINE__));			\
})
#define annotate_unreachable() ({					\
	asm("%c0:\n\t"							\
	    ".pushsection .discard.unreachable\n\t"			\
	    ".long %c0b - .\n\t"					\
	    ".popsection\n\t" : : "i" (__LINE__));			\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif
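
/*
 * Illustrative use (both function names below are made up): telling the
 * optimizer that control never returns past this point avoids bogus
 * fall-through warnings and dead epilogue code in __noreturn functions:
 *
 *	void __noreturn hypothetical_halt(void)
 *	{
 *		hypothetical_stop_cpu();
 *		unreachable();
 *	}
 */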

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif
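
/*
 * Illustrative use (the handler name is made up): keep a vector entry
 * alive even though no C code ever takes its address explicitly:
 *
 *	void hypothetical_vector_entry(void);
 *	KENTRY(hypothetical_vector_entry);
 */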

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
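
/*
 * Sketch of the typical use (percpu-style, names made up): form a
 * pointer at a fixed offset from a symbol without letting the compiler
 * draw conclusions about which object the result points into:
 *
 *	ptr = RELOC_HIDE(&hypothetical_base_var, hypothetical_cpu_offset);
 */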

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
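
/*
 * For example, __UNIQUE_ID(foo) appearing on line 42 of a file pastes to
 * __UNIQUE_ID_foo42; two expansions on the same line still collide,
 * hence "not-quite-unique".
 */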

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure.
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#endif
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering. One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
 * least two memcpy()s: one for the __builtin_memcpy() and then one for
 * the macro's copy into the stack variable '__u'.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
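
/*
 * Illustrative pairing (a sketch with a made-up flag, not kernel API):
 * an IRQ handler publishes a result and process context on the same CPU
 * polls for it. Each access is performed exactly once and is not torn
 * for the sizes special-cased above; no inter-CPU ordering is implied.
 *
 *	irq handler:	WRITE_ONCE(hypothetical_done, 1);
 *	process:	while (!READ_ONCE(hypothetical_done))
 *				cpu_relax();
 */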

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable-sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of the POSIX assert, this macro will break the build
 * if the supplied condition is *false*, emitting the supplied error
 * message if the compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
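
/*
 * Example (illustrative, struct name made up): fail the build, rather
 * than at runtime, if a structure outgrows a fixed on-wire size:
 *
 *	compiletime_assert(sizeof(struct hypothetical_hdr) <= 64,
 *			   "hypothetical_hdr no longer fits in 64 bytes");
 */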

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
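
/*
 * Illustrative legacy use on a scalar (the variable name is made up);
 * new code should use READ_ONCE()/WRITE_ONCE() as noted above:
 *
 *	while (ACCESS_ONCE(hypothetical_seq) == old_seq)
 *		cpu_relax();
 */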

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU. That
 * "something other" might be reference counting or simple immortality.
 *
 * The seemingly unused variable ___typecheck_p validates that @p is
 * indeed a pointer type by using a pointer to typeof(*p) as the type.
 * Taking a pointer to typeof(*p) again is needed in case p is void *.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	typeof(*(p)) *___typecheck_p __maybe_unused; \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})
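
/*
 * Illustrative use (names made up): load a pointer published by another
 * CPU, then dereference it; the dependency barrier orders the pointer
 * load against later loads through it (needed on e.g. Alpha):
 *
 *	struct hypothetical_obj *obj = lockless_dereference(hypothetical_gp);
 *	if (obj)
 *		val = obj->field;
 */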

#endif /* __LINUX_COMPILER_H */