/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
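/*
 * For example, a file that must not be instrumented can opt out by
 * defining the guard before any includes (the x86 vDSO timekeeping
 * code does this):
 *
 *	#define DISABLE_BRANCH_PROFILING
 *	#include <linux/kernel.h>
 */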
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif
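/*
 * Typical usage sketch: annotate the branch direction expected to
 * dominate at runtime, e.g.
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (unlikely(!buf))
 *		return -ENOMEM;
 *
 * The hint only steers code placement; a wrong hint costs performance,
 * never correctness.
 */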

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__aligned(4)					\
			__section("_ftrace_branch")			\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif
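/*
 * barrier() makes the compiler assume memory has changed, so values
 * cached in registers are reloaded afterwards. A sketch of a busy-wait
 * on a flag written by an interrupt handler:
 *
 *	while (!done)
 *		barrier();
 *
 * forcing 'done' to be re-read on each iteration. READ_ONCE() (below)
 * is usually the preferred way to spell such an access.
 */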

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("ANNOTATE_REACHABLE counter=%c0"			\
		     : : "i" (__COUNTER__));				\
})
#define annotate_unreachable() ({					\
	asm volatile("ANNOTATE_UNREACHABLE counter=%c0"			\
		     : : "i" (__COUNTER__));				\
})
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
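/*
 * Hypothetical example ('my_vector_entry' is an invented name): keep a
 * handler that is referenced only from assembly or a hardware vector
 * table:
 *
 *	void my_vector_entry(void);
 *	KENTRY(my_vector_entry);
 *
 * The emitted __kentry_my_vector_entry reference is what stops the
 * linker from discarding the symbol.
 */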

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
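/*
 * Sketch of the intent ('cpu_off' is an invented name): because the
 * arithmetic is laundered through an unsigned long, the compiler cannot
 * assume the result still points into the original object:
 *
 *	struct foo *remote = RELOC_HIDE(ptr, cpu_off);
 *
 * Per-cpu variable accessors are the classic users of this pattern.
 */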

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
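/*
 * Example expansion with this fallback: __UNIQUE_ID(foo) on line 42
 * becomes __UNIQUE_ID_foo42, giving macro authors a fresh identifier
 * per expansion (DEFINE_COOKIE is an invented example):
 *
 *	#define DEFINE_COOKIE() static int __UNIQUE_ID(cookie)
 *
 * "Not-quite-unique" because two expansions on one line still collide;
 * compiler-specific headers use __COUNTER__ instead where available.
 */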

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. An attempt to inline it may cause a build failure.
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits), READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy into the '__u' variable
 * allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
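/*
 * A minimal sketch of use case (1), with invented names: a flag shared
 * between process context and an interrupt handler on the same CPU:
 *
 *	WRITE_ONCE(shared_flag, 1);		(process context)
 *
 *	if (READ_ONCE(shared_flag))		(interrupt handler)
 *		handle_work();
 *
 * The compiler can no longer tear, fuse, or hoist these accesses;
 * ordering against other CPUs still needs barriers or atomics.
 */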
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(".discard.addressable") __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
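/*
 * Hypothetical example ('my_asm_target' is an invented name): a static
 * function referenced only from inline assembly would otherwise be a
 * candidate for elimination:
 *
 *	static void my_asm_target(void);
 *	__ADDRESSABLE(my_asm_target)
 *
 * The entry lands in a .discard section, so it pins the symbol during
 * the build without occupying space in the final image.
 */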

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
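/*
 * Worked example: if the 32-bit value stored at address 'off' holds
 * (target - off), then offset_to_ptr(off) returns off + *off == target.
 * Relative references like this stay position-independent and take half
 * the space of absolute 64-bit pointers.
 */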

#else /* __ASSEMBLY__ */

#ifdef __KERNEL__
#ifndef LINKER_SCRIPT

#ifdef CONFIG_STACK_VALIDATION
.macro ANNOTATE_UNREACHABLE counter:req
\counter:
	.pushsection .discard.unreachable
	.long \counter\()b -.
	.popsection
.endm

.macro ANNOTATE_REACHABLE counter:req
\counter:
	.pushsection .discard.reachable
	.long \counter\()b -.
	.popsection
.endm

.macro ASM_UNREACHABLE
999:
	.pushsection .discard.unreachable
	.long 999b - .
	.popsection
.endm
#else /* CONFIG_STACK_VALIDATION */
.macro ANNOTATE_UNREACHABLE counter:req
.endm

.macro ANNOTATE_REACHABLE counter:req
.endm

.macro ASM_UNREACHABLE
.endm
#endif /* CONFIG_STACK_VALIDATION */

#endif /* LINKER_SCRIPT */
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
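/*
 * Illustrative use ('struct my_hdr' is an invented name): the condition
 * must be a compile-time constant, and a false one fails the build with
 * the given message (when optimizing):
 *
 *	compiletime_assert(sizeof(struct my_hdr) == 16,
 *			   "my_hdr must stay 16 bytes for the wire format");
 */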

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
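/*
 * Illustrative sketch: this check is what lets ARRAY_SIZE() in
 * <linux/kernel.h> reject pointers at compile time:
 *
 *	int a[8], *p = a;
 *
 *	ARRAY_SIZE(a);	(fine: 'a' and '&a[0]' have different types)
 *	ARRAY_SIZE(p);	(build error: 'p' and '&p[0]' are the same type)
 */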
Miguel Ojeda | ec0bbef | 2018-08-30 19:25:14 +0200 | [diff] [blame] | 380 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 381 | #endif /* __LINUX_COMPILER_H */ |