/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section(_ftrace_annotated_branch)	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section(_ftrace_branch)		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

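/*
 * Illustrative sketch (not part of the original file): likely()/unlikely()
 * wrap __builtin_expect() so the compiler lays out the expected path as the
 * fall-through. The function and types below are hypothetical placeholders.
 *
 *	int queue_pop(struct queue *q, int *out)
 *	{
 *		if (unlikely(!q))		// error path, expected cold
 *			return -EINVAL;
 *		if (likely(q->count > 0)) {	// common case, expected hot
 *			*out = q->items[--q->count];
 *			return 0;
 *		}
 *		return -EAGAIN;
 *	}
 */
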
/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif

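/*
 * Illustrative sketch (not part of the original file): unreachable() tells the
 * compiler (and objtool) that control cannot reach a given point, e.g. after a
 * switch that covers every value. 'enum mode' and its members are hypothetical.
 *
 *	int mode_to_flags(enum mode m)
 *	{
 *		switch (m) {
 *		case MODE_RO:	return 0;
 *		case MODE_RW:	return 1;
 *		}
 *		unreachable();	// every enumerator is handled above
 *	}
 */
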
/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif

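/*
 * Illustrative sketch (not part of the original file): KENTRY() keeps a symbol
 * that is only reached via a hardware vector or other non-C reference alive
 * through linking. 'my_vector_entry' is a hypothetical handler name.
 *
 *	void my_vector_entry(void);	// referenced only from a vector table
 *	KENTRY(my_vector_entry);	// emits a kept reference in ___kentry
 */
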
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif

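/*
 * Illustrative sketch (not part of the original file): OPTIMIZER_HIDE_VAR()
 * makes the compiler treat a variable's value as unknown, defeating constant
 * folding and value tracking. The names below are hypothetical.
 *
 *	u32 mask = 0;
 *	OPTIMIZER_HIDE_VAR(mask);	// compiler can no longer assume mask == 0
 *	val &= ~mask;			// the AND is actually emitted
 */
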
/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build failure.
 *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one in the macro itself, copying through the
 * '__u' union allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})

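/*
 * Illustrative sketch (not part of the original file): a flag shared between
 * an interrupt handler and process context, per use case (1) above. The
 * variable 'data_ready' and both functions are hypothetical.
 *
 *	static int data_ready;
 *
 *	void my_irq_handler(void)		// irq context (producer)
 *	{
 *		WRITE_ONCE(data_ready, 1);	// store emitted exactly once
 *	}
 *
 *	void my_poll_loop(void)			// process context (consumer)
 *	{
 *		while (!READ_ONCE(data_ready))	// refetched on every iteration
 *			cpu_relax();
 *	}
 */
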
#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(.discard.addressable) __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}

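/*
 * Illustrative sketch (not part of the original file): offset_to_ptr() turns a
 * self-relative 32-bit offset back into an absolute pointer, as used by
 * compact pointer tables where each entry stores "target - &entry".
 * 'table' and 'i' are hypothetical.
 *
 *	const int *entry = &table[i];
 *	void *target = offset_to_ptr(entry);	// entry's address plus its value
 */
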
#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)

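/*
 * Illustrative sketch (not part of the original file): with a constant false
 * condition, compiletime_assert() references an undefined function and the
 * build fails with the given message. 'struct hw_desc' and check_layout() are
 * hypothetical; the macro expands to a statement, so it must live in a function.
 *
 *	static void check_layout(void)
 *	{
 *		compiletime_assert(sizeof(struct hw_desc) == 64,
 *				   "struct hw_desc must stay 64 bytes");
 *	}
 */
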
#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#endif /* __LINUX_COMPILER_H */