/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_COMPILER_H_
#define _TOOLS_LINUX_COMPILER_H_

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
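
/*
 * Illustrative sketch, not part of the original header: barrier() is a
 * compiler-only barrier. A typical use is forcing the compiler to re-read
 * memory instead of keeping a value cached in a register. The function and
 * variable names below are hypothetical.
 */
static inline void example_spin_until_set(int *flag)
{
	while (!*flag)
		barrier();	/* *flag must be reloaded on every iteration */
}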

#ifndef __always_inline
# define __always_inline inline __attribute__((always_inline))
#endif

#ifndef noinline
#define noinline
#endif

/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
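
/*
 * Illustrative sketch, not part of the original header: __same_type() is
 * useful for build-time type checks, e.g. telling a true array apart from
 * a pointer. The macro name below is hypothetical.
 */
#define example_is_array(a) (!__same_type((a), &(a)[0]))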

#ifdef __ANDROID__
/*
 * FIXME: Big hammer to get rid of tons of:
 * "warning: always_inline function might not be inlinable"
 *
 * At least on android-ndk-r12/platforms/android-24/arch-arm
 */
#undef __always_inline
#define __always_inline inline
#endif

#define __user
#define __rcu
#define __read_mostly

#ifndef __attribute_const__
# define __attribute_const__
#endif

#ifndef __maybe_unused
# define __maybe_unused __attribute__((unused))
#endif

/*
 * Note: in this tools copy, __used expands to the "unused" attribute,
 * i.e. it only suppresses "defined but not used" warnings.
 */
#ifndef __used
# define __used __attribute__((__unused__))
#endif

#ifndef __packed
# define __packed __attribute__((__packed__))
#endif

#ifndef __force
# define __force
#endif

#ifndef __weak
# define __weak __attribute__((weak))
#endif

#ifndef likely
# define likely(x) __builtin_expect(!!(x), 1)
#endif

#ifndef unlikely
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif

#ifndef __init
# define __init
#endif

/* Self-reference to silence "may be used uninitialized" warnings */
#define uninitialized_var(x) x = *(&(x))

#include <linux/types.h>

/*
 * The following functions are taken from the kernel sources and
 * break aliasing rules in their original form.
 *
 * While the kernel is compiled with -fno-strict-aliasing,
 * perf uses -Wstrict-aliasing=3, which makes the build fail
 * under gcc 4.4.
 *
 * Use the extra __may_alias__ types to allow aliasing
 * in this case.
 */
typedef __u8 __attribute__((__may_alias__)) __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break;
	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break;
	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits), READ_ONCE() and WRITE_ONCE()
 * will fall back to memcpy().
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define READ_ONCE(x) \
	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })

#define WRITE_ONCE(x, val) \
	({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
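
/*
 * Illustrative sketch, not part of the original header: a minimal
 * single-writer/single-reader flag. READ_ONCE()/WRITE_ONCE() keep the
 * compiler from merging, refetching or tearing these accesses; they do
 * not emit CPU memory barriers. The function names below are hypothetical.
 */
static __always_inline void example_publish(int *flag)
{
	WRITE_ONCE(*flag, 1);		/* writer: mark data as ready */
}

static __always_inline int example_poll(int *flag)
{
	return READ_ONCE(*flag);	/* reader: fresh load on every call */
}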

#ifndef __fallthrough
# define __fallthrough
#endif

#endif /* _TOOLS_LINUX_COMPILER_H_ */